Diffstat (limited to 'libs/luajit-cmake/luajit/src')
-rw-r--r--  libs/luajit-cmake/luajit/src/.gitignore | 7
-rw-r--r--  libs/luajit-cmake/luajit/src/Makefile | 726
-rw-r--r--  libs/luajit-cmake/luajit/src/Makefile.dep | 259
-rw-r--r--  libs/luajit-cmake/luajit/src/host/.gitignore | 3
-rw-r--r--  libs/luajit-cmake/luajit/src/host/README | 4
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm.c | 528
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm.h | 105
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm_asm.c | 348
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm_fold.c | 236
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm_lib.c | 459
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm_libbc.h | 81
-rw-r--r--  libs/luajit-cmake/luajit/src/host/buildvm_peobj.c | 379
-rw-r--r--  libs/luajit-cmake/luajit/src/host/genlibbc.lua | 225
-rw-r--r--  libs/luajit-cmake/luajit/src/host/genminilua.lua | 436
-rw-r--r--  libs/luajit-cmake/luajit/src/host/minilua.c | 7771
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/.gitignore | 1
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/bc.lua | 190
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/bcsave.lua | 705
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_arm.lua | 689
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_arm64.lua | 1216
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_arm64be.lua | 12
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mips.lua | 694
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mips64.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mips64el.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mips64r6.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mips64r6el.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_mipsel.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_ppc.lua | 591
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_x64.lua | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dis_x86.lua | 953
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/dump.lua | 726
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/p.lua | 312
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/v.lua | 170
-rw-r--r--  libs/luajit-cmake/luajit/src/jit/zone.lua | 45
-rw-r--r--  libs/luajit-cmake/luajit/src/lauxlib.h | 161
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_aux.c | 370
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_base.c | 696
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_bit.c | 180
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_buffer.c | 360
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_debug.c | 406
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_ffi.c | 870
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_init.c | 55
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_io.c | 551
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_jit.c | 761
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_math.c | 201
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_os.c | 292
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_package.c | 628
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_string.c | 676
-rw-r--r--  libs/luajit-cmake/luajit/src/lib_table.c | 327
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_alloc.c | 1485
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_alloc.h | 18
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_api.c | 1313
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_arch.h | 719
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm.c | 2571
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm.h | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm_arm.h | 2286
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm_arm64.h | 2070
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm_mips.h | 2808
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm_ppc.h | 2325
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_asm_x86.h | 3125
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_assert.c | 28
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_bc.c | 14
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_bc.h | 265
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_bcdump.h | 68
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_bcread.c | 453
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_bcwrite.c | 372
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_buf.c | 305
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_buf.h | 198
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_carith.c | 432
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_carith.h | 37
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ccall.c | 1189
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ccall.h | 194
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ccallback.c | 796
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ccallback.h | 25
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cconv.c | 770
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cconv.h | 71
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cdata.c | 304
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cdata.h | 79
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_char.c | 43
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_char.h | 42
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_clib.c | 434
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_clib.h | 29
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cparse.c | 1927
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_cparse.h | 67
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_crecord.c | 1944
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_crecord.h | 43
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ctype.c | 646
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ctype.h | 481
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_debug.c | 705
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_debug.h | 66
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_def.h | 381
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_dispatch.c | 559
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_dispatch.h | 164
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_emit_arm.h | 361
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_emit_arm64.h | 424
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_emit_mips.h | 310
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_emit_ppc.h | 238
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_emit_x86.h | 572
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_err.c | 1098
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_err.h | 58
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_errmsg.h | 200
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ff.h | 18
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ffrecord.c | 1574
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ffrecord.h | 24
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_frame.h | 297
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_func.c | 191
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_func.h | 24
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_gc.c | 909
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_gc.h | 136
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_gdbjit.c | 818
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_gdbjit.h | 22
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ir.c | 500
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ir.h | 614
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_ircall.h | 383
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_iropt.h | 162
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_jit.h | 528
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_lex.c | 514
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_lex.h | 93
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_lib.c | 359
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_lib.h | 116
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_load.c | 168
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_mcode.c | 374
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_mcode.h | 30
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_meta.c | 482
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_meta.h | 38
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_obj.c | 51
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_obj.h | 1045
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_dce.c | 75
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_fold.c | 2602
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_loop.c | 453
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_mem.c | 979
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_narrow.c | 622
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_sink.c | 258
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_split.c | 848
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_parse.c | 2747
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_parse.h | 18
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_prng.c | 259
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_prng.h | 24
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_profile.c | 371
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_profile.h | 21
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_record.c | 2838
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_record.h | 47
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_serialize.c | 539
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_serialize.h | 28
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_snap.c | 996
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_snap.h | 35
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_state.c | 335
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_state.h | 37
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_str.c | 370
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_str.h | 31
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_strfmt.c | 606
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_strfmt.h | 131
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_strfmt_num.c | 592
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_strscan.c | 558
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_strscan.h | 40
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_tab.c | 693
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_tab.h | 96
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target.h | 165
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target_arm.h | 271
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target_arm64.h | 336
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target_mips.h | 417
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target_ppc.h | 280
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_target_x86.h | 357
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_trace.c | 987
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_trace.h | 58
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_traceerr.h | 61
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_udata.c | 62
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_udata.h | 17
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_vm.h | 116
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_vmevent.c | 58
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_vmevent.h | 59
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_vmmath.c | 107
-rw-r--r--  libs/luajit-cmake/luajit/src/ljamalg.c | 91
-rw-r--r--  libs/luajit-cmake/luajit/src/lua.h | 402
-rw-r--r--  libs/luajit-cmake/luajit/src/lua.hpp | 9
-rw-r--r--  libs/luajit-cmake/luajit/src/luaconf.h | 152
-rw-r--r--  libs/luajit-cmake/luajit/src/luajit.c | 586
-rw-r--r--  libs/luajit-cmake/luajit/src/luajit.h | 79
-rw-r--r--  libs/luajit-cmake/luajit/src/lualib.h | 44
-rw-r--r--  libs/luajit-cmake/luajit/src/msvcbuild.bat | 127
-rw-r--r--  libs/luajit-cmake/luajit/src/nxbuild.bat | 159
-rw-r--r--  libs/luajit-cmake/luajit/src/ps4build.bat | 123
-rw-r--r--  libs/luajit-cmake/luajit/src/ps5build.bat | 123
-rw-r--r--  libs/luajit-cmake/luajit/src/psvitabuild.bat | 93
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_arm.dasc | 4663
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_arm64.dasc | 4158
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_mips.dasc | 5392
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_mips64.dasc | 5538
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_ppc.dasc | 6041
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_x64.dasc | 4935
-rw-r--r--  libs/luajit-cmake/luajit/src/vm_x86.dasc | 5825
-rw-r--r--  libs/luajit-cmake/luajit/src/xb1build.bat | 101
-rw-r--r--  libs/luajit-cmake/luajit/src/xedkbuild.bat | 92
193 files changed, 128439 insertions, 0 deletions
diff --git a/libs/luajit-cmake/luajit/src/.gitignore b/libs/luajit-cmake/luajit/src/.gitignore
new file mode 100644
index 0000000..1a30573
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/.gitignore
@@ -0,0 +1,7 @@
+luajit
+lj_bcdef.h
+lj_ffdef.h
+lj_libdef.h
+lj_recdef.h
+lj_folddef.h
+lj_vm.[sS]
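
Everything ignored above is a build artifact generated into src/: the lj_*def.h headers and lj_vm.[sS] are emitted by the host/buildvm tool, and luajit is the final executable. As a rough sketch (assuming a completed native build; the authoritative commands are the BUILDVM rules in the Makefile below, which also substitute the correct output mode per platform), the generated files come from invocations like:

    host/buildvm -m elfasm -o lj_vm.S
    host/buildvm -m bcdef -o lj_bcdef.h lib_base.c lib_math.c lib_bit.c \
        lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c \
        lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c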
diff --git a/libs/luajit-cmake/luajit/src/Makefile b/libs/luajit-cmake/luajit/src/Makefile
new file mode 100644
index 0000000..30d64be
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/Makefile
@@ -0,0 +1,726 @@
+##############################################################################
+# LuaJIT Makefile. Requires GNU Make.
+#
+# Please read doc/install.html before changing any variables!
+#
+# Suitable for POSIX platforms (Linux, *BSD, OSX etc.).
+# Also works with MinGW and Cygwin on Windows.
+# Please check msvcbuild.bat for building with MSVC on Windows.
+#
+# Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+##############################################################################
+
+MAJVER= 2
+MINVER= 1
+RELVER= 0
+ABIVER= 5.1
+NODOTABIVER= 51
+
+##############################################################################
+############################# COMPILER OPTIONS #############################
+##############################################################################
+# These options mainly affect the speed of the JIT compiler itself, not the
+# speed of the JIT-compiled code. Turn any of the optional settings on by
+# removing the '#' in front of them. Make sure you force a full recompile
+# with "make clean", followed by "make" if you change any options.
+#
+DEFAULT_CC = gcc
+#
+# LuaJIT builds as a native 32 or 64 bit binary by default.
+CC= $(DEFAULT_CC)
+#
+# Use this if you want to force a 32 bit build on a 64 bit multilib OS.
+#CC= $(DEFAULT_CC) -m32
+#
+# Since the assembler part does NOT maintain a frame pointer, it's pointless
+# to slow down the C part by not omitting it. Debugging, tracebacks and
+# unwinding are not affected -- the assembler part has frame unwind
+# information and GCC emits it where needed (x64) or with -g (see CCDEBUG).
+CCOPT= -O2 -fomit-frame-pointer
+# Use this if you want to generate a smaller binary (but it's slower):
+#CCOPT= -Os -fomit-frame-pointer
+# Note: it's no longer recommended to use -O3 with GCC 4.x.
+# The I-Cache bloat usually outweighs the benefits from aggressive inlining.
+#
+# Target-specific compiler options:
+#
+# x86/x64 only: For GCC 4.2 or higher and if you don't intend to distribute
+# the binaries to a different machine you could also use: -march=native
+#
+CCOPT_x86= -march=i686 -msse -msse2 -mfpmath=sse
+CCOPT_x64=
+CCOPT_arm=
+CCOPT_arm64=
+CCOPT_ppc=
+CCOPT_mips=
+#
+CCDEBUG=
+# Uncomment the next line to generate debug information:
+#CCDEBUG= -g
+#
+CCWARN= -Wall
+# Uncomment the next line to enable more warnings:
+#CCWARN+= -Wextra -Wdeclaration-after-statement -Wredundant-decls -Wshadow -Wpointer-arith
+#
+##############################################################################
+
+##############################################################################
+################################ BUILD MODE ################################
+##############################################################################
+# The default build mode is mixed mode on POSIX. On Windows this is the same
+# as dynamic mode.
+#
+# Mixed mode creates a static + dynamic library and a statically linked luajit.
+BUILDMODE= mixed
+#
+# Static mode creates a static library and a statically linked luajit.
+#BUILDMODE= static
+#
+# Dynamic mode creates a dynamic library and a dynamically linked luajit.
+# Note: this executable will only run when the library is installed!
+#BUILDMODE= dynamic
+#
+##############################################################################
+
+##############################################################################
+################################# FEATURES #################################
+##############################################################################
+# Enable/disable these features as needed, but make sure you force a full
+# recompile with "make clean", followed by "make".
+XCFLAGS=
+#
+# Permanently disable the FFI extension to reduce the size of the LuaJIT
+# executable. Note that the FFI library is compiled in, but NOT loaded by
+# default. It only allocates memory if you actually make use of it.
+#XCFLAGS+= -DLUAJIT_DISABLE_FFI
+#
+# Features from Lua 5.2 that are unlikely to break existing code are
+# enabled by default. Some other features that *might* break some existing
+# code (e.g. __pairs or os.execute() return values) can be enabled here.
+# Note: this does not provide full compatibility with Lua 5.2 at this time.
+#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT
+#
+# Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter.
+#XCFLAGS+= -DLUAJIT_DISABLE_JIT
+#
+# Some architectures (e.g. PPC) can use either single-number (1) or
+# dual-number (2) mode. Uncomment one of these lines to override the
+# default mode. Please see LJ_ARCH_NUMMODE in lj_arch.h for details.
+#XCFLAGS+= -DLUAJIT_NUMMODE=1
+#XCFLAGS+= -DLUAJIT_NUMMODE=2
+#
+# Disable LJ_GC64 mode for x64.
+#XCFLAGS+= -DLUAJIT_DISABLE_GC64
+#
+##############################################################################
+
+##############################################################################
+############################ DEBUGGING SUPPORT #############################
+##############################################################################
+# Enable these options as needed, but make sure you force a full recompile
+# with "make clean", followed by "make".
+# Note that most of these are NOT suitable for benchmarking or release mode!
+#
+# Use the system provided memory allocator (realloc) instead of the
+# bundled memory allocator. This is slower, but sometimes helpful for
+# debugging. This option cannot be enabled on x64 without GC64, since
+# realloc usually doesn't return addresses in the right address range.
+# OTOH this option is mandatory for Valgrind's memcheck tool on x64 and
+# the only way to get useful results from it for all other architectures.
+#XCFLAGS+= -DLUAJIT_USE_SYSMALLOC
+#
+# This define is required to run LuaJIT under Valgrind. The Valgrind
+# header files must be installed. You should enable debug information, too.
+#XCFLAGS+= -DLUAJIT_USE_VALGRIND
+#
+# This is the client for the GDB JIT API. GDB 7.0 or higher is required
+# to make use of it. See lj_gdbjit.c for details. Enabling this causes
+# a non-negligible overhead, even when not running under GDB.
+#XCFLAGS+= -DLUAJIT_USE_GDBJIT
+#
+# Turn on assertions for the Lua/C API to debug problems with lua_* calls.
+# This is rather slow -- use only while developing C libraries/embeddings.
+#XCFLAGS+= -DLUA_USE_APICHECK
+#
+# Turn on assertions for the whole LuaJIT VM. This significantly slows down
+# everything. Use only if you suspect a problem with LuaJIT itself.
+#XCFLAGS+= -DLUA_USE_ASSERT
+#
+##############################################################################
+# You probably don't need to change anything below this line!
+##############################################################################
+
+##############################################################################
+# Host system detection.
+##############################################################################
+
+ifeq (Windows,$(findstring Windows,$(OS))$(MSYSTEM)$(TERM))
+ HOST_SYS= Windows
+else
+ HOST_SYS:= $(shell uname -s)
+ ifneq (,$(findstring MINGW,$(HOST_SYS)))
+ HOST_SYS= Windows
+ HOST_MSYS= mingw
+ endif
+ ifneq (,$(findstring MSYS,$(HOST_SYS)))
+ HOST_SYS= Windows
+ HOST_MSYS= mingw
+ endif
+ ifneq (,$(findstring CYGWIN,$(HOST_SYS)))
+ HOST_SYS= Windows
+ HOST_MSYS= cygwin
+ endif
+endif
+
+##############################################################################
+# Flags and options for host and target.
+##############################################################################
+
+# You can override the following variables at the make command line:
+# CC HOST_CC STATIC_CC DYNAMIC_CC
+# CFLAGS HOST_CFLAGS TARGET_CFLAGS
+# LDFLAGS HOST_LDFLAGS TARGET_LDFLAGS TARGET_SHLDFLAGS
+# LIBS HOST_LIBS TARGET_LIBS
+# CROSS HOST_SYS TARGET_SYS TARGET_FLAGS
+#
+# Cross-compilation examples:
+# make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows
+# make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-
+
+ASOPTIONS= $(CCOPT) $(CCWARN) $(XCFLAGS) $(CFLAGS)
+CCOPTIONS= $(CCDEBUG) $(ASOPTIONS)
+LDOPTIONS= $(CCDEBUG) $(LDFLAGS)
+
+HOST_CC= $(CC)
+HOST_RM?= rm -f
+# If left blank, minilua is built and used. You can supply an installed
+# copy of (plain) Lua 5.1 or 5.2, plus Lua BitOp. E.g. with: HOST_LUA=lua
+HOST_LUA=
+
+HOST_XCFLAGS= -I.
+HOST_XLDFLAGS=
+HOST_XLIBS=
+HOST_ACFLAGS= $(CCOPTIONS) $(HOST_XCFLAGS) $(TARGET_ARCH) $(HOST_CFLAGS)
+HOST_ALDFLAGS= $(LDOPTIONS) $(HOST_XLDFLAGS) $(HOST_LDFLAGS)
+HOST_ALIBS= $(HOST_XLIBS) $(LIBS) $(HOST_LIBS)
+
+STATIC_CC = $(CROSS)$(CC)
+DYNAMIC_CC = $(CROSS)$(CC) -fPIC
+TARGET_CC= $(STATIC_CC)
+TARGET_STCC= $(STATIC_CC)
+TARGET_DYNCC= $(DYNAMIC_CC)
+TARGET_LD= $(CROSS)$(CC)
+TARGET_AR= $(CROSS)ar rcus
+TARGET_STRIP= $(CROSS)strip
+
+TARGET_LIBPATH= $(or $(PREFIX),/usr/local)/$(or $(MULTILIB),lib)
+TARGET_SONAME= libluajit-$(ABIVER).so.$(MAJVER)
+TARGET_DYLIBNAME= libluajit-$(ABIVER).$(MAJVER).dylib
+TARGET_DYLIBPATH= $(TARGET_LIBPATH)/$(TARGET_DYLIBNAME)
+TARGET_DLLNAME= lua$(NODOTABIVER).dll
+TARGET_DLLDOTANAME= libluajit-$(ABIVER).dll.a
+TARGET_XSHLDFLAGS= -shared -fPIC -Wl,-soname,$(TARGET_SONAME)
+TARGET_DYNXLDOPTS=
+
+TARGET_LFSFLAGS= -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE
+TARGET_XCFLAGS= $(TARGET_LFSFLAGS) -U_FORTIFY_SOURCE
+TARGET_XLDFLAGS=
+TARGET_XLIBS= -lm
+TARGET_TCFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
+TARGET_ACFLAGS= $(CCOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
+TARGET_ASFLAGS= $(ASOPTIONS) $(TARGET_XCFLAGS) $(TARGET_FLAGS) $(TARGET_CFLAGS)
+TARGET_ALDFLAGS= $(LDOPTIONS) $(TARGET_XLDFLAGS) $(TARGET_FLAGS) $(TARGET_LDFLAGS)
+TARGET_ASHLDFLAGS= $(LDOPTIONS) $(TARGET_XSHLDFLAGS) $(TARGET_FLAGS) $(TARGET_SHLDFLAGS)
+TARGET_ALIBS= $(TARGET_XLIBS) $(LIBS) $(TARGET_LIBS)
+
+TARGET_TESTARCH=$(shell $(TARGET_CC) $(TARGET_TCFLAGS) -E lj_arch.h -dM)
+ifneq (,$(findstring LJ_TARGET_X64 ,$(TARGET_TESTARCH)))
+ TARGET_LJARCH= x64
+else
+ifneq (,$(findstring LJ_TARGET_X86 ,$(TARGET_TESTARCH)))
+ TARGET_LJARCH= x86
+else
+ifneq (,$(findstring LJ_TARGET_ARM ,$(TARGET_TESTARCH)))
+ TARGET_LJARCH= arm
+else
+ifneq (,$(findstring LJ_TARGET_ARM64 ,$(TARGET_TESTARCH)))
+ ifneq (,$(findstring __AARCH64EB__ ,$(TARGET_TESTARCH)))
+ TARGET_ARCH= -D__AARCH64EB__=1
+ endif
+ TARGET_LJARCH= arm64
+else
+ifneq (,$(findstring LJ_TARGET_PPC ,$(TARGET_TESTARCH)))
+ ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH)))
+ TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_LE
+ else
+ TARGET_ARCH= -DLJ_ARCH_ENDIAN=LUAJIT_BE
+ endif
+ TARGET_LJARCH= ppc
+else
+ifneq (,$(findstring LJ_TARGET_MIPS ,$(TARGET_TESTARCH)))
+ ifneq (,$(findstring MIPSEL ,$(TARGET_TESTARCH)))
+ TARGET_ARCH= -D__MIPSEL__=1
+ endif
+ ifneq (,$(findstring LJ_TARGET_MIPS64 ,$(TARGET_TESTARCH)))
+ TARGET_LJARCH= mips64
+ else
+ TARGET_LJARCH= mips
+ endif
+else
+ $(error Unsupported target architecture)
+endif
+endif
+endif
+endif
+endif
+endif
+
+ifneq (,$(findstring LJ_TARGET_PS3 1,$(TARGET_TESTARCH)))
+ TARGET_SYS= PS3
+ TARGET_ARCH+= -D__CELLOS_LV2__
+ TARGET_XCFLAGS+= -DLUAJIT_USE_SYSMALLOC
+ TARGET_XLIBS+= -lpthread
+endif
+
+TARGET_XCFLAGS+= $(CCOPT_$(TARGET_LJARCH))
+TARGET_ARCH+= $(patsubst %,-DLUAJIT_TARGET=LUAJIT_ARCH_%,$(TARGET_LJARCH))
+
+ifneq (,$(PREFIX))
+ifneq (/usr/local,$(PREFIX))
+ TARGET_XCFLAGS+= -DLUA_ROOT=\"$(PREFIX)\"
+ ifneq (/usr,$(PREFIX))
+ TARGET_DYNXLDOPTS= -Wl,-rpath,$(TARGET_LIBPATH)
+ endif
+endif
+endif
+ifneq (,$(MULTILIB))
+ TARGET_XCFLAGS+= -DLUA_MULTILIB=\"$(MULTILIB)\"
+endif
+ifneq (,$(LMULTILIB))
+ TARGET_XCFLAGS+= -DLUA_LMULTILIB=\"$(LMULTILIB)\"
+endif
+
+##############################################################################
+# Target system detection.
+##############################################################################
+
+TARGET_SYS?= $(HOST_SYS)
+ifeq (Windows,$(TARGET_SYS))
+ TARGET_STRIP+= --strip-unneeded
+ TARGET_XSHLDFLAGS= -shared -Wl,--out-implib,$(TARGET_DLLDOTANAME)
+ TARGET_DYNXLDOPTS=
+else
+ TARGET_AR+= 2>/dev/null
+ifeq (,$(shell $(TARGET_CC) -o /dev/null -c -x c /dev/null -fno-stack-protector 2>/dev/null || echo 1))
+ TARGET_XCFLAGS+= -fno-stack-protector
+endif
+ifeq (Darwin,$(TARGET_SYS))
+ ifeq (,$(MACOSX_DEPLOYMENT_TARGET))
+ $(error missing: export MACOSX_DEPLOYMENT_TARGET=XX.YY)
+ endif
+ TARGET_STRIP+= -x
+ TARGET_XCFLAGS+= -DLUAJIT_UNWIND_EXTERNAL
+ TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC
+ TARGET_DYNXLDOPTS=
+ TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER)
+else
+ifeq (iOS,$(TARGET_SYS))
+ TARGET_STRIP+= -x
+ TARGET_XSHLDFLAGS= -dynamiclib -single_module -undefined dynamic_lookup -fPIC
+ TARGET_DYNXLDOPTS=
+ TARGET_XSHLDFLAGS+= -install_name $(TARGET_DYLIBPATH) -compatibility_version $(MAJVER).$(MINVER) -current_version $(MAJVER).$(MINVER).$(RELVER)
+ ifeq (arm64,$(TARGET_LJARCH))
+ TARGET_XCFLAGS+= -fno-omit-frame-pointer
+ endif
+else
+ ifeq (,$(findstring LJ_NO_UNWIND 1,$(TARGET_TESTARCH)))
+ # Find out whether the target toolchain always generates unwind tables.
+ TARGET_TESTUNWIND=$(shell exec 2>/dev/null; echo 'extern void b(void);int a(void){b();return 0;}' | $(TARGET_CC) -c -x c - -o tmpunwind.o && { grep -qa -e eh_frame -e __unwind_info tmpunwind.o || grep -qU -e eh_frame -e __unwind_info tmpunwind.o; } && echo E; rm -f tmpunwind.o)
+ ifneq (,$(findstring E,$(TARGET_TESTUNWIND)))
+ TARGET_XCFLAGS+= -DLUAJIT_UNWIND_EXTERNAL
+ endif
+ endif
+ ifneq (SunOS,$(TARGET_SYS))
+ ifneq (PS3,$(TARGET_SYS))
+ TARGET_XLDFLAGS+= -Wl,-E
+ endif
+ endif
+ ifeq (Linux,$(TARGET_SYS))
+ TARGET_XLIBS+= -ldl
+ endif
+ ifeq (GNU/kFreeBSD,$(TARGET_SYS))
+ TARGET_XLIBS+= -ldl
+ endif
+endif
+endif
+endif
+
+ifneq ($(HOST_SYS),$(TARGET_SYS))
+ ifeq (Windows,$(TARGET_SYS))
+ HOST_XCFLAGS+= -malign-double -DLUAJIT_OS=LUAJIT_OS_WINDOWS
+ else
+ ifeq (Linux,$(TARGET_SYS))
+ HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_LINUX
+ else
+ ifeq (Darwin,$(TARGET_SYS))
+ HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX
+ else
+ ifeq (iOS,$(TARGET_SYS))
+ HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OSX -DTARGET_OS_IPHONE=1
+ else
+ HOST_XCFLAGS+= -DLUAJIT_OS=LUAJIT_OS_OTHER
+ endif
+ endif
+ endif
+ endif
+endif
+
+ifneq (,$(CCDEBUG))
+ TARGET_STRIP= @:
+endif
+
+##############################################################################
+# Files and pathnames.
+##############################################################################
+
+MINILUA_O= host/minilua.o
+MINILUA_LIBS= -lm
+MINILUA_T= host/minilua
+MINILUA_X= $(MINILUA_T)
+
+ifeq (,$(HOST_LUA))
+ HOST_LUA= $(MINILUA_X)
+ DASM_DEP= $(MINILUA_T)
+endif
+
+DASM_DIR= ../dynasm
+DASM= $(HOST_LUA) $(DASM_DIR)/dynasm.lua
+DASM_XFLAGS=
+DASM_AFLAGS=
+DASM_ARCH= $(TARGET_LJARCH)
+
+ifneq (,$(findstring LJ_LE 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D ENDIAN_LE
+else
+ DASM_AFLAGS+= -D ENDIAN_BE
+endif
+ifneq (,$(findstring LJ_ARCH_BITS 64,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D P64
+endif
+ifneq (,$(findstring LJ_HASJIT 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D JIT
+endif
+ifneq (,$(findstring LJ_HASFFI 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D FFI
+endif
+ifneq (,$(findstring LJ_DUALNUM 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D DUALNUM
+endif
+ifneq (,$(findstring LJ_ARCH_HASFPU 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D FPU
+ TARGET_ARCH+= -DLJ_ARCH_HASFPU=1
+else
+ TARGET_ARCH+= -DLJ_ARCH_HASFPU=0
+endif
+ifeq (,$(findstring LJ_ABI_SOFTFP 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D HFABI
+ TARGET_ARCH+= -DLJ_ABI_SOFTFP=0
+else
+ TARGET_ARCH+= -DLJ_ABI_SOFTFP=1
+endif
+ifneq (,$(findstring LJ_NO_UNWIND 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D NO_UNWIND
+ TARGET_ARCH+= -DLUAJIT_NO_UNWIND
+endif
+DASM_AFLAGS+= -D VER=$(subst LJ_ARCH_VERSION_,,$(filter LJ_ARCH_VERSION_%,$(subst LJ_ARCH_VERSION ,LJ_ARCH_VERSION_,$(TARGET_TESTARCH))))
+ifeq (Windows,$(TARGET_SYS))
+ DASM_AFLAGS+= -D WIN
+endif
+ifeq (x64,$(TARGET_LJARCH))
+ ifeq (,$(findstring LJ_FR2 1,$(TARGET_TESTARCH)))
+ DASM_ARCH= x86
+ endif
+else
+ifeq (arm,$(TARGET_LJARCH))
+ ifeq (iOS,$(TARGET_SYS))
+ DASM_AFLAGS+= -D IOS
+ endif
+else
+ifneq (,$(findstring LJ_TARGET_MIPSR6 ,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D MIPSR6
+endif
+ifeq (ppc,$(TARGET_LJARCH))
+ ifneq (,$(findstring LJ_ARCH_SQRT 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D SQRT
+ endif
+ ifneq (,$(findstring LJ_ARCH_ROUND 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D ROUND
+ endif
+ ifneq (,$(findstring LJ_ARCH_PPC32ON64 1,$(TARGET_TESTARCH)))
+ DASM_AFLAGS+= -D GPR64
+ endif
+ ifeq (PS3,$(TARGET_SYS))
+ DASM_AFLAGS+= -D PPE -D TOC
+ endif
+endif
+endif
+endif
+
+DASM_FLAGS= $(DASM_XFLAGS) $(DASM_AFLAGS)
+DASM_DASC= vm_$(DASM_ARCH).dasc
+
+BUILDVM_O= host/buildvm.o host/buildvm_asm.o host/buildvm_peobj.o \
+ host/buildvm_lib.o host/buildvm_fold.o
+BUILDVM_T= host/buildvm
+BUILDVM_X= $(BUILDVM_T)
+
+HOST_O= $(MINILUA_O) $(BUILDVM_O)
+HOST_T= $(MINILUA_T) $(BUILDVM_T)
+
+LJVM_S= lj_vm.S
+LJVM_O= lj_vm.o
+LJVM_BOUT= $(LJVM_S)
+LJVM_MODE= elfasm
+
+LJLIB_O= lib_base.o lib_math.o lib_bit.o lib_string.o lib_table.o \
+ lib_io.o lib_os.o lib_package.o lib_debug.o lib_jit.o lib_ffi.o \
+ lib_buffer.o
+LJLIB_C= $(LJLIB_O:.o=.c)
+
+LJCORE_O= lj_assert.o lj_gc.o lj_err.o lj_char.o lj_bc.o lj_obj.o lj_buf.o \
+ lj_str.o lj_tab.o lj_func.o lj_udata.o lj_meta.o lj_debug.o \
+ lj_prng.o lj_state.o lj_dispatch.o lj_vmevent.o lj_vmmath.o \
+ lj_strscan.o lj_strfmt.o lj_strfmt_num.o lj_serialize.o \
+ lj_api.o lj_profile.o \
+ lj_lex.o lj_parse.o lj_bcread.o lj_bcwrite.o lj_load.o \
+ lj_ir.o lj_opt_mem.o lj_opt_fold.o lj_opt_narrow.o \
+ lj_opt_dce.o lj_opt_loop.o lj_opt_split.o lj_opt_sink.o \
+ lj_mcode.o lj_snap.o lj_record.o lj_crecord.o lj_ffrecord.o \
+ lj_asm.o lj_trace.o lj_gdbjit.o \
+ lj_ctype.o lj_cdata.o lj_cconv.o lj_ccall.o lj_ccallback.o \
+ lj_carith.o lj_clib.o lj_cparse.o \
+ lj_lib.o lj_alloc.o lib_aux.o \
+ $(LJLIB_O) lib_init.o
+
+LJVMCORE_O= $(LJVM_O) $(LJCORE_O)
+LJVMCORE_DYNO= $(LJVMCORE_O:.o=_dyn.o)
+
+LIB_VMDEF= jit/vmdef.lua
+LIB_VMDEFP= $(LIB_VMDEF)
+
+LUAJIT_O= luajit.o
+LUAJIT_A= libluajit.a
+LUAJIT_SO= libluajit.so
+LUAJIT_T= luajit
+
+ALL_T= $(LUAJIT_T) $(LUAJIT_A) $(LUAJIT_SO) $(HOST_T)
+ALL_HDRGEN= lj_bcdef.h lj_ffdef.h lj_libdef.h lj_recdef.h lj_folddef.h \
+ host/buildvm_arch.h
+ALL_GEN= $(LJVM_S) $(ALL_HDRGEN) $(LIB_VMDEFP)
+WIN_RM= *.obj *.lib *.exp *.dll *.exe *.manifest *.pdb *.ilk
+ALL_RM= $(ALL_T) $(ALL_GEN) *.o host/*.o $(WIN_RM)
+
+##############################################################################
+# Build mode handling.
+##############################################################################
+
+# Mixed mode defaults.
+TARGET_O= $(LUAJIT_A)
+TARGET_T= $(LUAJIT_T) $(LUAJIT_SO)
+TARGET_DEP= $(LIB_VMDEF) $(LUAJIT_SO)
+
+ifeq (Windows,$(TARGET_SYS))
+ TARGET_DYNCC= $(STATIC_CC)
+ LJVM_MODE= peobj
+ LJVM_BOUT= $(LJVM_O)
+ LUAJIT_T= luajit.exe
+ ifeq (cygwin,$(HOST_MSYS))
+ LUAJIT_SO= cyg$(TARGET_DLLNAME)
+ else
+ LUAJIT_SO= $(TARGET_DLLNAME)
+ endif
+ # Mixed mode is not supported on Windows. And static mode doesn't work well.
+ # C modules cannot be loaded, because they bind to lua51.dll.
+ ifneq (static,$(BUILDMODE))
+ BUILDMODE= dynamic
+ TARGET_XCFLAGS+= -DLUA_BUILD_AS_DLL
+ endif
+endif
+ifeq (Darwin,$(TARGET_SYS))
+ LJVM_MODE= machasm
+endif
+ifeq (iOS,$(TARGET_SYS))
+ LJVM_MODE= machasm
+endif
+ifeq (SunOS,$(TARGET_SYS))
+ BUILDMODE= static
+endif
+ifeq (PS3,$(TARGET_SYS))
+ BUILDMODE= static
+endif
+
+ifeq (Windows,$(HOST_SYS))
+ MINILUA_T= host/minilua.exe
+ BUILDVM_T= host/buildvm.exe
+ ifeq (,$(HOST_MSYS))
+ MINILUA_X= host\minilua
+ BUILDVM_X= host\buildvm
+ ALL_RM:= $(subst /,\,$(ALL_RM))
+ HOST_RM= del
+ endif
+endif
+
+ifeq (static,$(BUILDMODE))
+ TARGET_DYNCC= @:
+ TARGET_T= $(LUAJIT_T)
+ TARGET_DEP= $(LIB_VMDEF)
+else
+ifeq (dynamic,$(BUILDMODE))
+ ifneq (Windows,$(TARGET_SYS))
+ TARGET_CC= $(DYNAMIC_CC)
+ endif
+ TARGET_DYNCC= @:
+ LJVMCORE_DYNO= $(LJVMCORE_O)
+ TARGET_O= $(LUAJIT_SO)
+ TARGET_XLDFLAGS+= $(TARGET_DYNXLDOPTS)
+else
+ifeq (Darwin,$(TARGET_SYS))
+ TARGET_DYNCC= @:
+ LJVMCORE_DYNO= $(LJVMCORE_O)
+endif
+ifeq (iOS,$(TARGET_SYS))
+ TARGET_DYNCC= @:
+ LJVMCORE_DYNO= $(LJVMCORE_O)
+endif
+endif
+endif
+
+Q= @
+E= @echo
+#Q=
+#E= @:
+
+##############################################################################
+# Make targets.
+##############################################################################
+
+default all: $(TARGET_T)
+
+amalg:
+ $(MAKE) all "LJCORE_O=ljamalg.o"
+
+clean:
+ $(HOST_RM) $(ALL_RM)
+
+libbc:
+ ./$(LUAJIT_T) host/genlibbc.lua -o host/buildvm_libbc.h $(LJLIB_C)
+ $(MAKE) all
+
+depend:
+ @for file in $(ALL_HDRGEN); do \
+ test -f $$file || touch $$file; \
+ done
+ @$(HOST_CC) $(HOST_ACFLAGS) -MM *.c host/*.c | \
+ sed -e "s| [^ ]*/dasm_\S*\.h||g" \
+ -e "s|^\([^l ]\)|host/\1|" \
+ -e "s| lj_target_\S*\.h| lj_target_*.h|g" \
+ -e "s| lj_emit_\S*\.h| lj_emit_*.h|g" \
+ -e "s| lj_asm_\S*\.h| lj_asm_*.h|g" >Makefile.dep
+ @for file in $(ALL_HDRGEN); do \
+ test -s $$file || $(HOST_RM) $$file; \
+ done
+
+.PHONY: default all amalg clean libbc depend
+
+##############################################################################
+# Rules for generated files.
+##############################################################################
+
+$(MINILUA_T): $(MINILUA_O)
+ $(E) "HOSTLINK $@"
+ $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(MINILUA_O) $(MINILUA_LIBS) $(HOST_ALIBS)
+
+host/buildvm_arch.h: $(DASM_DASC) $(DASM_DEP) $(DASM_DIR)/*.lua lj_arch.h lua.h luaconf.h
+ $(E) "DYNASM $@"
+ $(Q)$(DASM) $(DASM_FLAGS) -o $@ $(DASM_DASC)
+
+host/buildvm.o: $(DASM_DIR)/dasm_*.h
+
+$(BUILDVM_T): $(BUILDVM_O)
+ $(E) "HOSTLINK $@"
+ $(Q)$(HOST_CC) $(HOST_ALDFLAGS) -o $@ $(BUILDVM_O) $(HOST_ALIBS)
+
+$(LJVM_BOUT): $(BUILDVM_T)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m $(LJVM_MODE) -o $@
+
+lj_bcdef.h: $(BUILDVM_T) $(LJLIB_C)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m bcdef -o $@ $(LJLIB_C)
+
+lj_ffdef.h: $(BUILDVM_T) $(LJLIB_C)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m ffdef -o $@ $(LJLIB_C)
+
+lj_libdef.h: $(BUILDVM_T) $(LJLIB_C)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m libdef -o $@ $(LJLIB_C)
+
+lj_recdef.h: $(BUILDVM_T) $(LJLIB_C)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m recdef -o $@ $(LJLIB_C)
+
+$(LIB_VMDEF): $(BUILDVM_T) $(LJLIB_C)
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m vmdef -o $(LIB_VMDEFP) $(LJLIB_C)
+
+lj_folddef.h: $(BUILDVM_T) lj_opt_fold.c
+ $(E) "BUILDVM $@"
+ $(Q)$(BUILDVM_X) -m folddef -o $@ lj_opt_fold.c
+
+##############################################################################
+# Object file rules.
+##############################################################################
+
+%.o: %.c
+ $(E) "CC $@"
+ $(Q)$(TARGET_DYNCC) $(TARGET_ACFLAGS) -c -o $(@:.o=_dyn.o) $<
+ $(Q)$(TARGET_CC) $(TARGET_ACFLAGS) -c -o $@ $<
+
+%.o: %.S
+ $(E) "ASM $@"
+ $(Q)$(TARGET_DYNCC) $(TARGET_ASFLAGS) -c -o $(@:.o=_dyn.o) $<
+ $(Q)$(TARGET_CC) $(TARGET_ASFLAGS) -c -o $@ $<
+
+$(LUAJIT_O):
+ $(E) "CC $@"
+ $(Q)$(TARGET_STCC) $(TARGET_ACFLAGS) -c -o $@ $<
+
+$(HOST_O): %.o: %.c
+ $(E) "HOSTCC $@"
+ $(Q)$(HOST_CC) $(HOST_ACFLAGS) -c -o $@ $<
+
+include Makefile.dep
+
+##############################################################################
+# Target file rules.
+##############################################################################
+
+$(LUAJIT_A): $(LJVMCORE_O)
+ $(E) "AR $@"
+ $(Q)$(TARGET_AR) $@ $(LJVMCORE_O)
+
+# Depending on _O but linking with _DYNO is intentional.
+$(LUAJIT_SO): $(LJVMCORE_O)
+ $(E) "DYNLINK $@"
+ $(Q)$(TARGET_LD) $(TARGET_ASHLDFLAGS) -o $@ $(LJVMCORE_DYNO) $(TARGET_ALIBS)
+ $(Q)$(TARGET_STRIP) $@
+
+$(LUAJIT_T): $(TARGET_O) $(LUAJIT_O) $(TARGET_DEP)
+ $(E) "LINK $@"
+ $(Q)$(TARGET_LD) $(TARGET_ALDFLAGS) -o $@ $(LUAJIT_O) $(TARGET_O) $(TARGET_ALIBS)
+ $(Q)$(TARGET_STRIP) $@
+ $(E) "OK Successfully built LuaJIT"
+
+##############################################################################
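
A note on how the Makefile above detects the target: TARGET_TESTARCH runs the target compiler's preprocessor over lj_arch.h and scans the predefined macros for LJ_TARGET_* and the other LJ_* feature defines, which then drive TARGET_LJARCH, the DASM_AFLAGS and the choice of vm_*.dasc. A rough stand-alone equivalent of that probe, run from src/ (illustrative only; cc stands in for $(TARGET_CC) and the flags for $(TARGET_TCFLAGS)):

    cc -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -U_FORTIFY_SOURCE -E lj_arch.h -dM \
      | grep -E 'LJ_(TARGET_[A-Z0-9]+|ARCH_BITS|HASJIT|HASFFI|DUALNUM|LE) '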
diff --git a/libs/luajit-cmake/luajit/src/Makefile.dep b/libs/luajit-cmake/luajit/src/Makefile.dep
new file mode 100644
index 0000000..1ad6701
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/Makefile.dep
@@ -0,0 +1,259 @@
+lib_aux.o: lib_aux.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_state.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_lib.h
+lib_base.o: lib_base.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_buf.h \
+ lj_str.h lj_tab.h lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h \
+ lj_cconv.h lj_ff.h lj_ffdef.h lj_dispatch.h lj_jit.h lj_ir.h lj_char.h \
+ lj_strscan.h lj_strfmt.h lj_lib.h lj_libdef.h
+lib_bit.o: lib_bit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_strscan.h \
+ lj_strfmt.h lj_ctype.h lj_cdata.h lj_cconv.h lj_carith.h lj_ff.h \
+ lj_ffdef.h lj_lib.h lj_libdef.h
+lib_buffer.o: lib_buffer.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \
+ lj_tab.h lj_udata.h lj_meta.h lj_ctype.h lj_cdata.h lj_cconv.h \
+ lj_strfmt.h lj_serialize.h lj_lib.h lj_libdef.h
+lib_debug.o: lib_debug.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_lib.h \
+ lj_libdef.h
+lib_ffi.o: lib_ffi.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h \
+ lj_ctype.h lj_cparse.h lj_cdata.h lj_cconv.h lj_carith.h lj_ccall.h \
+ lj_ccallback.h lj_clib.h lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h \
+ lj_libdef.h
+lib_init.o: lib_init.c lua.h luaconf.h lauxlib.h lualib.h lj_arch.h
+lib_io.o: lib_io.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_state.h \
+ lj_strfmt.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
+lib_jit.o: lib_jit.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h \
+ lj_state.h lj_bc.h lj_ctype.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \
+ lj_target.h lj_target_*.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_vm.h lj_vmevent.h lj_lib.h luajit.h lj_libdef.h
+lib_math.o: lib_math.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_lib.h lj_vm.h lj_prng.h lj_libdef.h
+lib_os.o: lib_os.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_lib.h \
+ lj_libdef.h
+lib_package.o: lib_package.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_err.h lj_errmsg.h lj_lib.h
+lib_string.o: lib_string.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \
+ lj_tab.h lj_meta.h lj_state.h lj_ff.h lj_ffdef.h lj_bcdump.h lj_lex.h \
+ lj_char.h lj_strfmt.h lj_lib.h lj_libdef.h
+lib_table.o: lib_table.c lua.h luaconf.h lauxlib.h lualib.h lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h \
+ lj_tab.h lj_ff.h lj_ffdef.h lj_lib.h lj_libdef.h
+lj_alloc.o: lj_alloc.c lj_def.h lua.h luaconf.h lj_arch.h lj_alloc.h \
+ lj_prng.h
+lj_api.o: lj_api.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
+ lj_meta.h lj_state.h lj_bc.h lj_frame.h lj_trace.h lj_jit.h lj_ir.h \
+ lj_dispatch.h lj_traceerr.h lj_vm.h lj_strscan.h lj_strfmt.h
+lj_asm.o: lj_asm.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_buf.h lj_str.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_ir.h \
+ lj_jit.h lj_ircall.h lj_iropt.h lj_mcode.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_snap.h lj_asm.h lj_vm.h lj_target.h lj_target_*.h \
+ lj_emit_*.h lj_asm_*.h
+lj_assert.o: lj_assert.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
+lj_bc.o: lj_bc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_bc.h \
+ lj_bcdef.h
+lj_bcread.o: lj_bcread.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_bc.h \
+ lj_ctype.h lj_cdata.h lualib.h lj_lex.h lj_bcdump.h lj_state.h \
+ lj_strfmt.h
+lj_bcwrite.o: lj_bcwrite.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_buf.h lj_str.h lj_bc.h lj_ctype.h lj_dispatch.h lj_jit.h \
+ lj_ir.h lj_strfmt.h lj_bcdump.h lj_lex.h lj_err.h lj_errmsg.h lj_vm.h
+lj_buf.o: lj_buf.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_strfmt.h
+lj_carith.o: lj_carith.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_meta.h lj_ir.h lj_ctype.h \
+ lj_cconv.h lj_cdata.h lj_carith.h lj_strscan.h
+lj_ccall.o: lj_ccall.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h \
+ lj_ccall.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h
+lj_ccallback.o: lj_ccallback.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_state.h lj_frame.h \
+ lj_bc.h lj_ctype.h lj_cconv.h lj_ccall.h lj_ccallback.h lj_target.h \
+ lj_target_*.h lj_mcode.h lj_jit.h lj_ir.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h
+lj_cconv.o: lj_cconv.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_ctype.h \
+ lj_cdata.h lj_cconv.h lj_ccallback.h
+lj_cdata.o: lj_cdata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_tab.h lj_ctype.h lj_cconv.h lj_cdata.h
+lj_char.o: lj_char.c lj_char.h lj_def.h lua.h luaconf.h
+lj_clib.o: lj_clib.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_str.h lj_udata.h lj_ctype.h lj_cconv.h \
+ lj_cdata.h lj_clib.h lj_strfmt.h
+lj_cparse.o: lj_cparse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_ctype.h lj_cparse.h \
+ lj_frame.h lj_bc.h lj_vm.h lj_char.h lj_strscan.h lj_strfmt.h
+lj_crecord.o: lj_crecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_tab.h lj_frame.h lj_bc.h lj_ctype.h lj_gc.h \
+ lj_cdata.h lj_cparse.h lj_cconv.h lj_carith.h lj_clib.h lj_ccall.h \
+ lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h lj_snap.h \
+ lj_crecord.h lj_strfmt.h
+lj_ctype.o: lj_ctype.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_strfmt.h lj_ctype.h \
+ lj_ccallback.h lj_buf.h
+lj_debug.o: lj_debug.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_gc.h lj_str.h lj_tab.h \
+ lj_state.h lj_frame.h lj_bc.h lj_strfmt.h lj_jit.h lj_ir.h
+lj_dispatch.o: lj_dispatch.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_func.h lj_tab.h \
+ lj_meta.h lj_debug.h lj_state.h lj_frame.h lj_bc.h lj_ff.h lj_ffdef.h \
+ lj_strfmt.h lj_jit.h lj_ir.h lj_ccallback.h lj_ctype.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_profile.h lj_vm.h luajit.h
+lj_err.o: lj_err.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_err.h \
+ lj_errmsg.h lj_debug.h lj_str.h lj_func.h lj_state.h lj_frame.h lj_bc.h \
+ lj_ff.h lj_ffdef.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h lj_strfmt.h
+lj_ffrecord.o: lj_ffrecord.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_frame.h \
+ lj_bc.h lj_ff.h lj_ffdef.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \
+ lj_trace.h lj_dispatch.h lj_traceerr.h lj_record.h lj_ffrecord.h \
+ lj_crecord.h lj_vm.h lj_strscan.h lj_strfmt.h lj_serialize.h lj_recdef.h
+lj_func.o: lj_func.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_func.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h lj_bc.h \
+ lj_traceerr.h lj_vm.h
+lj_gc.o: lj_gc.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h lj_udata.h \
+ lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h \
+ lj_jit.h lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h
+lj_gdbjit.o: lj_gdbjit.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_frame.h lj_bc.h lj_buf.h \
+ lj_str.h lj_strfmt.h lj_jit.h lj_ir.h lj_dispatch.h
+lj_ir.o: lj_ir.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_buf.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h lj_iropt.h \
+ lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h lj_cdata.h \
+ lj_carith.h lj_vm.h lj_strscan.h lj_serialize.h lj_strfmt.h lj_prng.h
+lj_lex.o: lj_lex.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_ctype.h lj_cdata.h \
+ lualib.h lj_state.h lj_lex.h lj_parse.h lj_char.h lj_strscan.h \
+ lj_strfmt.h
+lj_lib.o: lj_lib.c lauxlib.h lua.h luaconf.h lj_obj.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_func.h lj_bc.h \
+ lj_dispatch.h lj_jit.h lj_ir.h lj_ctype.h lj_vm.h lj_strscan.h \
+ lj_strfmt.h lj_lex.h lj_bcdump.h lj_lib.h
+lj_load.o: lj_load.c lua.h luaconf.h lauxlib.h lj_obj.h lj_def.h \
+ lj_arch.h lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_func.h \
+ lj_frame.h lj_bc.h lj_vm.h lj_lex.h lj_bcdump.h lj_parse.h
+lj_mcode.o: lj_mcode.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_jit.h lj_ir.h lj_mcode.h lj_trace.h \
+ lj_dispatch.h lj_bc.h lj_traceerr.h lj_prng.h lj_vm.h
+lj_meta.o: lj_meta.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_meta.h lj_frame.h \
+ lj_bc.h lj_vm.h lj_strscan.h lj_strfmt.h lj_lib.h
+lj_obj.o: lj_obj.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h
+lj_opt_dce.o: lj_opt_dce.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_jit.h lj_iropt.h
+lj_opt_fold.o: lj_opt_fold.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_buf.h lj_gc.h lj_str.h lj_tab.h lj_ir.h lj_jit.h lj_ircall.h \
+ lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_ctype.h \
+ lj_carith.h lj_vm.h lj_strscan.h lj_strfmt.h lj_folddef.h
+lj_opt_loop.o: lj_opt_loop.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h lj_jit.h \
+ lj_iropt.h lj_trace.h lj_dispatch.h lj_bc.h lj_traceerr.h lj_snap.h \
+ lj_vm.h
+lj_opt_mem.o: lj_opt_mem.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_tab.h lj_ir.h lj_jit.h lj_iropt.h lj_ircall.h lj_dispatch.h lj_bc.h
+lj_opt_narrow.o: lj_opt_narrow.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h lj_trace.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h lj_strscan.h
+lj_opt_sink.o: lj_opt_sink.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_jit.h lj_iropt.h lj_target.h lj_target_*.h
+lj_opt_split.o: lj_opt_split.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_ir.h \
+ lj_jit.h lj_ircall.h lj_iropt.h lj_dispatch.h lj_bc.h lj_vm.h
+lj_parse.o: lj_parse.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_buf.h lj_str.h lj_tab.h \
+ lj_func.h lj_state.h lj_bc.h lj_ctype.h lj_strfmt.h lj_lex.h lj_parse.h \
+ lj_vm.h lj_vmevent.h
+lj_prng.o: lj_prng.c lj_def.h lua.h luaconf.h lj_arch.h lj_prng.h
+lj_profile.o: lj_profile.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_buf.h lj_gc.h lj_str.h lj_frame.h lj_bc.h lj_debug.h lj_dispatch.h \
+ lj_jit.h lj_ir.h lj_trace.h lj_traceerr.h lj_profile.h luajit.h
+lj_record.o: lj_record.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_tab.h lj_meta.h lj_frame.h lj_bc.h \
+ lj_ctype.h lj_gc.h lj_ff.h lj_ffdef.h lj_debug.h lj_ir.h lj_jit.h \
+ lj_ircall.h lj_iropt.h lj_trace.h lj_dispatch.h lj_traceerr.h \
+ lj_record.h lj_ffrecord.h lj_snap.h lj_vm.h lj_prng.h
+lj_serialize.o: lj_serialize.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_tab.h \
+ lj_udata.h lj_ctype.h lj_cdata.h lj_ir.h lj_serialize.h
+lj_snap.o: lj_snap.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_tab.h lj_state.h lj_frame.h lj_bc.h lj_ir.h lj_jit.h lj_iropt.h \
+ lj_trace.h lj_dispatch.h lj_traceerr.h lj_snap.h lj_target.h \
+ lj_target_*.h lj_ctype.h lj_cdata.h
+lj_state.o: lj_state.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_buf.h lj_str.h lj_tab.h lj_func.h \
+ lj_meta.h lj_state.h lj_frame.h lj_bc.h lj_ctype.h lj_trace.h lj_jit.h \
+ lj_ir.h lj_dispatch.h lj_traceerr.h lj_vm.h lj_prng.h lj_lex.h \
+ lj_alloc.h luajit.h
+lj_str.o: lj_str.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_str.h lj_char.h lj_prng.h
+lj_strfmt.o: lj_strfmt.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_err.h lj_errmsg.h lj_buf.h lj_gc.h lj_str.h lj_meta.h lj_state.h \
+ lj_char.h lj_strfmt.h lj_ctype.h lj_lib.h
+lj_strfmt_num.o: lj_strfmt_num.c lj_obj.h lua.h luaconf.h lj_def.h \
+ lj_arch.h lj_buf.h lj_gc.h lj_str.h lj_strfmt.h
+lj_strscan.o: lj_strscan.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_char.h lj_strscan.h
+lj_tab.o: lj_tab.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h lj_gc.h \
+ lj_err.h lj_errmsg.h lj_tab.h
+lj_trace.o: lj_trace.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_debug.h lj_str.h lj_frame.h lj_bc.h \
+ lj_state.h lj_ir.h lj_jit.h lj_iropt.h lj_mcode.h lj_trace.h \
+ lj_dispatch.h lj_traceerr.h lj_snap.h lj_gdbjit.h lj_record.h lj_asm.h \
+ lj_vm.h lj_vmevent.h lj_target.h lj_target_*.h lj_prng.h
+lj_udata.o: lj_udata.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_gc.h lj_err.h lj_errmsg.h lj_udata.h
+lj_vmevent.o: lj_vmevent.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_str.h lj_tab.h lj_state.h lj_dispatch.h lj_bc.h lj_jit.h lj_ir.h \
+ lj_vm.h lj_vmevent.h
+lj_vmmath.o: lj_vmmath.c lj_obj.h lua.h luaconf.h lj_def.h lj_arch.h \
+ lj_ir.h lj_vm.h
+ljamalg.o: ljamalg.c lua.h luaconf.h lauxlib.h lj_assert.c lj_obj.h \
+ lj_def.h lj_arch.h lj_gc.c lj_gc.h lj_err.h lj_errmsg.h lj_buf.h \
+ lj_str.h lj_tab.h lj_func.h lj_udata.h lj_meta.h lj_state.h lj_frame.h \
+ lj_bc.h lj_ctype.h lj_cdata.h lj_trace.h lj_jit.h lj_ir.h lj_dispatch.h \
+ lj_traceerr.h lj_vm.h lj_err.c lj_debug.h lj_ff.h lj_ffdef.h lj_strfmt.h \
+ lj_char.c lj_char.h lj_bc.c lj_bcdef.h lj_obj.c lj_buf.c lj_str.c \
+ lj_prng.h lj_tab.c lj_func.c lj_udata.c lj_meta.c lj_strscan.h lj_lib.h \
+ lj_debug.c lj_prng.c lj_state.c lj_lex.h lj_alloc.h luajit.h \
+ lj_dispatch.c lj_ccallback.h lj_profile.h lj_vmevent.c lj_vmevent.h \
+ lj_vmmath.c lj_strscan.c lj_strfmt.c lj_strfmt_num.c lj_serialize.c \
+ lj_serialize.h lj_api.c lj_profile.c lj_lex.c lualib.h lj_parse.h \
+ lj_parse.c lj_bcread.c lj_bcdump.h lj_bcwrite.c lj_load.c lj_ctype.c \
+ lj_cdata.c lj_cconv.h lj_cconv.c lj_ccall.c lj_ccall.h lj_ccallback.c \
+ lj_target.h lj_target_*.h lj_mcode.h lj_carith.c lj_carith.h lj_clib.c \
+ lj_clib.h lj_cparse.c lj_cparse.h lj_lib.c lj_ir.c lj_ircall.h \
+ lj_iropt.h lj_opt_mem.c lj_opt_fold.c lj_folddef.h lj_opt_narrow.c \
+ lj_opt_dce.c lj_opt_loop.c lj_snap.h lj_opt_split.c lj_opt_sink.c \
+ lj_mcode.c lj_snap.c lj_record.c lj_record.h lj_ffrecord.h lj_crecord.c \
+ lj_crecord.h lj_ffrecord.c lj_recdef.h lj_asm.c lj_asm.h lj_emit_*.h \
+ lj_asm_*.h lj_trace.c lj_gdbjit.h lj_gdbjit.c lj_alloc.c lib_aux.c \
+ lib_base.c lj_libdef.h lib_math.c lib_string.c lib_table.c lib_io.c \
+ lib_os.c lib_package.c lib_debug.c lib_bit.c lib_jit.c lib_ffi.c \
+ lib_buffer.c lib_init.c
+luajit.o: luajit.c lua.h luaconf.h lauxlib.h lualib.h luajit.h lj_arch.h
+host/buildvm.o: host/buildvm.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_gc.h lj_obj.h lj_bc.h lj_ir.h \
+ lj_ircall.h lj_ir.h lj_jit.h lj_frame.h lj_bc.h lj_dispatch.h lj_ctype.h \
+ lj_gc.h lj_ccall.h lj_ctype.h luajit.h \
+ host/buildvm_arch.h lj_traceerr.h
+host/buildvm_asm.o: host/buildvm_asm.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_bc.h lj_def.h lj_arch.h
+host/buildvm_fold.o: host/buildvm_fold.c host/buildvm.h lj_def.h lua.h \
+ luaconf.h lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_ir.h lj_obj.h
+host/buildvm_lib.o: host/buildvm_lib.c host/buildvm.h lj_def.h lua.h luaconf.h \
+ lj_arch.h lj_obj.h lj_def.h lj_arch.h lj_bc.h lj_lib.h lj_obj.h \
+ host/buildvm_libbc.h
+host/buildvm_peobj.o: host/buildvm_peobj.c host/buildvm.h lj_def.h lua.h \
+ luaconf.h lj_arch.h lj_bc.h lj_def.h lj_arch.h
+host/minilua.o: host/minilua.c
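
Makefile.dep above is machine-generated: the depend target in the Makefile first touches any missing generated headers, runs $(HOST_CC) -MM over *.c and host/*.c, and post-processes the output with sed (prefixing host/ objects and collapsing the per-arch lj_target_/lj_emit_/lj_asm_ headers into wildcards). To refresh it after changing #include lines:

    make depend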
diff --git a/libs/luajit-cmake/luajit/src/host/.gitignore b/libs/luajit-cmake/luajit/src/host/.gitignore
new file mode 100644
index 0000000..762ac2a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/.gitignore
@@ -0,0 +1,3 @@
+minilua
+buildvm
+buildvm_arch.h
diff --git a/libs/luajit-cmake/luajit/src/host/README b/libs/luajit-cmake/luajit/src/host/README
new file mode 100644
index 0000000..abfcdaa
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/README
@@ -0,0 +1,4 @@
+The files in this directory are only used during the build process of LuaJIT.
+For cross-compilation, they must be executed on the host, not on the target.
+
+These files should NOT be installed!
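
In a cross-build, these host tools are compiled with HOST_CC for the build machine, while everything else is compiled with the CROSS-prefixed target toolchain. Example invocation taken from the Makefile above (32-bit host tools for a 32-bit Windows target):

    make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows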
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm.c b/libs/luajit-cmake/luajit/src/host/buildvm.c
new file mode 100644
index 0000000..9ee47ad
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm.c
@@ -0,0 +1,528 @@
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** This is a tool to build the hand-tuned assembler code required for
+** LuaJIT's bytecode interpreter. It supports a variety of output formats
+** to feed different toolchains (see usage() below).
+**
+** This tool is not particularly optimized because it's only used while
+** _building_ LuaJIT. There's no point in distributing or installing it.
+** Only the object code generated by this tool is linked into LuaJIT.
+**
+** Caveat: some memory is not free'd, error handling is lazy.
+** It's a one-shot tool -- any effort fixing this would be wasted.
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_ir.h"
+#include "lj_ircall.h"
+#endif
+#include "lj_frame.h"
+#include "lj_dispatch.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_ccall.h"
+#endif
+#include "luajit.h"
+
+#if defined(_WIN32)
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+/* DynASM glue definitions. */
+#define Dst ctx
+#define Dst_DECL BuildCtx *ctx
+#define Dst_REF (ctx->D)
+#define DASM_CHECKS 1
+
+#include "../dynasm/dasm_proto.h"
+
+/* Glue macros for DynASM. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type);
+
+#define DASM_EXTERN(ctx, addr, idx, type) \
+ collect_reloc(ctx, addr, idx, type)
+
+/* ------------------------------------------------------------------------ */
+
+/* Avoid trouble if cross-compiling for an x86 target. Speed doesn't matter. */
+#define DASM_ALIGNED_WRITES 1
+
+/* Embed architecture-specific DynASM encoder. */
+#if LJ_TARGET_X86ORX64
+#include "../dynasm/dasm_x86.h"
+#elif LJ_TARGET_ARM
+#include "../dynasm/dasm_arm.h"
+#elif LJ_TARGET_ARM64
+#include "../dynasm/dasm_arm64.h"
+#elif LJ_TARGET_PPC
+#include "../dynasm/dasm_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "../dynasm/dasm_mips.h"
+#else
+#error "No support for this architecture (yet)"
+#endif
+
+/* Embed generated architecture-specific backend. */
+#include "buildvm_arch.h"
+
+/* ------------------------------------------------------------------------ */
+
+void owrite(BuildCtx *ctx, const void *ptr, size_t sz)
+{
+ if (fwrite(ptr, 1, sz, ctx->fp) != sz) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit code as raw bytes. Only used for DynASM debugging. */
+static void emit_raw(BuildCtx *ctx)
+{
+ owrite(ctx, ctx->code, ctx->codesz);
+}
+
+/* -- Build machine code -------------------------------------------------- */
+
+static const char *sym_decorate(BuildCtx *ctx,
+ const char *prefix, const char *suffix)
+{
+ char name[256];
+ char *p;
+#if LJ_64
+ const char *symprefix = ctx->mode == BUILD_machasm ? "_" : "";
+#elif LJ_TARGET_XBOX360
+ const char *symprefix = "";
+#else
+ const char *symprefix = ctx->mode != BUILD_elfasm ? "_" : "";
+#endif
+ sprintf(name, "%s%s%s", symprefix, prefix, suffix);
+ p = strchr(name, '@');
+ if (p) {
+#if LJ_TARGET_X86ORX64
+ if (!LJ_64 && (ctx->mode == BUILD_coffasm || ctx->mode == BUILD_peobj))
+ name[0] = name[1] == 'R' ? '_' : '@'; /* Just for _RtlUnwind@16. */
+ else
+ *p = '\0';
+#elif LJ_TARGET_PPC && !LJ_TARGET_CONSOLE
+ /* Keep @plt etc. */
+#else
+ *p = '\0';
+#endif
+ }
+ p = (char *)malloc(strlen(name)+1); /* MSVC doesn't like strdup. */
+ strcpy(p, name);
+ return p;
+}
+
+#define NRELOCSYM (sizeof(extnames)/sizeof(extnames[0])-1)
+
+static int relocmap[NRELOCSYM];
+
+/* Collect external relocations. */
+static int collect_reloc(BuildCtx *ctx, uint8_t *addr, int idx, int type)
+{
+ if (ctx->nreloc >= BUILD_MAX_RELOC) {
+ fprintf(stderr, "Error: too many relocations, increase BUILD_MAX_RELOC.\n");
+ exit(1);
+ }
+ if (relocmap[idx] < 0) {
+ relocmap[idx] = ctx->nrelocsym;
+ ctx->relocsym[ctx->nrelocsym] = sym_decorate(ctx, "", extnames[idx]);
+ ctx->nrelocsym++;
+ }
+ ctx->reloc[ctx->nreloc].ofs = (int32_t)(addr - ctx->code);
+ ctx->reloc[ctx->nreloc].sym = relocmap[idx];
+ ctx->reloc[ctx->nreloc].type = type;
+ ctx->nreloc++;
+#if LJ_TARGET_XBOX360
+ return (int)(ctx->code - addr) + 4; /* Encode symbol offset of .text. */
+#else
+ return 0; /* Encode symbol offset of 0. */
+#endif
+}
+
+/* Naive insertion sort. Performance doesn't matter here. */
+static void sym_insert(BuildCtx *ctx, int32_t ofs,
+ const char *prefix, const char *suffix)
+{
+ ptrdiff_t i = ctx->nsym++;
+ while (i > 0) {
+ if (ctx->sym[i-1].ofs <= ofs)
+ break;
+ ctx->sym[i] = ctx->sym[i-1];
+ i--;
+ }
+ ctx->sym[i].ofs = ofs;
+ ctx->sym[i].name = sym_decorate(ctx, prefix, suffix);
+}
+
+/* Build the machine code. */
+static int build_code(BuildCtx *ctx)
+{
+ int status;
+ int i;
+
+ /* Initialize DynASM structures. */
+ ctx->nglob = GLOB__MAX;
+ ctx->glob = (void **)malloc(ctx->nglob*sizeof(void *));
+ memset(ctx->glob, 0, ctx->nglob*sizeof(void *));
+ ctx->nreloc = 0;
+
+ ctx->globnames = globnames;
+ ctx->extnames = extnames;
+ ctx->relocsym = (const char **)malloc(NRELOCSYM*sizeof(const char *));
+ ctx->nrelocsym = 0;
+ for (i = 0; i < (int)NRELOCSYM; i++) relocmap[i] = -1;
+
+ ctx->dasm_ident = DASM_IDENT;
+ ctx->dasm_arch = DASM_ARCH;
+
+ dasm_init(Dst, DASM_MAXSECTION);
+ dasm_setupglobal(Dst, ctx->glob, ctx->nglob);
+ dasm_setup(Dst, build_actionlist);
+
+ /* Call arch-specific backend to emit the code. */
+ ctx->npc = build_backend(ctx);
+
+ /* Finalize the code. */
+ (void)dasm_checkstep(Dst, -1);
+ if ((status = dasm_link(Dst, &ctx->codesz))) return status;
+ ctx->code = (uint8_t *)malloc(ctx->codesz);
+ if ((status = dasm_encode(Dst, (void *)ctx->code))) return status;
+
+ /* Allocate symbol table and bytecode offsets. */
+ ctx->beginsym = sym_decorate(ctx, "", LABEL_PREFIX "vm_asm_begin");
+ ctx->sym = (BuildSym *)malloc((ctx->npc+ctx->nglob+1)*sizeof(BuildSym));
+ ctx->nsym = 0;
+ ctx->bc_ofs = (int32_t *)malloc(ctx->npc*sizeof(int32_t));
+
+ /* Collect the opcodes (PC labels). */
+ for (i = 0; i < ctx->npc; i++) {
+ int32_t ofs = dasm_getpclabel(Dst, i);
+ if (ofs < 0) return 0x22000000|i;
+ ctx->bc_ofs[i] = ofs;
+ if ((LJ_HASJIT ||
+ !(i == BC_JFORI || i == BC_JFORL || i == BC_JITERL || i == BC_JLOOP ||
+ i == BC_IFORL || i == BC_IITERL || i == BC_ILOOP)) &&
+ (LJ_HASFFI || i != BC_KCDATA))
+ sym_insert(ctx, ofs, LABEL_PREFIX_BC, bc_names[i]);
+ }
+
+ /* Collect the globals (named labels). */
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = globnames[i];
+ int len = (int)strlen(gl);
+ if (!ctx->glob[i]) {
+ fprintf(stderr, "Error: undefined global %s\n", gl);
+ exit(2);
+ }
+    /* Skip internal labels ending in "_Z". */
+ if (!(len >= 2 && gl[len-2] == '_' && gl[len-1] == 'Z'))
+ sym_insert(ctx, (int32_t)((uint8_t *)(ctx->glob[i]) - ctx->code),
+ LABEL_PREFIX, globnames[i]);
+ }
+
+ /* Close the address range. */
+ sym_insert(ctx, (int32_t)ctx->codesz, "", "");
+ ctx->nsym--;
+
+ dasm_free(Dst);
+
+ return 0;
+}
+
+/* -- Generate VM enums --------------------------------------------------- */
+
+const char *const bc_names[] = {
+#define BCNAME(name, ma, mb, mc, mt) #name,
+BCDEF(BCNAME)
+#undef BCNAME
+ NULL
+};
+
+#if LJ_HASJIT
+const char *const ir_names[] = {
+#define IRNAME(name, m, m1, m2) #name,
+IRDEF(IRNAME)
+#undef IRNAME
+ NULL
+};
+
+const char *const irt_names[] = {
+#define IRTNAME(name, size) #name,
+IRTDEF(IRTNAME)
+#undef IRTNAME
+ NULL
+};
+
+const char *const irfpm_names[] = {
+#define FPMNAME(name) #name,
+IRFPMDEF(FPMNAME)
+#undef FPMNAME
+ NULL
+};
+
+const char *const irfield_names[] = {
+#define FLNAME(name, ofs) #name,
+IRFLDEF(FLNAME)
+#undef FLNAME
+ NULL
+};
+
+const char *const ircall_names[] = {
+#define IRCALLNAME(cond, name, nargs, kind, type, flags) #name,
+IRCALLDEF(IRCALLNAME)
+#undef IRCALLNAME
+ NULL
+};
+
+static const char *const trace_errors[] = {
+#define TREDEF(name, msg) msg,
+#include "lj_traceerr.h"
+ NULL
+};
+#endif
+
+#if LJ_HASJIT
+static const char *lower(char *buf, const char *s)
+{
+ char *p = buf;
+ while (*s) {
+ *p++ = (*s >= 'A' && *s <= 'Z') ? *s+0x20 : *s;
+ s++;
+ }
+ *p = '\0';
+ return buf;
+}
+#endif
+
+/* Emit C source code for bytecode-related definitions. */
+static void emit_bcdef(BuildCtx *ctx)
+{
+ int i;
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_ofs[] = {\n");
+ for (i = 0; i < ctx->npc; i++) {
+ if (i != 0)
+ fprintf(ctx->fp, ",\n");
+ fprintf(ctx->fp, "%d", ctx->bc_ofs[i]);
+ }
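+  /* Note: the closing "};" is appended later by emit_lib() in bcdef mode. */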
+}
+
+/* Emit VM definitions as Lua code for debug modules. */
+static void emit_vmdef(BuildCtx *ctx)
+{
+#if LJ_HASJIT
+ char buf[80];
+#endif
+ int i;
+ fprintf(ctx->fp, "-- This is a generated file. DO NOT EDIT!\n\n");
+ fprintf(ctx->fp, "return {\n\n");
+
+ fprintf(ctx->fp, "bcnames = \"");
+ for (i = 0; bc_names[i]; i++) fprintf(ctx->fp, "%-6s", bc_names[i]);
+ fprintf(ctx->fp, "\",\n\n");
+
+#if LJ_HASJIT
+ fprintf(ctx->fp, "irnames = \"");
+ for (i = 0; ir_names[i]; i++) fprintf(ctx->fp, "%-6s", ir_names[i]);
+ fprintf(ctx->fp, "\",\n\n");
+
+ fprintf(ctx->fp, "irfpm = { [0]=");
+ for (i = 0; irfpm_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\", ", lower(buf, irfpm_names[i]));
+ fprintf(ctx->fp, "},\n\n");
+
+ fprintf(ctx->fp, "irfield = { [0]=");
+ for (i = 0; irfield_names[i]; i++) {
+ char *p;
+ lower(buf, irfield_names[i]);
+ p = strchr(buf, '_');
+ if (p) *p = '.';
+ fprintf(ctx->fp, "\"%s\", ", buf);
+ }
+ fprintf(ctx->fp, "},\n\n");
+
+ fprintf(ctx->fp, "ircall = {\n[0]=");
+ for (i = 0; ircall_names[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", ircall_names[i]);
+ fprintf(ctx->fp, "},\n\n");
+
+ fprintf(ctx->fp, "traceerr = {\n[0]=");
+ for (i = 0; trace_errors[i]; i++)
+ fprintf(ctx->fp, "\"%s\",\n", trace_errors[i]);
+ fprintf(ctx->fp, "},\n\n");
+#endif
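+  /* The returned table stays open here: emit_lib() appends ffnames and
+  ** main() emits the closing "}".
+  */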
+}
+
+/* -- Argument parsing ---------------------------------------------------- */
+
+/* Build mode names. */
+static const char *const modenames[] = {
+#define BUILDNAME(name) #name,
+BUILDDEF(BUILDNAME)
+#undef BUILDNAME
+ NULL
+};
+
+/* Print usage information and exit. */
+static void usage(void)
+{
+ int i;
+ fprintf(stderr, LUAJIT_VERSION " VM builder.\n");
+ fprintf(stderr, LUAJIT_COPYRIGHT ", " LUAJIT_URL "\n");
+ fprintf(stderr, "Target architecture: " LJ_ARCH_NAME "\n\n");
+ fprintf(stderr, "Usage: buildvm -m mode [-o outfile] [infiles...]\n\n");
+ fprintf(stderr, "Available modes:\n");
+ for (i = 0; i < BUILD__MAX; i++)
+ fprintf(stderr, " %s\n", modenames[i]);
+ exit(1);
+}
+
+/* Parse the output mode name. */
+static BuildMode parsemode(const char *mode)
+{
+ int i;
+ for (i = 0; modenames[i]; i++)
+ if (!strcmp(mode, modenames[i]))
+ return (BuildMode)i;
+ usage();
+ return (BuildMode)-1;
+}
+
+/* Parse arguments. */
+static void parseargs(BuildCtx *ctx, char **argv)
+{
+ const char *a;
+ int i;
+ ctx->mode = (BuildMode)-1;
+ ctx->outname = "-";
+ for (i = 1; (a = argv[i]) != NULL; i++) {
+ if (a[0] != '-')
+ break;
+ switch (a[1]) {
+ case '-':
+ if (a[2]) goto err;
+ i++;
+ goto ok;
+ case '\0':
+ goto ok;
+ case 'm':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->mode = parsemode(argv[i]);
+ break;
+ case 'o':
+ i++;
+ if (a[2] || argv[i] == NULL) goto err;
+ ctx->outname = argv[i];
+ break;
+ default: err:
+ usage();
+ break;
+ }
+ }
+ok:
+ ctx->args = argv+i;
+ if (ctx->mode == (BuildMode)-1) goto err;
+}
+
+int main(int argc, char **argv)
+{
+ BuildCtx ctx_;
+ BuildCtx *ctx = &ctx_;
+ int status, binmode;
+
+ if (sizeof(void *) != 4*LJ_32+8*LJ_64) {
+ fprintf(stderr,"Error: pointer size mismatch in cross-build.\n");
+ fprintf(stderr,"Try: make HOST_CC=\"gcc -m32\" CROSS=...\n\n");
+ return 1;
+ }
+
+ UNUSED(argc);
+ parseargs(ctx, argv);
+
+ if ((status = build_code(ctx))) {
+ fprintf(stderr,"Error: DASM error %08x\n", status);
+ return 1;
+ }
+
+ switch (ctx->mode) {
+ case BUILD_peobj:
+ case BUILD_raw:
+ binmode = 1;
+ break;
+ default:
+ binmode = 0;
+ break;
+ }
+
+ if (ctx->outname[0] == '-' && ctx->outname[1] == '\0') {
+ ctx->fp = stdout;
+#if defined(_WIN32)
+ if (binmode)
+ _setmode(_fileno(stdout), _O_BINARY); /* Yuck. */
+#endif
+ } else if (!(ctx->fp = fopen(ctx->outname, binmode ? "wb" : "w"))) {
+ fprintf(stderr, "Error: cannot open output file '%s': %s\n",
+ ctx->outname, strerror(errno));
+ exit(1);
+ }
+
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ case BUILD_machasm:
+ emit_asm(ctx);
+ emit_asm_debug(ctx);
+ break;
+ case BUILD_peobj:
+ emit_peobj(ctx);
+ break;
+ case BUILD_raw:
+ emit_raw(ctx);
+ break;
+ case BUILD_bcdef:
+ emit_bcdef(ctx);
+ emit_lib(ctx);
+ break;
+ case BUILD_vmdef:
+ emit_vmdef(ctx);
+ emit_lib(ctx);
+ fprintf(ctx->fp, "}\n\n");
+ break;
+ case BUILD_ffdef:
+ case BUILD_libdef:
+ case BUILD_recdef:
+ emit_lib(ctx);
+ break;
+ case BUILD_folddef:
+ emit_fold(ctx);
+ break;
+ default:
+ break;
+ }
+
+ fflush(ctx->fp);
+ if (ferror(ctx->fp)) {
+ fprintf(stderr, "Error: cannot write to output file: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+ fclose(ctx->fp);
+
+ return 0;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm.h b/libs/luajit-cmake/luajit/src/host/buildvm.h
new file mode 100644
index 0000000..18cd884
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm.h
@@ -0,0 +1,105 @@
+/*
+** LuaJIT VM builder.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _BUILDVM_H
+#define _BUILDVM_H
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Hardcoded limits. Increase as needed. */
+#define BUILD_MAX_RELOC 200 /* Max. number of relocations. */
+#define BUILD_MAX_FOLD 4096 /* Max. number of fold rules. */
+
+/* Prefix for scanned library definitions. */
+#define LIBDEF_PREFIX "LJLIB_"
+
+/* Prefix for scanned fold definitions. */
+#define FOLDDEF_PREFIX "LJFOLD"
+
+/* Prefixes for generated labels. */
+#define LABEL_PREFIX "lj_"
+#define LABEL_PREFIX_BC LABEL_PREFIX "BC_"
+#define LABEL_PREFIX_FF LABEL_PREFIX "ff_"
+#define LABEL_PREFIX_CF LABEL_PREFIX "cf_"
+#define LABEL_PREFIX_FFH LABEL_PREFIX "ffh_"
+#define LABEL_PREFIX_LIBCF LABEL_PREFIX "lib_cf_"
+#define LABEL_PREFIX_LIBINIT LABEL_PREFIX "lib_init_"
+
+/* Forward declaration. */
+struct dasm_State;
+
+/* Build modes. */
+#define BUILDDEF(_) \
+ _(elfasm) _(coffasm) _(machasm) _(peobj) _(raw) \
+ _(bcdef) _(ffdef) _(libdef) _(recdef) _(vmdef) \
+ _(folddef)
+
+typedef enum {
+#define BUILDENUM(name) BUILD_##name,
+BUILDDEF(BUILDENUM)
+#undef BUILDENUM
+ BUILD__MAX
+} BuildMode;
+
+/* Code relocation. */
+typedef struct BuildReloc {
+ int32_t ofs;
+ int sym;
+ int type;
+} BuildReloc;
+
+typedef struct BuildSym {
+ const char *name;
+ int32_t ofs;
+} BuildSym;
+
+/* Build context structure. */
+typedef struct BuildCtx {
+ /* DynASM state pointer. Should be first member. */
+ struct dasm_State *D;
+ /* Parsed command line. */
+ BuildMode mode;
+ FILE *fp;
+ const char *outname;
+ char **args;
+ /* Code and symbols generated by DynASM. */
+ uint8_t *code;
+ size_t codesz;
+ int npc, nglob, nsym, nreloc, nrelocsym;
+ void **glob;
+ BuildSym *sym;
+ const char **relocsym;
+ int32_t *bc_ofs;
+ const char *beginsym;
+ /* Strings generated by DynASM. */
+ const char *const *globnames;
+ const char *const *extnames;
+ const char *dasm_ident;
+ const char *dasm_arch;
+ /* Relocations. */
+ BuildReloc reloc[BUILD_MAX_RELOC];
+} BuildCtx;
+
+extern void owrite(BuildCtx *ctx, const void *ptr, size_t sz);
+extern void emit_asm(BuildCtx *ctx);
+extern void emit_peobj(BuildCtx *ctx);
+extern void emit_lib(BuildCtx *ctx);
+extern void emit_fold(BuildCtx *ctx);
+
+extern const char *const bc_names[];
+extern const char *const ir_names[];
+extern const char *const irt_names[];
+extern const char *const irfpm_names[];
+extern const char *const irfield_names[];
+extern const char *const ircall_names[];
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm_asm.c b/libs/luajit-cmake/luajit/src/host/buildvm_asm.c
new file mode 100644
index 0000000..7baa011
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm_asm.c
@@ -0,0 +1,348 @@
+/*
+** LuaJIT VM builder: Assembler source code emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+/* ------------------------------------------------------------------------ */
+
+#if LJ_TARGET_X86ORX64
+/* Emit bytes piecewise as assembler text. */
+static void emit_asm_bytes(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.byte %d", p[i]);
+ else
+ fprintf(ctx->fp, ",%d", p[i]);
+ if ((i & 15) == 15) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit a relocation. */
+static void emit_asm_reloc(BuildCtx *ctx, int type, const char *sym)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", sym);
+ if (type)
+ fprintf(ctx->fp, "\t.long %s-.-4\n", sym);
+ else
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+  default: /* machasm; relative relocs go via emit_asm_reloc_text(). */
+ fprintf(ctx->fp, "\t.long %s\n", sym);
+ break;
+ }
+}
+
+static const char *const jccnames[] = {
+ "jo", "jno", "jb", "jnb", "jz", "jnz", "jbe", "ja",
+ "js", "jns", "jpe", "jpo", "jl", "jge", "jle", "jg"
+};
+
+/* Emit x86/x64 text relocations. */
+static void emit_asm_reloc_text(BuildCtx *ctx, uint8_t *cp, int n,
+ const char *sym)
+{
+ const char *opname = NULL;
+ if (--n < 0) goto err;
+ if (cp[n] == 0xe8) {
+ opname = "call";
+ } else if (cp[n] == 0xe9) {
+ opname = "jmp";
+ } else if (cp[n] >= 0x80 && cp[n] <= 0x8f && n > 0 && cp[n-1] == 0x0f) {
+ opname = jccnames[cp[n]-0x80];
+ n--;
+ } else {
+err:
+ fprintf(stderr, "Error: unsupported opcode for %s symbol relocation.\n",
+ sym);
+ exit(1);
+ }
+ emit_asm_bytes(ctx, cp, n);
+ if (strncmp(sym+(*sym == '_'), LABEL_PREFIX, sizeof(LABEL_PREFIX)-1)) {
+ /* Various fixups for external symbols outside of our binary. */
+ if (ctx->mode == BUILD_elfasm) {
+ if (LJ_32)
+ fprintf(ctx->fp, "#if __PIC__\n\t%s lj_wrap_%s\n#else\n", opname, sym);
+ fprintf(ctx->fp, "\t%s %s@PLT\n", opname, sym);
+ if (LJ_32)
+ fprintf(ctx->fp, "#endif\n");
+ return;
+ } else if (LJ_32 && ctx->mode == BUILD_machasm) {
+ fprintf(ctx->fp, "\t%s L%s$stub\n", opname, sym);
+ return;
+ }
+ }
+ fprintf(ctx->fp, "\t%s %s\n", opname, sym);
+}
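+/* Example: a relocated "0f 84 xx xx xx xx" (jz rel32) is emitted as the
+** bytes up to the opcode followed by "\tjz sym", so the assembler
+** encodes the displacement itself.
+*/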
+#else
+/* Emit words piecewise as assembler text. */
+static void emit_asm_words(BuildCtx *ctx, uint8_t *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i += 4) {
+ uint32_t ins = *(uint32_t *)(p+i);
+#if LJ_TARGET_ARM64 && LJ_BE
+ ins = lj_bswap(ins); /* ARM64 instructions are always little-endian. */
+#endif
+ if ((i & 15) == 0)
+ fprintf(ctx->fp, "\t.long 0x%08x", ins);
+ else
+ fprintf(ctx->fp, ",0x%08x", ins);
+ if ((i & 15) == 12) putc('\n', ctx->fp);
+ }
+ if ((n & 15) != 0) putc('\n', ctx->fp);
+}
+
+/* Emit relocation as part of an instruction. */
+static void emit_asm_wordreloc(BuildCtx *ctx, uint8_t *p, int n,
+ const char *sym)
+{
+ uint32_t ins;
+ emit_asm_words(ctx, p, n-4);
+ ins = *(uint32_t *)(p+n-4);
+#if LJ_TARGET_ARM
+ if ((ins & 0xff000000u) == 0xfa000000u) {
+ fprintf(ctx->fp, "\tblx %s\n", sym);
+ } else if ((ins & 0x0e000000u) == 0x0a000000u) {
+ fprintf(ctx->fp, "\t%s%.2s %s\n", (ins & 0x01000000u) ? "bl" : "b",
+ &"eqnecsccmiplvsvchilsgeltgtle"[2*(ins >> 28)], sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_ARM64
+ if ((ins >> 26) == 0x25u) {
+ fprintf(ctx->fp, "\tbl %s\n", sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_PPC
+#if LJ_TARGET_PS3
+#define TOCPREFIX "."
+#else
+#define TOCPREFIX ""
+#endif
+ if ((ins >> 26) == 16) {
+ fprintf(ctx->fp, "\t%s %d, %d, " TOCPREFIX "%s\n",
+ (ins & 1) ? "bcl" : "bc", (ins >> 21) & 31, (ins >> 16) & 31, sym);
+ } else if ((ins >> 26) == 18) {
+ fprintf(ctx->fp, "\t%s " TOCPREFIX "%s\n", (ins & 1) ? "bl" : "b", sym);
+ } else {
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+ }
+#elif LJ_TARGET_MIPS
+ fprintf(stderr,
+ "Error: unsupported opcode %08x for %s symbol relocation.\n",
+ ins, sym);
+ exit(1);
+#else
+#error "missing relocation support for this architecture"
+#endif
+}
+#endif
+
+#if LJ_TARGET_ARM
+#define ELFASM_PX "%%"
+#else
+#define ELFASM_PX "@"
+#endif
+
+/* Emit an assembler label. */
+static void emit_asm_label(BuildCtx *ctx, const char *name, int size, int isfunc)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+#if LJ_TARGET_PS3
+ if (!strncmp(name, "lj_vm_", 6) &&
+ strcmp(name, ctx->beginsym) &&
+ !strstr(name, "hook")) {
+ fprintf(ctx->fp,
+ "\n\t.globl %s\n"
+ "\t.section \".opd\",\"aw\"\n"
+ "%s:\n"
+ "\t.long .%s,.TOC.@tocbase32\n"
+ "\t.size %s,8\n"
+ "\t.previous\n"
+ "\t.globl .%s\n"
+ "\t.hidden .%s\n"
+ "\t.type .%s, " ELFASM_PX "function\n"
+ "\t.size .%s, %d\n"
+ ".%s:\n",
+ name, name, name, name, name, name, name, name, size, name);
+ break;
+ }
+#endif
+ fprintf(ctx->fp,
+ "\n\t.globl %s\n"
+ "\t.hidden %s\n"
+ "\t.type %s, " ELFASM_PX "%s\n"
+ "\t.size %s, %d\n"
+ "%s:\n",
+ name, name, name, isfunc ? "function" : "object", name, size, name);
+ break;
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\n\t.globl %s\n", name);
+ if (isfunc)
+ fprintf(ctx->fp, "\t.def %s; .scl 3; .type 32; .endef\n", name);
+ fprintf(ctx->fp, "%s:\n", name);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\n\t.private_extern %s\n"
+ "\t.no_dead_strip %s\n"
+ "%s:\n", name, name, name);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Emit alignment. */
+static void emit_asm_align(BuildCtx *ctx, int bits)
+{
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.p2align %d\n", bits);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp, "\t.align %d\n", bits);
+ break;
+ default:
+ break;
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Emit assembler source code. */
+void emit_asm(BuildCtx *ctx)
+{
+ int i, rel;
+
+ fprintf(ctx->fp, "\t.file \"buildvm_%s.dasc\"\n", ctx->dasm_arch);
+ fprintf(ctx->fp, "\t.text\n");
+ emit_asm_align(ctx, 4);
+
+#if LJ_TARGET_PS3
+ emit_asm_label(ctx, ctx->beginsym, ctx->codesz, 0);
+#else
+ emit_asm_label(ctx, ctx->beginsym, 0, 0);
+#endif
+ if (ctx->mode != BUILD_machasm)
+ fprintf(ctx->fp, ".Lbegin:\n");
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
+ /* This should really be moved into buildvm_arm.dasc. */
+#if LJ_ARCH_HASFPU
+ fprintf(ctx->fp,
+ ".fnstart\n"
+ ".save {r5, r6, r7, r8, r9, r10, r11, lr}\n"
+ ".vsave {d8-d15}\n"
+ ".save {r4}\n"
+ ".pad #28\n");
+#else
+ fprintf(ctx->fp,
+ ".fnstart\n"
+ ".save {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
+ ".pad #28\n");
+#endif
+#endif
+#if LJ_TARGET_MIPS
+ fprintf(ctx->fp, ".set nomips16\n.abicalls\n.set noreorder\n.set nomacro\n");
+#endif
+
+ for (i = rel = 0; i < ctx->nsym; i++) {
+ int32_t ofs = ctx->sym[i].ofs;
+ int32_t next = ctx->sym[i+1].ofs;
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND && LJ_HASFFI
+ if (!strcmp(ctx->sym[i].name, "lj_vm_ffi_call"))
+ fprintf(ctx->fp,
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+ ".fnend\n"
+ ".fnstart\n"
+ ".save {r4, r5, r11, lr}\n"
+ ".setfp r11, sp\n");
+#endif
+ emit_asm_label(ctx, ctx->sym[i].name, next - ofs, 1);
+ while (rel < ctx->nreloc && ctx->reloc[rel].ofs <= next) {
+ BuildReloc *r = &ctx->reloc[rel];
+ int n = r->ofs - ofs;
+#if LJ_TARGET_X86ORX64
+ if (r->type != 0 &&
+ (ctx->mode == BUILD_elfasm || ctx->mode == BUILD_machasm)) {
+ emit_asm_reloc_text(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ } else {
+ emit_asm_bytes(ctx, ctx->code+ofs, n);
+ emit_asm_reloc(ctx, r->type, ctx->relocsym[r->sym]);
+ }
+ ofs += n+4;
+#else
+ emit_asm_wordreloc(ctx, ctx->code+ofs, n, ctx->relocsym[r->sym]);
+ ofs += n;
+#endif
+ rel++;
+ }
+#if LJ_TARGET_X86ORX64
+ emit_asm_bytes(ctx, ctx->code+ofs, next-ofs);
+#else
+ emit_asm_words(ctx, ctx->code+ofs, next-ofs);
+#endif
+ }
+
+#if LJ_TARGET_ARM && defined(__GNUC__) && !LJ_NO_UNWIND
+ fprintf(ctx->fp,
+#if !LJ_HASFFI
+ ".globl lj_err_unwind_arm\n"
+ ".personality lj_err_unwind_arm\n"
+#endif
+ ".fnend\n");
+#endif
+
+ fprintf(ctx->fp, "\n");
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+#if !(LJ_TARGET_PS3 || LJ_TARGET_PSVITA)
+ fprintf(ctx->fp, "\t.section .note.GNU-stack,\"\"," ELFASM_PX "progbits\n");
+#endif
+#if LJ_TARGET_PPC && !LJ_TARGET_PS3 && !LJ_ABI_SOFTFP
+ /* Hard-float ABI. */
+ fprintf(ctx->fp, "\t.gnu_attribute 4, 1\n");
+#endif
+ /* fallthrough */
+ case BUILD_coffasm:
+ fprintf(ctx->fp, "\t.ident \"%s\"\n", ctx->dasm_ident);
+ break;
+ case BUILD_machasm:
+ fprintf(ctx->fp,
+ "\t.cstring\n"
+ "\t.ascii \"%s\\0\"\n", ctx->dasm_ident);
+ break;
+ default:
+ break;
+ }
+ fprintf(ctx->fp, "\n");
+}
+
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm_fold.c b/libs/luajit-cmake/luajit/src/host/buildvm_fold.c
new file mode 100644
index 0000000..edb5576
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm_fold.c
@@ -0,0 +1,236 @@
+/*
+** LuaJIT VM builder: IR folding hash table generator.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_ir.h"
+
+/* Context for the folding hash table generator. */
+static int lineno;
+static uint32_t funcidx;
+static uint32_t foldkeys[BUILD_MAX_FOLD];
+static uint32_t nkeys;
+
+/* Try to fill the hash table with keys using the hash parameters. */
+static int tryhash(uint32_t *htab, uint32_t sz, uint32_t r, int dorol)
+{
+ uint32_t i;
+ if (dorol && ((r & 31) == 0 || (r>>5) == 0))
+ return 0; /* Avoid zero rotates. */
+ memset(htab, 0xff, (sz+1)*sizeof(uint32_t));
+ for (i = 0; i < nkeys; i++) {
+ uint32_t key = foldkeys[i];
+ uint32_t k = key & 0xffffff;
+ uint32_t h = (dorol ? lj_rol(lj_rol(k, r>>5) - k, r&31) :
+ (((k << (r>>5)) - k) << (r&31))) % sz;
+ if (htab[h] != 0xffffffff) { /* Collision on primary slot. */
+ if (htab[h+1] != 0xffffffff) { /* Collision on secondary slot. */
+ /* Try to move the colliding key, if possible. */
+ if (h < sz-1 && htab[h+2] == 0xffffffff) {
+ uint32_t k2 = htab[h+1] & 0xffffff;
+ uint32_t h2 = (dorol ? lj_rol(lj_rol(k2, r>>5) - k2, r&31) :
+ (((k2 << (r>>5)) - k2) << (r&31))) % sz;
+ if (h2 != h+1) return 0; /* Cannot resolve collision. */
+ htab[h+2] = htab[h+1]; /* Move colliding key to secondary slot. */
+ } else {
+ return 0; /* Collision. */
+ }
+ }
+ htab[h+1] = key;
+ } else {
+ htab[h] = key;
+ }
+ }
+ return 1; /* Success, all keys could be stored. */
+}
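+/* "Semi-perfect": each key must land in its primary slot h or in the
+** overflow slot h+1. The table has sz+1 entries, so the overflow slot
+** is valid even for h == sz-1.
+*/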
+
+/* Print the generated hash table. */
+static void printhash(BuildCtx *ctx, uint32_t *htab, uint32_t sz)
+{
+ uint32_t i;
+ fprintf(ctx->fp, "static const uint32_t fold_hash[%d] = {\n0x%08x",
+ sz+1, htab[0]);
+ for (i = 1; i < sz+1; i++)
+ fprintf(ctx->fp, ",\n0x%08x", htab[i]);
+ fprintf(ctx->fp, "\n};\n\n");
+}
+
+/* Exhaustive search for the shortest semi-perfect hash table. */
+static void makehash(BuildCtx *ctx)
+{
+ uint32_t htab[BUILD_MAX_FOLD*2+1];
+ uint32_t sz, r;
+ /* Search for the smallest hash table with an odd size. */
+ for (sz = (nkeys|1); sz < BUILD_MAX_FOLD*2; sz += 2) {
+ /* First try all shift hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 0)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(((((k)<<%u)-(k))<<%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ /* Then try all rotate hash combinations. */
+ for (r = 0; r < 32*32; r++) {
+ if (tryhash(htab, sz, r, 1)) {
+ printhash(ctx, htab, sz);
+ fprintf(ctx->fp,
+ "#define fold_hashkey(k)\t(lj_rol(lj_rol((k),%u)-(k),%u)%%%u)\n\n",
+ r>>5, r&31, sz);
+ return;
+ }
+ }
+ }
+ fprintf(stderr, "Error: search for perfect hash failed\n");
+ exit(1);
+}
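+/* The 10 bit parameter r packs two 5 bit amounts: r>>5 and r&31 are the
+** shift (or rotate) counts used in the generated fold_hashkey() macro.
+*/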
+
+/* Parse one token of a fold rule. */
+static uint32_t nexttoken(char **pp, int allowlit, int allowany)
+{
+ char *p = *pp;
+ if (p) {
+ uint32_t i;
+ char *q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ *pp = q;
+ if (allowlit && !strncmp(p, "IRFPM_", 6)) {
+ for (i = 0; irfpm_names[i]; i++)
+ if (!strcmp(irfpm_names[i], p+6))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRFL_", 5)) {
+ for (i = 0; irfield_names[i]; i++)
+ if (!strcmp(irfield_names[i], p+5))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCALL_", 7)) {
+ for (i = 0; ircall_names[i]; i++)
+ if (!strcmp(ircall_names[i], p+7))
+ return i;
+ } else if (allowlit && !strncmp(p, "IRCONV_", 7)) {
+ for (i = 0; irt_names[i]; i++) {
+ const char *r = strchr(p+7, '_');
+ if (r && !strncmp(irt_names[i], p+7, r-(p+7))) {
+ uint32_t j;
+ for (j = 0; irt_names[j]; j++)
+ if (!strcmp(irt_names[j], r+1))
+ return (i << 5) + j;
+ }
+ }
+ } else if (allowlit && *p >= '0' && *p <= '9') {
+ for (i = 0; *p >= '0' && *p <= '9'; p++)
+ i = i*10 + (*p - '0');
+ if (*p == '\0')
+ return i;
+ } else if (allowany && !strcmp("any", p)) {
+ return allowany;
+ } else {
+ for (i = 0; ir_names[i]; i++)
+ if (!strcmp(ir_names[i], p))
+ return i;
+ }
+ fprintf(stderr, "Error: bad fold definition token \"%s\" at line %d\n", p, lineno);
+ exit(1);
+ }
+ return 0;
+}
+
+/* Parse a fold rule. */
+static void foldrule(char *p)
+{
+ uint32_t op = nexttoken(&p, 0, 0);
+ uint32_t left = nexttoken(&p, 0, 0x7f);
+ uint32_t right = nexttoken(&p, 1, 0x3ff);
+ uint32_t key = (funcidx << 24) | (op << 17) | (left << 10) | right;
+ uint32_t i;
+ if (nkeys >= BUILD_MAX_FOLD) {
+ fprintf(stderr, "Error: too many fold rules, increase BUILD_MAX_FOLD.\n");
+ exit(1);
+ }
+ /* Simple insertion sort to detect duplicates. */
+ for (i = nkeys; i > 0; i--) {
+ if ((foldkeys[i-1]&0xffffff) < (key & 0xffffff))
+ break;
+ if ((foldkeys[i-1]&0xffffff) == (key & 0xffffff)) {
+ fprintf(stderr, "Error: duplicate fold definition at line %d\n", lineno);
+ exit(1);
+ }
+ foldkeys[i] = foldkeys[i-1];
+ }
+ foldkeys[i] = key;
+ nkeys++;
+}
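+/* Key layout: 8 bit function index, 7 bit opcode, 7 bit left operand,
+** 10 bit right operand. A purely illustrative example: funcidx=3,
+** op=0x12, left=0x05, right=0x3ff packs to 0x032417ff.
+*/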
+
+/* Emit C source code for IR folding hash table. */
+void emit_fold(BuildCtx *ctx)
+{
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ const char *fname = ctx->args[0];
+ FILE *fp;
+
+ if (fname == NULL) {
+ fprintf(stderr, "Error: missing input filename\n");
+ exit(1);
+ }
+
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ fprintf(ctx->fp, "static const FoldFunc fold_func[] = {\n");
+
+ lineno = 0;
+ funcidx = 0;
+ nkeys = 0;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ lineno++;
+ /* The prefix must be at the start of a line, otherwise it's ignored. */
+ if (!strncmp(buf, FOLDDEF_PREFIX, sizeof(FOLDDEF_PREFIX)-1)) {
+ char *p = buf+sizeof(FOLDDEF_PREFIX)-1;
+ char *q = strchr(p, ')');
+ if (p[0] == '(' && q) {
+ p++;
+ *q = '\0';
+ foldrule(p);
+ } else if ((p[0] == 'F' || p[0] == 'X') && p[1] == '(' && q) {
+ p += 2;
+ *q = '\0';
+ if (funcidx)
+ fprintf(ctx->fp, ",\n");
+ if (p[-2] == 'X')
+ fprintf(ctx->fp, " %s", p);
+ else
+ fprintf(ctx->fp, " fold_%s", p);
+ funcidx++;
+ } else {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown fold definition tag %s%s at line %d\n",
+ FOLDDEF_PREFIX, p, lineno);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ fprintf(ctx->fp, "\n};\n\n");
+
+ makehash(ctx);
+}
+#else
+void emit_fold(BuildCtx *ctx)
+{
+ UNUSED(ctx);
+}
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm_lib.c b/libs/luajit-cmake/luajit/src/host/buildvm_lib.c
new file mode 100644
index 0000000..b125ea1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm_lib.c
@@ -0,0 +1,459 @@
+/*
+** LuaJIT VM builder: library definition compiler.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "buildvm.h"
+#include "lj_obj.h"
+#include "lj_bc.h"
+#include "lj_lib.h"
+#include "buildvm_libbc.h"
+
+/* Context for library definitions. */
+static uint8_t obuf[8192];
+static uint8_t *optr;
+static char modname[80];
+static size_t modnamelen;
+static char funcname[80];
+static int modstate, regfunc;
+static int ffid, recffid, ffasmfunc;
+
+enum {
+ REGFUNC_OK,
+ REGFUNC_NOREG,
+ REGFUNC_NOREGUV
+};
+
+static void libdef_name(const char *p, int kind)
+{
+ size_t n = strlen(p);
+ if (kind != LIBINIT_STRING) {
+ if (n > modnamelen && p[modnamelen] == '_' &&
+ !strncmp(p, modname, modnamelen)) {
+ p += modnamelen+1;
+ n -= modnamelen+1;
+ }
+ }
+ if (n > LIBINIT_MAXSTR) {
+ fprintf(stderr, "Error: string too long: '%s'\n", p);
+ exit(1);
+ }
+ if (optr+1+n+2 > obuf+sizeof(obuf)) { /* +2 for caller. */
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = (uint8_t)(n | kind);
+ memcpy(optr, p, n);
+ optr += n;
+}
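+/* The leading byte written above packs the length (<= LIBINIT_MAXSTR)
+** with the kind tag in its upper bits; see the LIBINIT_* encoding in
+** lj_lib.h.
+*/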
+
+static void libdef_endmodule(BuildCtx *ctx)
+{
+ if (modstate != 0) {
+ char line[80];
+ const uint8_t *p;
+ int n;
+ if (modstate == 1)
+ fprintf(ctx->fp, " (lua_CFunction)0");
+ fprintf(ctx->fp, "\n};\n");
+ fprintf(ctx->fp, "static const uint8_t %s%s[] = {\n",
+ LABEL_PREFIX_LIBINIT, modname);
+ line[0] = '\0';
+ for (n = 0, p = obuf; p < optr; p++) {
+ n += sprintf(line+n, "%d,", *p);
+ if (n >= 75) {
+ fprintf(ctx->fp, "%s\n", line);
+ n = 0;
+ line[0] = '\0';
+ }
+ }
+ fprintf(ctx->fp, "%s%d\n};\n#endif\n\n", line, LIBINIT_END);
+ }
+}
+
+static void libdef_module(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ optr = obuf;
+ *optr++ = (uint8_t)ffid;
+ *optr++ = (uint8_t)ffasmfunc;
+ *optr++ = 0; /* Hash table size. */
+ modstate = 1;
+ fprintf(ctx->fp, "#ifdef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "#undef %sMODULE_%s\n", LIBDEF_PREFIX, p);
+ fprintf(ctx->fp, "static const lua_CFunction %s%s[] = {\n",
+ LABEL_PREFIX_LIBCF, p);
+ }
+ modnamelen = strlen(p);
+ if (modnamelen > sizeof(modname)-1) {
+ fprintf(stderr, "Error: module name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(modname, p);
+}
+
+static int find_ffofs(BuildCtx *ctx, const char *name)
+{
+ int i;
+ for (i = 0; i < ctx->nglob; i++) {
+ const char *gl = ctx->globnames[i];
+ if (gl[0] == 'f' && gl[1] == 'f' && gl[2] == '_' && !strcmp(gl+3, name)) {
+ return (int)((uint8_t *)ctx->glob[i] - ctx->code);
+ }
+ }
+ fprintf(stderr, "Error: undefined fast function %s%s\n",
+ LABEL_PREFIX_FF, name);
+ exit(1);
+}
+
+static void libdef_func(BuildCtx *ctx, char *p, int arg)
+{
+ if (arg != LIBINIT_CF)
+ ffasmfunc++;
+ if (ctx->mode == BUILD_libdef) {
+ if (modstate == 0) {
+ fprintf(stderr, "Error: no module for function definition %s\n", p);
+ exit(1);
+ }
+ if (regfunc == REGFUNC_NOREG) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_FFID;
+ } else {
+ if (arg != LIBINIT_ASM_) {
+ if (modstate != 1) fprintf(ctx->fp, ",\n");
+ modstate = 2;
+ fprintf(ctx->fp, " %s%s", arg ? LABEL_PREFIX_FFH : LABEL_PREFIX_CF, p);
+ }
+ if (regfunc != REGFUNC_NOREGUV) obuf[2]++; /* Bump hash table size. */
+ libdef_name(regfunc == REGFUNC_NOREGUV ? "" : p, arg);
+ }
+ } else if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "FFDEF(%s)\n", p);
+ } else if (ctx->mode == BUILD_recdef) {
+ if (strlen(p) > sizeof(funcname)-1) {
+ fprintf(stderr, "Error: function name too long: '%s'\n", p);
+ exit(1);
+ }
+ strcpy(funcname, p);
+ } else if (ctx->mode == BUILD_vmdef) {
+ int i;
+ for (i = 1; p[i] && modname[i-1]; i++)
+ if (p[i] == '_') p[i] = '.';
+ fprintf(ctx->fp, "\"%s\",\n", p);
+ } else if (ctx->mode == BUILD_bcdef) {
+ if (arg != LIBINIT_CF)
+ fprintf(ctx->fp, ",\n%d", find_ffofs(ctx, p));
+ }
+ ffid++;
+ regfunc = REGFUNC_OK;
+}
+
+static uint8_t *libdef_uleb128(uint8_t *p, uint32_t *vv)
+{
+ uint32_t v = *p++;
+ if (v >= 0x80) {
+ int sh = 0; v &= 0x7f;
+ do { v |= ((*p & 0x7f) << (sh += 7)); } while (*p++ >= 0x80);
+ }
+ *vv = v;
+ return p;
+}
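+/* ULEB128 example: the byte sequence 0xe5 0x8e 0x26 decodes to
+** 0x65 + (0x0e<<7) + (0x26<<14) = 624485.
+*/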
+
+static void libdef_fixupbc(uint8_t *p)
+{
+ uint32_t i, sizebc;
+ p += 4;
+ p = libdef_uleb128(p, &sizebc);
+ p = libdef_uleb128(p, &sizebc);
+ p = libdef_uleb128(p, &sizebc);
+ for (i = 0; i < sizebc; i++, p += 4) {
+ uint8_t op = p[libbc_endian ? 3 : 0];
+ uint8_t ra = p[libbc_endian ? 2 : 1];
+ uint8_t rc = p[libbc_endian ? 1 : 2];
+ uint8_t rb = p[libbc_endian ? 0 : 3];
+ if (!LJ_DUALNUM && op == BC_ISTYPE && rc == ~LJ_TNUMX+1) {
+ op = BC_ISNUM; rc++;
+ }
+ p[LJ_ENDIAN_SELECT(0, 3)] = op;
+ p[LJ_ENDIAN_SELECT(1, 2)] = ra;
+ p[LJ_ENDIAN_SELECT(2, 1)] = rc;
+ p[LJ_ENDIAN_SELECT(3, 0)] = rb;
+ }
+}
+
+static void libdef_lua(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ int i;
+ for (i = 0; libbc_map[i].name != NULL; i++) {
+ if (!strcmp(libbc_map[i].name, p)) {
+ int ofs = libbc_map[i].ofs;
+ int len = libbc_map[i+1].ofs - ofs;
+ obuf[2]++; /* Bump hash table size. */
+ *optr++ = LIBINIT_LUA;
+ libdef_name(p, 0);
+ memcpy(optr, libbc_code + ofs, len);
+ libdef_fixupbc(optr);
+ optr += len;
+ return;
+ }
+ }
+ fprintf(stderr, "Error: missing libbc definition for %s\n", p);
+ exit(1);
+ }
+}
+
+static uint32_t find_rec(char *name)
+{
+ char *p = (char *)obuf;
+ uint32_t n;
+ for (n = 2; *p; n++) {
+ if (strcmp(p, name) == 0)
+ return n;
+ p += strlen(p)+1;
+ }
+ if (p+strlen(name)+1 >= (char *)obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ strcpy(p, name);
+ return n;
+}
+
+static void libdef_rec(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_recdef) {
+ char *q;
+ uint32_t n;
+ for (; recffid+1 < ffid; recffid++)
+ fprintf(ctx->fp, ",\n0");
+ recffid = ffid;
+ if (*p == '.') p = funcname;
+ q = strchr(p, ' ');
+ if (q) *q++ = '\0';
+ n = find_rec(p);
+ if (q)
+ fprintf(ctx->fp, ",\n0x%02x00+(%s)", n, q);
+ else
+ fprintf(ctx->fp, ",\n0x%02x00", n);
+ }
+}
+
+static void memcpy_endian(void *dst, void *src, size_t n)
+{
+ union { uint8_t b; uint32_t u; } host_endian;
+ host_endian.u = 1;
+ if (host_endian.b == LJ_ENDIAN_SELECT(1, 0)) {
+ memcpy(dst, src, n);
+ } else {
+ size_t i;
+ for (i = 0; i < n; i++)
+ ((uint8_t *)dst)[i] = ((uint8_t *)src)[n-i-1];
+ }
+}
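+/* Copies forward if the host byte order matches the target order chosen
+** by LJ_ENDIAN_SELECT, otherwise the bytes are reversed.
+*/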
+
+static void libdef_push(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ int len = (int)strlen(p);
+ if (*p == '"') {
+ if (len > 1 && p[len-1] == '"') {
+ p[len-1] = '\0';
+ libdef_name(p+1, LIBINIT_STRING);
+ return;
+ }
+ } else if (*p >= '0' && *p <= '9') {
+ char *ep;
+ double d = strtod(p, &ep);
+ if (*ep == '\0') {
+ if (optr+1+sizeof(double) > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_NUMBER;
+ memcpy_endian(optr, &d, sizeof(double));
+ optr += sizeof(double);
+ return;
+ }
+ } else if (!strcmp(p, "lastcl")) {
+ if (optr+1 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_LASTCL;
+ return;
+ } else if (len > 4 && !strncmp(p, "top-", 4)) {
+ if (optr+2 > obuf+sizeof(obuf)) {
+ fprintf(stderr, "Error: output buffer overflow\n");
+ exit(1);
+ }
+ *optr++ = LIBINIT_COPY;
+ *optr++ = (uint8_t)atoi(p+4);
+ return;
+ }
+ fprintf(stderr, "Error: bad value for %sPUSH(%s)\n", LIBDEF_PREFIX, p);
+ exit(1);
+ }
+}
+
+static void libdef_set(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(arg);
+ if (ctx->mode == BUILD_libdef) {
+ if (p[0] == '!' && p[1] == '\0') p[0] = '\0'; /* Set env. */
+ libdef_name(p, LIBINIT_STRING);
+ *optr++ = LIBINIT_SET;
+ obuf[2]++; /* Bump hash table size. */
+ }
+}
+
+static void libdef_regfunc(BuildCtx *ctx, char *p, int arg)
+{
+ UNUSED(ctx); UNUSED(p);
+ regfunc = arg;
+}
+
+typedef void (*LibDefFunc)(BuildCtx *ctx, char *p, int arg);
+
+typedef struct LibDefHandler {
+ const char *suffix;
+ const char *stop;
+ const LibDefFunc func;
+ const int arg;
+} LibDefHandler;
+
+static const LibDefHandler libdef_handlers[] = {
+ { "MODULE_", " \t\r\n", libdef_module, 0 },
+ { "CF(", ")", libdef_func, LIBINIT_CF },
+ { "ASM(", ")", libdef_func, LIBINIT_ASM },
+ { "ASM_(", ")", libdef_func, LIBINIT_ASM_ },
+ { "LUA(", ")", libdef_lua, 0 },
+ { "REC(", ")", libdef_rec, 0 },
+ { "PUSH(", ")", libdef_push, 0 },
+ { "SET(", ")", libdef_set, 0 },
+ { "NOREGUV", NULL, libdef_regfunc, REGFUNC_NOREGUV },
+ { "NOREG", NULL, libdef_regfunc, REGFUNC_NOREG },
+ { NULL, NULL, (LibDefFunc)0, 0 }
+};
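+/* Handlers with a NULL stop string (NOREG/NOREGUV) take no argument. */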
+
+/* Emit C source code for library function definitions. */
+void emit_lib(BuildCtx *ctx)
+{
+ const char *fname;
+
+ if (ctx->mode == BUILD_ffdef || ctx->mode == BUILD_libdef ||
+ ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "/* This is a generated file. DO NOT EDIT! */\n\n");
+ else if (ctx->mode == BUILD_vmdef)
+ fprintf(ctx->fp, "ffnames = {\n[0]=\"Lua\",\n\"C\",\n");
+ if (ctx->mode == BUILD_recdef)
+ fprintf(ctx->fp, "static const uint16_t recff_idmap[] = {\n0,\n0x0100");
+ recffid = ffid = FF_C+1;
+ ffasmfunc = 0;
+
+ while ((fname = *ctx->args++)) {
+ char buf[256]; /* We don't care about analyzing lines longer than that. */
+ FILE *fp;
+ if (fname[0] == '-' && fname[1] == '\0') {
+ fp = stdin;
+ } else {
+ fp = fopen(fname, "r");
+ if (!fp) {
+ fprintf(stderr, "Error: cannot open input file '%s': %s\n",
+ fname, strerror(errno));
+ exit(1);
+ }
+ }
+ modstate = 0;
+ regfunc = REGFUNC_OK;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ char *p;
+ /* Simplistic pre-processor. Only handles top-level #if/#endif. */
+ if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
+ int ok = 1;
+ if (!strcmp(buf, "#if LJ_52\n"))
+ ok = LJ_52;
+ else if (!strcmp(buf, "#if LJ_HASJIT\n"))
+ ok = LJ_HASJIT;
+ else if (!strcmp(buf, "#if LJ_HASFFI\n"))
+ ok = LJ_HASFFI;
+ else if (!strcmp(buf, "#if LJ_HASBUFFER\n"))
+ ok = LJ_HASBUFFER;
+ if (!ok) {
+ int lvl = 1;
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ if (buf[0] == '#' && buf[1] == 'e' && buf[2] == 'n') {
+ if (--lvl == 0) break;
+ } else if (buf[0] == '#' && buf[1] == 'i' && buf[2] == 'f') {
+ lvl++;
+ }
+ }
+ continue;
+ }
+ }
+ for (p = buf; (p = strstr(p, LIBDEF_PREFIX)) != NULL; ) {
+ const LibDefHandler *ldh;
+ p += sizeof(LIBDEF_PREFIX)-1;
+ for (ldh = libdef_handlers; ldh->suffix != NULL; ldh++) {
+ size_t n, len = strlen(ldh->suffix);
+ if (!strncmp(p, ldh->suffix, len)) {
+ p += len;
+ n = ldh->stop ? strcspn(p, ldh->stop) : 0;
+ if (!p[n]) break;
+ p[n] = '\0';
+ ldh->func(ctx, p, ldh->arg);
+ p += n+1;
+ break;
+ }
+ }
+ if (ldh->suffix == NULL) {
+ buf[strlen(buf)-1] = '\0';
+ fprintf(stderr, "Error: unknown library definition tag %s%s\n",
+ LIBDEF_PREFIX, p);
+ exit(1);
+ }
+ }
+ }
+ fclose(fp);
+ if (ctx->mode == BUILD_libdef) {
+ libdef_endmodule(ctx);
+ }
+ }
+
+ if (ctx->mode == BUILD_ffdef) {
+ fprintf(ctx->fp, "\n#undef FFDEF\n\n");
+ fprintf(ctx->fp,
+ "#ifndef FF_NUM_ASMFUNC\n#define FF_NUM_ASMFUNC %d\n#endif\n\n",
+ ffasmfunc);
+ } else if (ctx->mode == BUILD_vmdef) {
+ fprintf(ctx->fp, "},\n\n");
+ } else if (ctx->mode == BUILD_bcdef) {
+ int i;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "LJ_DATADEF const uint16_t lj_bc_mode[] = {\n");
+ fprintf(ctx->fp, "BCDEF(BCMODE)\n");
+ for (i = ffasmfunc-1; i > 0; i--)
+ fprintf(ctx->fp, "BCMODE_FF,\n");
+ fprintf(ctx->fp, "BCMODE_FF\n};\n\n");
+ } else if (ctx->mode == BUILD_recdef) {
+ char *p = (char *)obuf;
+ fprintf(ctx->fp, "\n};\n\n");
+ fprintf(ctx->fp, "static const RecordFunc recff_func[] = {\n"
+ "recff_nyi,\n"
+ "recff_c");
+ while (*p) {
+ fprintf(ctx->fp, ",\nrecff_%s", p);
+ p += strlen(p)+1;
+ }
+ fprintf(ctx->fp, "\n};\n\n");
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm_libbc.h b/libs/luajit-cmake/luajit/src/host/buildvm_libbc.h
new file mode 100644
index 0000000..276463b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm_libbc.h
@@ -0,0 +1,81 @@
+/* This is a generated file. DO NOT EDIT! */
+
+static const int libbc_endian = 0;
+
+static const uint8_t libbc_code[] = {
+#if LJ_FR2
+/* math.deg */ 0,1,2,0,0,1,2,BC_MULVN,1,0,0,BC_RET1,1,2,0,241,135,158,166,3,
+220,203,178,130,4,
+/* math.rad */ 0,1,2,0,0,1,2,BC_MULVN,1,0,0,BC_RET1,1,2,0,243,244,148,165,20,
+198,190,199,252,3,
+/* string.len */ 0,1,2,0,0,0,3,BC_ISTYPE,0,5,0,BC_LEN,1,0,0,BC_RET1,1,2,0,
+/* table.foreachi */ 0,2,10,0,0,0,15,BC_ISTYPE,0,12,0,BC_ISTYPE,1,9,0,
+BC_KSHORT,2,1,0,BC_LEN,3,0,0,BC_KSHORT,4,1,0,BC_FORI,2,8,128,BC_MOV,6,1,0,
+BC_MOV,8,5,0,BC_TGETR,9,5,0,BC_CALL,6,3,2,BC_ISEQP,6,0,0,BC_JMP,7,1,128,
+BC_RET1,6,2,0,BC_FORL,2,248,127,BC_RET0,0,1,0,
+/* table.foreach */ 0,2,11,0,0,1,16,BC_ISTYPE,0,12,0,BC_ISTYPE,1,9,0,BC_KPRI,
+2,0,0,BC_MOV,3,0,0,BC_KNUM,4,0,0,BC_JMP,5,7,128,BC_MOV,7,1,0,BC_MOV,9,5,0,
+BC_MOV,10,6,0,BC_CALL,7,3,2,BC_ISEQP,7,0,0,BC_JMP,8,1,128,BC_RET1,7,2,0,
+BC_ITERN,5,3,3,BC_ITERL,5,247,127,BC_RET0,0,1,0,1,255,255,249,255,15,
+/* table.getn */ 0,1,2,0,0,0,3,BC_ISTYPE,0,12,0,BC_LEN,1,0,0,BC_RET1,1,2,0,
+/* table.remove */ 0,2,10,0,0,2,30,BC_ISTYPE,0,12,0,BC_LEN,2,0,0,BC_ISNEP,1,0,
+0,BC_JMP,3,7,128,BC_ISEQN,2,0,0,BC_JMP,3,23,128,BC_TGETR,3,2,0,BC_KPRI,4,0,0,
+BC_TSETR,4,2,0,BC_RET1,3,2,0,BC_JMP,3,18,128,BC_ISTYPE,1,14,0,BC_KSHORT,3,1,0,
+BC_ISGT,3,1,0,BC_JMP,3,14,128,BC_ISGT,1,2,0,BC_JMP,3,12,128,BC_TGETR,3,1,0,
+BC_ADDVN,4,1,1,BC_MOV,5,2,0,BC_KSHORT,6,1,0,BC_FORI,4,4,128,BC_SUBVN,8,1,7,
+BC_TGETR,9,7,0,BC_TSETR,9,8,0,BC_FORL,4,252,127,BC_KPRI,4,0,0,BC_TSETR,4,2,0,
+BC_RET1,3,2,0,BC_RET0,0,1,0,0,2,
+/* table.move */ 0,5,12,0,0,0,35,BC_ISTYPE,0,12,0,BC_ISTYPE,1,14,0,BC_ISTYPE,
+2,14,0,BC_ISTYPE,3,14,0,BC_ISNEP,4,0,0,BC_JMP,5,1,128,BC_MOV,4,0,0,BC_ISTYPE,
+4,12,0,BC_ISGT,1,2,0,BC_JMP,5,24,128,BC_SUBVV,5,1,3,BC_ISLT,2,3,0,BC_JMP,6,4,
+128,BC_ISLE,3,1,0,BC_JMP,6,2,128,BC_ISEQV,4,0,0,BC_JMP,6,9,128,BC_MOV,6,1,0,
+BC_MOV,7,2,0,BC_KSHORT,8,1,0,BC_FORI,6,4,128,BC_ADDVV,10,5,9,BC_TGETR,11,9,0,
+BC_TSETR,11,10,4,BC_FORL,6,252,127,BC_JMP,6,8,128,BC_MOV,6,2,0,BC_MOV,7,1,0,
+BC_KSHORT,8,255,255,BC_FORI,6,4,128,BC_ADDVV,10,5,9,BC_TGETR,11,9,0,BC_TSETR,
+11,10,4,BC_FORL,6,252,127,BC_RET1,4,2,0,
+#else
+/* math.deg */ 0,1,2,0,0,1,2,BC_MULVN,1,0,0,BC_RET1,1,2,0,241,135,158,166,3,
+220,203,178,130,4,
+/* math.rad */ 0,1,2,0,0,1,2,BC_MULVN,1,0,0,BC_RET1,1,2,0,243,244,148,165,20,
+198,190,199,252,3,
+/* string.len */ 0,1,2,0,0,0,3,BC_ISTYPE,0,5,0,BC_LEN,1,0,0,BC_RET1,1,2,0,
+/* table.foreachi */ 0,2,9,0,0,0,15,BC_ISTYPE,0,12,0,BC_ISTYPE,1,9,0,
+BC_KSHORT,2,1,0,BC_LEN,3,0,0,BC_KSHORT,4,1,0,BC_FORI,2,8,128,BC_MOV,6,1,0,
+BC_MOV,7,5,0,BC_TGETR,8,5,0,BC_CALL,6,3,2,BC_ISEQP,6,0,0,BC_JMP,7,1,128,
+BC_RET1,6,2,0,BC_FORL,2,248,127,BC_RET0,0,1,0,
+/* table.foreach */ 0,2,10,0,0,1,16,BC_ISTYPE,0,12,0,BC_ISTYPE,1,9,0,BC_KPRI,
+2,0,0,BC_MOV,3,0,0,BC_KNUM,4,0,0,BC_JMP,5,7,128,BC_MOV,7,1,0,BC_MOV,8,5,0,
+BC_MOV,9,6,0,BC_CALL,7,3,2,BC_ISEQP,7,0,0,BC_JMP,8,1,128,BC_RET1,7,2,0,
+BC_ITERN,5,3,3,BC_ITERL,5,247,127,BC_RET0,0,1,0,1,255,255,249,255,15,
+/* table.getn */ 0,1,2,0,0,0,3,BC_ISTYPE,0,12,0,BC_LEN,1,0,0,BC_RET1,1,2,0,
+/* table.remove */ 0,2,10,0,0,2,30,BC_ISTYPE,0,12,0,BC_LEN,2,0,0,BC_ISNEP,1,0,
+0,BC_JMP,3,7,128,BC_ISEQN,2,0,0,BC_JMP,3,23,128,BC_TGETR,3,2,0,BC_KPRI,4,0,0,
+BC_TSETR,4,2,0,BC_RET1,3,2,0,BC_JMP,3,18,128,BC_ISTYPE,1,14,0,BC_KSHORT,3,1,0,
+BC_ISGT,3,1,0,BC_JMP,3,14,128,BC_ISGT,1,2,0,BC_JMP,3,12,128,BC_TGETR,3,1,0,
+BC_ADDVN,4,1,1,BC_MOV,5,2,0,BC_KSHORT,6,1,0,BC_FORI,4,4,128,BC_SUBVN,8,1,7,
+BC_TGETR,9,7,0,BC_TSETR,9,8,0,BC_FORL,4,252,127,BC_KPRI,4,0,0,BC_TSETR,4,2,0,
+BC_RET1,3,2,0,BC_RET0,0,1,0,0,2,
+/* table.move */ 0,5,12,0,0,0,35,BC_ISTYPE,0,12,0,BC_ISTYPE,1,14,0,BC_ISTYPE,
+2,14,0,BC_ISTYPE,3,14,0,BC_ISNEP,4,0,0,BC_JMP,5,1,128,BC_MOV,4,0,0,BC_ISTYPE,
+4,12,0,BC_ISGT,1,2,0,BC_JMP,5,24,128,BC_SUBVV,5,1,3,BC_ISLT,2,3,0,BC_JMP,6,4,
+128,BC_ISLE,3,1,0,BC_JMP,6,2,128,BC_ISEQV,4,0,0,BC_JMP,6,9,128,BC_MOV,6,1,0,
+BC_MOV,7,2,0,BC_KSHORT,8,1,0,BC_FORI,6,4,128,BC_ADDVV,10,5,9,BC_TGETR,11,9,0,
+BC_TSETR,11,10,4,BC_FORL,6,252,127,BC_JMP,6,8,128,BC_MOV,6,2,0,BC_MOV,7,1,0,
+BC_KSHORT,8,255,255,BC_FORI,6,4,128,BC_ADDVV,10,5,9,BC_TGETR,11,9,0,BC_TSETR,
+11,10,4,BC_FORL,6,252,127,BC_RET1,4,2,0,
+#endif
+0
+};
+
+static const struct { const char *name; int ofs; } libbc_map[] = {
+{"math_deg",0},
+{"math_rad",25},
+{"string_len",50},
+{"table_foreachi",69},
+{"table_foreach",136},
+{"table_getn",213},
+{"table_remove",232},
+{"table_move",361},
+{NULL,508}
+};
+
diff --git a/libs/luajit-cmake/luajit/src/host/buildvm_peobj.c b/libs/luajit-cmake/luajit/src/host/buildvm_peobj.c
new file mode 100644
index 0000000..b030f23
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/buildvm_peobj.c
@@ -0,0 +1,379 @@
+/*
+** LuaJIT VM builder: PE object emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Only used for building on Windows, since we cannot assume the presence
+** of a suitable assembler. The host and target byte order must match.
+*/
+
+#include "buildvm.h"
+#include "lj_bc.h"
+
+#if LJ_TARGET_X86ORX64
+
+/* Context for PE object emitter. */
+static char *strtab;
+static size_t strtabofs;
+
+/* -- PE object definitions ----------------------------------------------- */
+
+/* PE header. */
+typedef struct PEheader {
+ uint16_t arch;
+ uint16_t nsects;
+ uint32_t time;
+ uint32_t symtabofs;
+ uint32_t nsyms;
+ uint16_t opthdrsz;
+ uint16_t flags;
+} PEheader;
+
+/* PE section. */
+typedef struct PEsection {
+ char name[8];
+ uint32_t vsize;
+ uint32_t vaddr;
+ uint32_t size;
+ uint32_t ofs;
+ uint32_t relocofs;
+ uint32_t lineofs;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t flags;
+} PEsection;
+
+/* PE relocation. */
+typedef struct PEreloc {
+ uint32_t vaddr;
+ uint32_t symidx;
+ uint16_t type;
+} PEreloc;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_RELOC_SIZE (4+4+2)
+
+/* PE symbol table entry. */
+typedef struct PEsym {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ } n;
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl;
+ uint8_t naux;
+} PEsym;
+
+/* PE symbol table auxiliary entry for a section. */
+typedef struct PEsymaux {
+ uint32_t size;
+ uint16_t nreloc;
+ uint16_t nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel;
+ uint8_t unused[3];
+} PEsymaux;
+
+/* Cannot use sizeof, because it pads up to the max. alignment. */
+#define PEOBJ_SYM_SIZE (8+4+2+2+1+1)
+
+/* PE object CPU specific defines. */
+#if LJ_TARGET_X86
+#define PEOBJ_ARCH_TARGET 0x014c
+#define PEOBJ_RELOC_REL32 0x14 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x06
+#define PEOBJ_RELOC_OFS 0
+#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
+#elif LJ_TARGET_X64
+#define PEOBJ_ARCH_TARGET 0x8664
+#define PEOBJ_RELOC_REL32 0x04 /* MS: REL32, GNU: DISP32. */
+#define PEOBJ_RELOC_DIR32 0x02
+#define PEOBJ_RELOC_ADDR32NB 0x03
+#define PEOBJ_RELOC_OFS 0
+#define PEOBJ_TEXT_FLAGS 0x60500020 /* 60=r+x, 50=align16, 20=code. */
+#endif
+
+/* Section numbers (0-based). */
+enum {
+ PEOBJ_SECT_ABS = -2,
+ PEOBJ_SECT_UNDEF = -1,
+ PEOBJ_SECT_TEXT,
+#if LJ_TARGET_X64
+ PEOBJ_SECT_PDATA,
+ PEOBJ_SECT_XDATA,
+#elif LJ_TARGET_X86
+ PEOBJ_SECT_SXDATA,
+#endif
+ PEOBJ_SECT_RDATA_Z,
+ PEOBJ_NSECTIONS
+};
+
+/* Symbol types. */
+#define PEOBJ_TYPE_NULL 0
+#define PEOBJ_TYPE_FUNC 0x20
+
+/* Symbol storage class. */
+#define PEOBJ_SCL_EXTERN 2
+#define PEOBJ_SCL_STATIC 3
+
+/* -- PE object emitter --------------------------------------------------- */
+
+/* Emit PE object symbol. */
+static void emit_peobj_sym(BuildCtx *ctx, const char *name, uint32_t value,
+ int sect, int type, int scl)
+{
+ PEsym sym;
+ size_t len = strlen(name);
+ if (!strtab) { /* Pass 1: only calculate string table length. */
+ if (len > 8) strtabofs += len+1;
+ return;
+ }
+ if (len <= 8) {
+ memcpy(sym.n.name, name, len);
+ memset(sym.n.name+len, 0, 8-len);
+ } else {
+ sym.n.nameref[0] = 0;
+ sym.n.nameref[1] = (uint32_t)strtabofs;
+ memcpy(strtab + strtabofs, name, len);
+ strtab[strtabofs+len] = 0;
+ strtabofs += len+1;
+ }
+ sym.value = value;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = (uint16_t)type;
+ sym.scl = (uint8_t)scl;
+ sym.naux = 0;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+}
+
+/* Emit PE object section symbol. */
+static void emit_peobj_sym_sect(BuildCtx *ctx, PEsection *pesect, int sect)
+{
+ PEsym sym;
+ PEsymaux aux;
+ if (!strtab) return; /* Pass 1: no output. */
+ memcpy(sym.n.name, pesect[sect].name, 8);
+ sym.value = 0;
+ sym.sect = (int16_t)(sect+1); /* 1-based section number. */
+ sym.type = PEOBJ_TYPE_NULL;
+ sym.scl = PEOBJ_SCL_STATIC;
+ sym.naux = 1;
+ owrite(ctx, &sym, PEOBJ_SYM_SIZE);
+ memset(&aux, 0, sizeof(PEsymaux));
+ aux.size = pesect[sect].size;
+ aux.nreloc = pesect[sect].nreloc;
+ owrite(ctx, &aux, PEOBJ_SYM_SIZE);
+}
+
+/* Emit Windows PE object file. */
+void emit_peobj(BuildCtx *ctx)
+{
+ PEheader pehdr;
+ PEsection pesect[PEOBJ_NSECTIONS];
+ uint32_t sofs;
+ int i, nrsym;
+ union { uint8_t b; uint32_t u; } host_endian;
+
+ sofs = sizeof(PEheader) + PEOBJ_NSECTIONS*sizeof(PEsection);
+
+ /* Fill in PE sections. */
+ memset(&pesect, 0, PEOBJ_NSECTIONS*sizeof(PEsection));
+ memcpy(pesect[PEOBJ_SECT_TEXT].name, ".text", sizeof(".text")-1);
+ pesect[PEOBJ_SECT_TEXT].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].size = (uint32_t)ctx->codesz);
+ pesect[PEOBJ_SECT_TEXT].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_TEXT].nreloc = (uint16_t)ctx->nreloc) * PEOBJ_RELOC_SIZE;
+ /* Flags: 60 = read+execute, 50 = align16, 20 = code. */
+ pesect[PEOBJ_SECT_TEXT].flags = PEOBJ_TEXT_FLAGS;
+
+#if LJ_TARGET_X64
+ memcpy(pesect[PEOBJ_SECT_PDATA].name, ".pdata", sizeof(".pdata")-1);
+ pesect[PEOBJ_SECT_PDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].size = 6*4);
+ pesect[PEOBJ_SECT_PDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_PDATA].nreloc = 6) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_PDATA].flags = 0x40300040;
+
+ memcpy(pesect[PEOBJ_SECT_XDATA].name, ".xdata", sizeof(".xdata")-1);
+ pesect[PEOBJ_SECT_XDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].size = 8*2+4+6*2); /* See below. */
+ pesect[PEOBJ_SECT_XDATA].relocofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_XDATA].nreloc = 1) * PEOBJ_RELOC_SIZE;
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_XDATA].flags = 0x40300040;
+#elif LJ_TARGET_X86
+ memcpy(pesect[PEOBJ_SECT_SXDATA].name, ".sxdata", sizeof(".sxdata")-1);
+ pesect[PEOBJ_SECT_SXDATA].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_SXDATA].size = 4);
+ pesect[PEOBJ_SECT_SXDATA].relocofs = sofs;
+ /* Flags: 40 = read, 30 = align4, 02 = lnk_info, 40 = initialized data. */
+ pesect[PEOBJ_SECT_SXDATA].flags = 0x40300240;
+#endif
+
+ memcpy(pesect[PEOBJ_SECT_RDATA_Z].name, ".rdata$Z", sizeof(".rdata$Z")-1);
+ pesect[PEOBJ_SECT_RDATA_Z].ofs = sofs;
+ sofs += (pesect[PEOBJ_SECT_RDATA_Z].size = (uint32_t)strlen(ctx->dasm_ident)+1);
+ /* Flags: 40 = read, 30 = align4, 40 = initialized data. */
+ pesect[PEOBJ_SECT_RDATA_Z].flags = 0x40300040;
+
+ /* Fill in PE header. */
+ pehdr.arch = PEOBJ_ARCH_TARGET;
+ pehdr.nsects = PEOBJ_NSECTIONS;
+ pehdr.time = 0; /* Timestamp is optional. */
+ pehdr.symtabofs = sofs;
+ pehdr.opthdrsz = 0;
+ pehdr.flags = 0;
+
+ /* Compute the size of the symbol table:
+ ** @feat.00 + nsections*2
+ ** + asm_start + nsym
+ ** + nrsym
+ */
+ nrsym = ctx->nrelocsym;
+ pehdr.nsyms = 1+PEOBJ_NSECTIONS*2 + 1+ctx->nsym + nrsym;
+#if LJ_TARGET_X64
+ pehdr.nsyms += 1; /* Symbol for lj_err_unwind_win. */
+#endif
+
+ /* Write PE object header and all sections. */
+ owrite(ctx, &pehdr, sizeof(PEheader));
+ owrite(ctx, &pesect, sizeof(PEsection)*PEOBJ_NSECTIONS);
+
+ /* Write .text section. */
+ host_endian.u = 1;
+ if (host_endian.b != LJ_ENDIAN_SELECT(1, 0)) {
+ fprintf(stderr, "Error: different byte order for host and target\n");
+ exit(1);
+ }
+ owrite(ctx, ctx->code, ctx->codesz);
+ for (i = 0; i < ctx->nreloc; i++) {
+ PEreloc reloc;
+ reloc.vaddr = (uint32_t)ctx->reloc[i].ofs + PEOBJ_RELOC_OFS;
+ reloc.symidx = 1+2+ctx->reloc[i].sym; /* Reloc syms are after .text sym. */
+ reloc.type = ctx->reloc[i].type ? PEOBJ_RELOC_REL32 : PEOBJ_RELOC_DIR32;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+
+#if LJ_TARGET_X64
+ { /* Write .pdata section. */
+ uint32_t fcofs = (uint32_t)ctx->sym[ctx->nsym-1].ofs;
+ uint32_t pdata[3]; /* Start of .text, end of .text and .xdata. */
+ PEreloc reloc;
+ pdata[0] = 0; pdata[1] = fcofs; pdata[2] = 0;
+ owrite(ctx, &pdata, sizeof(pdata));
+ pdata[0] = fcofs; pdata[1] = (uint32_t)ctx->codesz; pdata[2] = 20;
+ owrite(ctx, &pdata, sizeof(pdata));
+ reloc.vaddr = 0; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 4; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 8; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 12; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 16; reloc.symidx = 1+2+nrsym+2+2+1;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ reloc.vaddr = 20; reloc.symidx = 1+2+nrsym+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+ { /* Write .xdata section. */
+ uint16_t xdata[8+2+6];
+ PEreloc reloc;
+ xdata[0] = 0x01|0x08|0x10; /* Ver. 1, uhandler/ehandler, prolog size 0. */
+ xdata[1] = 0x0005; /* Number of unwind codes, no frame pointer. */
+ xdata[2] = 0x4200; /* Stack offset 4*8+8 = aword*5. */
+ xdata[3] = 0x3000; /* Push rbx. */
+ xdata[4] = 0x6000; /* Push rsi. */
+ xdata[5] = 0x7000; /* Push rdi. */
+ xdata[6] = 0x5000; /* Push rbp. */
+ xdata[7] = 0; /* Alignment. */
+ xdata[8] = xdata[9] = 0; /* Relocated address of exception handler. */
+ xdata[10] = 0x01; /* Ver. 1, no handler, prolog size 0. */
+ xdata[11] = 0x1504; /* Number of unwind codes, fp = rbp, fpofs = 16. */
+ xdata[12] = 0x0300; /* set_fpreg. */
+ xdata[13] = 0x0200; /* stack offset 0*8+8 = aword*1. */
+ xdata[14] = 0x3000; /* Push rbx. */
+ xdata[15] = 0x5000; /* Push rbp. */
+ owrite(ctx, &xdata, sizeof(xdata));
+ reloc.vaddr = 2*8; reloc.symidx = 1+2+nrsym+2+2;
+ reloc.type = PEOBJ_RELOC_ADDR32NB;
+ owrite(ctx, &reloc, PEOBJ_RELOC_SIZE);
+ }
+#elif LJ_TARGET_X86
+ /* Write .sxdata section. */
+ for (i = 0; i < nrsym; i++) {
+ if (!strcmp(ctx->relocsym[i], "_lj_err_unwind_win")) {
+ uint32_t symidx = 1+2+i;
+ owrite(ctx, &symidx, 4);
+ break;
+ }
+ }
+ if (i == nrsym) {
+ fprintf(stderr, "Error: extern lj_err_unwind_win not used\n");
+ exit(1);
+ }
+#endif
+
+ /* Write .rdata$Z section. */
+ owrite(ctx, ctx->dasm_ident, strlen(ctx->dasm_ident)+1);
+
+ /* Write symbol table. */
+ strtab = NULL; /* 1st pass: collect string sizes. */
+ for (;;) {
+ strtabofs = 4;
+ /* Mark as SafeSEH compliant. */
+ emit_peobj_sym(ctx, "@feat.00", 1,
+ PEOBJ_SECT_ABS, PEOBJ_TYPE_NULL, PEOBJ_SCL_STATIC);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_TEXT);
+ for (i = 0; i < nrsym; i++)
+ emit_peobj_sym(ctx, ctx->relocsym[i], 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+#if LJ_TARGET_X64
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_PDATA);
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_XDATA);
+ emit_peobj_sym(ctx, "lj_err_unwind_win", 0,
+ PEOBJ_SECT_UNDEF, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+#elif LJ_TARGET_X86
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_SXDATA);
+#endif
+
+ emit_peobj_sym(ctx, ctx->beginsym, 0,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_NULL, PEOBJ_SCL_EXTERN);
+ for (i = 0; i < ctx->nsym; i++)
+ emit_peobj_sym(ctx, ctx->sym[i].name, (uint32_t)ctx->sym[i].ofs,
+ PEOBJ_SECT_TEXT, PEOBJ_TYPE_FUNC, PEOBJ_SCL_EXTERN);
+
+ emit_peobj_sym_sect(ctx, pesect, PEOBJ_SECT_RDATA_Z);
+
+ if (strtab)
+ break;
+ /* 2nd pass: alloc strtab, write syms and copy strings. */
+ strtab = (char *)malloc(strtabofs);
+ *(uint32_t *)strtab = (uint32_t)strtabofs;
+ }
+
+ /* Write string table. */
+ owrite(ctx, strtab, strtabofs);
+}
+
+#else
+
+void emit_peobj(BuildCtx *ctx)
+{
+ UNUSED(ctx);
+ fprintf(stderr, "Error: no PE object support for this target\n");
+ exit(1);
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/host/genlibbc.lua b/libs/luajit-cmake/luajit/src/host/genlibbc.lua
new file mode 100644
index 0000000..ba18812
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/genlibbc.lua
@@ -0,0 +1,225 @@
+----------------------------------------------------------------------------
+-- Lua script to dump the bytecode of the library functions written in Lua.
+-- The resulting 'buildvm_libbc.h' is used for the build process of LuaJIT.
+----------------------------------------------------------------------------
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+
+local ffi = require("ffi")
+local bit = require("bit")
+local vmdef = require("jit.vmdef")
+local bcnames = vmdef.bcnames
+
+local format = string.format
+
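+-- Byte 5 of a bytecode dump is the flags byte; its lowest bit
+-- (BCDUMP_F_BE) is set for big-endian targets.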
+local isbe = (string.byte(string.dump(function() end), 5) % 2 == 1)
+
+local function usage(arg)
+ io.stderr:write("Usage: ", arg and arg[0] or "genlibbc",
+ " [-o buildvm_libbc.h] lib_*.c\n")
+ os.exit(1)
+end
+
+local function parse_arg(arg)
+ local outfile = "-"
+ if not (arg and arg[1]) then
+ usage(arg)
+ end
+ if arg[1] == "-o" then
+ outfile = arg[2]
+ if not outfile then usage(arg) end
+ table.remove(arg, 1)
+ table.remove(arg, 1)
+ end
+ return outfile
+end
+
+local function read_files(names)
+ local src = ""
+ for _,name in ipairs(names) do
+ local fp = assert(io.open(name))
+ src = src .. fp:read("*a")
+ fp:close()
+ end
+ return src
+end
+
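+-- Rewrite the CHECK_*() and PAIRS() markers embedded in the C sources
+-- into recognizable constants; fixup_dump() below patches the bytecode
+-- that loads them (negative KSHORT operands starting at -29999).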
+local function transform_lua(code)
+ local fixup = {}
+ local n = -30000
+ code = string.gsub(code, "CHECK_(%w*)%((.-)%)", function(tp, var)
+ n = n + 1
+ fixup[n] = { "CHECK", tp }
+ return format("%s=%d", var, n)
+ end)
+ code = string.gsub(code, "PAIRS%((.-)%)", function(var)
+ fixup.PAIRS = true
+ return format("nil, %s, 0x4dp80", var)
+ end)
+ return "return "..code, fixup
+end
+
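+-- Read a ULEB128-encoded number at FFI byte pointer p; returns the
+-- advanced pointer and the decoded value.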
+local function read_uleb128(p)
+ local v = p[0]; p = p + 1
+ if v >= 128 then
+ local sh = 7; v = v - 128
+ repeat
+ local r = p[0]
+ v = v + bit.lshift(bit.band(r, 127), sh)
+ sh = sh + 7
+ p = p + 1
+ until r < 128
+ end
+ return p, v
+end
+
+-- ORDER LJ_T
+local name2itype = {
+ str = 5, func = 9, tab = 12, int = 14, num = 15
+}
+
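+-- vmdef.bcnames is one string of 6-character, space-padded opcode names.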
+local BC, BCN = {}, {}
+for i=0,#bcnames/6-1 do
+ local name = bcnames:sub(i*6+1, i*6+6):gsub(" ", "")
+ BC[name] = i
+ BCN[i] = name
+end
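+-- Byte offsets of the OP/RA/RC/RB fields within each 32 bit bytecode
+-- instruction, depending on endianness.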
+local xop, xra = isbe and 3 or 0, isbe and 2 or 1
+local xrc, xrb = isbe and 1 or 2, isbe and 0 or 3
+
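+-- Patch the dump in place: skip the 5-byte dump header, the prototype
+-- length and the fixed prototype header fields to reach the instructions.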
+local function fixup_dump(dump, fixup)
+ local buf = ffi.new("uint8_t[?]", #dump+1, dump)
+ local p = buf+5
+ local n, sizebc
+ p, n = read_uleb128(p)
+ local start = p
+ p = p + 4
+ p = read_uleb128(p)
+ p = read_uleb128(p)
+ p, sizebc = read_uleb128(p)
+ local startbc = tonumber(p - start)
+ local rawtab = {}
+ for i=0,sizebc-1 do
+ local op = p[xop]
+ if op == BC.KSHORT then
+ local rd = p[xrc] + 256*p[xrb]
+ rd = bit.arshift(bit.lshift(rd, 16), 16)
+ local f = fixup[rd]
+ if f then
+ if f[1] == "CHECK" then
+ local tp = f[2]
+ if tp == "tab" then rawtab[p[xra]] = true end
+ p[xop] = tp == "num" and BC.ISNUM or BC.ISTYPE
+ p[xrb] = 0
+ p[xrc] = name2itype[tp]
+ else
+ error("unhandled fixup type: "..f[1])
+ end
+ end
+ elseif op == BC.TGETV then
+ if rawtab[p[xrb]] then
+ p[xop] = BC.TGETR
+ end
+ elseif op == BC.TSETV then
+ if rawtab[p[xrb]] then
+ p[xop] = BC.TSETR
+ end
+ elseif op == BC.ITERC then
+ if fixup.PAIRS then
+ p[xop] = BC.ITERN
+ end
+ end
+ p = p + 4
+ end
+ local ndump = ffi.string(start, n)
+ -- Fixup hi-part of 0x4dp80 to LJ_KEYINDEX.
+ ndump = ndump:gsub("\x80\x80\xcd\xaa\x04", "\xff\xff\xf9\xff\x0f")
+ return { dump = ndump, startbc = startbc, sizebc = sizebc }
+end
+
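+-- Scan the C sources for LJLIB_LUA(name) /* Lua code */ definitions,
+-- compile the embedded code and record its fixed-up dump.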
+local function find_defs(src)
+ local defs = {}
+ for name, code in string.gmatch(src, "LJLIB_LUA%(([^)]*)%)%s*/%*(.-)%*/") do
+ local env = {}
+ local tcode, fixup = transform_lua(code)
+ local func = assert(load(tcode, "", nil, env))()
+ defs[name] = fixup_dump(string.dump(func, true), fixup)
+ defs[#defs+1] = name
+ end
+ return defs
+end
+
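+-- Emit buildvm_libbc.h: the concatenated bytecode dumps as a C array
+-- plus a name -> offset map.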
+local function gen_header(defs)
+ local t = {}
+ local function w(x) t[#t+1] = x end
+ w("/* This is a generated file. DO NOT EDIT! */\n\n")
+ w("static const int libbc_endian = ") w(isbe and 1 or 0) w(";\n\n")
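+  -- sb shadows s byte-for-byte: byte value i marks the first byte of
+  -- function i, 255 marks an opcode byte (emitted as a BC_* constant),
+  -- and 0 marks plain data bytes.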
+ local s, sb = "", ""
+ for i,name in ipairs(defs) do
+ local d = defs[name]
+ s = s .. d.dump
+ sb = sb .. string.char(i) .. ("\0"):rep(d.startbc - 1)
+ .. (isbe and "\0\0\0\255" or "\255\0\0\0"):rep(d.sizebc)
+ .. ("\0"):rep(#d.dump - d.startbc - d.sizebc*4)
+ end
+ w("static const uint8_t libbc_code[] = {\n")
+ local n = 0
+ for i=1,#s do
+ local x = string.byte(s, i)
+ local xb = string.byte(sb, i)
+ if xb == 255 then
+ local name = BCN[x]
+ local m = #name + 4
+ if n + m > 78 then n = 0; w("\n") end
+ n = n + m
+ w("BC_"); w(name)
+ else
+ local m = x < 10 and 2 or (x < 100 and 3 or 4)
+ if xb == 0 then
+ if n + m > 78 then n = 0; w("\n") end
+ else
+ local name = defs[xb]:gsub("_", ".")
+ if n ~= 0 then w("\n") end
+ w("/* "); w(name); w(" */ ")
+ n = #name + 7
+ end
+ n = n + m
+ w(x)
+ end
+ w(",")
+ end
+ w("\n0\n};\n\n")
+ w("static const struct { const char *name; int ofs; } libbc_map[] = {\n")
+ local m = 0
+ for _,name in ipairs(defs) do
+ w('{"'); w(name); w('",'); w(m) w('},\n')
+ m = m + #defs[name].dump
+ end
+ w("{NULL,"); w(m); w("}\n};\n\n")
+ return table.concat(t)
+end
+
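+-- Write the generated header, but leave the file untouched when nothing
+-- changed, so dependent build steps are not re-triggered.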
+local function write_file(name, data)
+ if name == "-" then
+ assert(io.write(data))
+ assert(io.flush())
+ else
+ local fp = io.open(name)
+ if fp then
+ local old = fp:read("*a")
+ fp:close()
+ if data == old then return end
+ end
+ fp = assert(io.open(name, "w"))
+ assert(fp:write(data))
+ assert(fp:close())
+ end
+end
+
+local outfile = parse_arg(arg)
+local src = read_files(arg)
+local defs = find_defs(src)
+local hdr = gen_header(defs)
+write_file(outfile, hdr)
+
diff --git a/libs/luajit-cmake/luajit/src/host/genminilua.lua b/libs/luajit-cmake/luajit/src/host/genminilua.lua
new file mode 100644
index 0000000..e8e86c5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/genminilua.lua
@@ -0,0 +1,436 @@
+----------------------------------------------------------------------------
+-- Lua script to generate a customized, minified version of Lua.
+-- The resulting 'minilua' is used for the build process of LuaJIT.
+----------------------------------------------------------------------------
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
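+--
+-- Typical invocation, as a sketch (any Lua 5.1+ or LuaJIT interpreter
+-- works; the result goes to stdout):
+--   lua host/genminilua.lua path/to/lua-5.1.5 > host/minilua.c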
+
+local sub, match, gsub = string.sub, string.match, string.gsub
+
+local LUA_VERSION = "5.1.5"
+local LUA_SOURCE
+
+local function usage()
+ io.stderr:write("Usage: ", arg and arg[0] or "genminilua",
+ " lua-", LUA_VERSION, "-source-dir\n")
+ os.exit(1)
+end
+
+local function find_sources()
+ LUA_SOURCE = arg and arg[1]
+ if not LUA_SOURCE then usage() end
+ if sub(LUA_SOURCE, -1) ~= "/" then LUA_SOURCE = LUA_SOURCE.."/" end
+ local fp = io.open(LUA_SOURCE .. "lua.h")
+ if not fp then
+ LUA_SOURCE = LUA_SOURCE.."src/"
+ fp = io.open(LUA_SOURCE .. "lua.h")
+ if not fp then usage() end
+ end
+ local all = fp:read("*a")
+ fp:close()
+ if not match(all, 'LUA_RELEASE%s*"Lua '..LUA_VERSION..'"') then
+ io.stderr:write("Error: version mismatch\n")
+ usage()
+ end
+end
+
+local LUA_FILES = {
+"lmem.c", "lobject.c", "ltm.c", "lfunc.c", "ldo.c", "lstring.c", "ltable.c",
+"lgc.c", "lstate.c", "ldebug.c", "lzio.c", "lopcodes.c",
+"llex.c", "lcode.c", "lparser.c", "lvm.c", "lapi.c", "lauxlib.c",
+"lbaselib.c", "ltablib.c", "liolib.c", "loslib.c", "lstrlib.c", "linit.c",
+}
+
+local REMOVE_LIB = {}
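+-- gsub is used purely as an iterator here: it collects the
+-- whitespace-separated names below into the removal set.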
+gsub([[
+collectgarbage dofile gcinfo getfenv getmetatable load print rawequal rawset
+select tostring xpcall
+foreach foreachi getn maxn setn
+popen tmpfile seek setvbuf __tostring
+clock date difftime execute getenv rename setlocale time tmpname
+dump gfind len reverse
+LUA_LOADLIBNAME LUA_MATHLIBNAME LUA_DBLIBNAME
+]], "%S+", function(name)
+ REMOVE_LIB[name] = true
+end)
+
+local REMOVE_EXTINC = { ["<assert.h>"] = true, ["<locale.h>"] = true, }
+
+local CUSTOM_MAIN = [[
+typedef unsigned int UB;
+static UB barg(lua_State *L,int idx){
+union{lua_Number n;U64 b;}bn;
+bn.n=lua_tonumber(L,idx)+6755399441055744.0;
+if (bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
+return(UB)bn.b;
+}
+#define BRET(b) lua_pushnumber(L,(lua_Number)(int)(b));return 1;
+static int tobit(lua_State *L){
+BRET(barg(L,1))}
+static int bnot(lua_State *L){
+BRET(~barg(L,1))}
+static int band(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
+static int bor(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
+static int bxor(lua_State *L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
+static int lshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
+static int rshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
+static int arshift(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
+static int rol(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
+static int ror(lua_State *L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
+static int bswap(lua_State *L){
+UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
+static int tohex(lua_State *L){
+UB b=barg(L,1);
+int n=lua_isnone(L,2)?8:(int)barg(L,2);
+const char *hexdigits="0123456789abcdef";
+char buf[8];
+int i;
+if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
+if(n>8)n=8;
+for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
+lua_pushlstring(L,buf,(size_t)n);
+return 1;
+}
+static const struct luaL_Reg bitlib[] = {
+{"tobit",tobit},
+{"bnot",bnot},
+{"band",band},
+{"bor",bor},
+{"bxor",bxor},
+{"lshift",lshift},
+{"rshift",rshift},
+{"arshift",arshift},
+{"rol",rol},
+{"ror",ror},
+{"bswap",bswap},
+{"tohex",tohex},
+{NULL,NULL}
+};
+int main(int argc, char **argv){
+ lua_State *L = luaL_newstate();
+ int i;
+ luaL_openlibs(L);
+ luaL_register(L, "bit", bitlib);
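+ /* With no arguments the exit status reports the host pointer size,
+ which build scripts can use to probe for a 32 vs. 64 bit host. */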
+ if (argc < 2) return sizeof(void *);
+ lua_createtable(L, 0, 1);
+ lua_pushstring(L, argv[1]);
+ lua_rawseti(L, -2, 0);
+ lua_setglobal(L, "arg");
+ if (luaL_loadfile(L, argv[1]))
+ goto err;
+ for (i = 2; i < argc; i++)
+ lua_pushstring(L, argv[i]);
+ if (lua_pcall(L, argc - 2, 0, 0)) {
+ err:
+ fprintf(stderr, "Error: %s\n", lua_tostring(L, -1));
+ return 1;
+ }
+ lua_close(L);
+ return 0;
+}
+]]
+
+local function read_sources()
+ local t = {}
+ for i, name in ipairs(LUA_FILES) do
+ local fp = assert(io.open(LUA_SOURCE..name, "r"))
+ t[i] = fp:read("*a")
+ assert(fp:close())
+ end
+ t[#t+1] = CUSTOM_MAIN
+ return table.concat(t)
+end
+
+local includes = {}
+
+local function merge_includes(src)
+ return gsub(src, '#include%s*"([^"]*)"%s*\n', function(name)
+ if includes[name] then return "" end
+ includes[name] = true
+ local fp = assert(io.open(LUA_SOURCE..name, "r"))
+ local inc = fp:read("*a")
+ assert(fp:close())
+ inc = gsub(inc, "#ifndef%s+%w+_h\n#define%s+%w+_h\n", "")
+ inc = gsub(inc, "#endif%s*$", "")
+ return merge_includes(inc)
+ end)
+end
+
+local function get_license(src)
+ return match(src, "/%*+\n%* Copyright %(.-%*/\n")
+end
+
+local function fold_lines(src)
+ return gsub(src, "\\\n", " ")
+end
+
+local strings = {}
+
+local function save_str(str)
+ local n = #strings+1
+ strings[n] = str
+ return "\1"..n.."\2"
+end
+
+local function save_strings(src)
+ src = gsub(src, '"[^"\n]*"', save_str)
+ return gsub(src, "'[^'\n]*'", save_str)
+end
+
+local function restore_strings(src)
+ return gsub(src, "\1(%d+)\2", function(numstr)
+ return strings[tonumber(numstr)]
+ end)
+end
+
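+-- Preprocessor conditions treated as true when minifying; every other
+-- #if/#ifdef evaluates to false and the guarded code is dropped.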
+local function def_istrue(def)
+ return def == "INT_MAX > 2147483640L" or
+ def == "LUAI_BITSINT >= 32" or
+ def == "SIZE_Bx < LUAI_BITSINT-1" or
+ def == "cast" or
+ def == "defined(LUA_CORE)" or
+ def == "MINSTRTABSIZE" or
+ def == "LUA_MINBUFFER" or
+ def == "HARDSTACKTESTS" or
+ def == "UNUSED"
+end
+
+local head, defs = {[[
+#ifdef _MSC_VER
+typedef unsigned __int64 U64;
+#else
+typedef unsigned long long U64;
+#endif
+int _CRT_glob = 0;
+]]}, {}
+
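+-- Minimal single-pass C preprocessor: tracks #if nesting with
+-- def_istrue() deciding truth, inlines object-like #defines, re-emits
+-- function-like ones, then substitutes the collected defines throughout.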
+local function preprocess(src)
+ local t = { match(src, "^(.-)#") }
+ local lvl, on, oldon = 0, true, {}
+ for pp, def, txt in string.gmatch(src, "#(%w+) *([^\n]*)\n([^#]*)") do
+ if pp == "if" or pp == "ifdef" or pp == "ifndef" then
+ lvl = lvl + 1
+ oldon[lvl] = on
+ on = def_istrue(def)
+ elseif pp == "else" then
+ if oldon[lvl] then
+ if on == false then on = true else on = false end
+ end
+ elseif pp == "elif" then
+ if oldon[lvl] then
+ on = def_istrue(def)
+ end
+ elseif pp == "endif" then
+ on = oldon[lvl]
+ lvl = lvl - 1
+ elseif on then
+ if pp == "include" then
+ if not head[def] and not REMOVE_EXTINC[def] then
+ head[def] = true
+ head[#head+1] = "#include "..def.."\n"
+ end
+ elseif pp == "define" then
+ local k, sp, v = match(def, "([%w_]+)(%s*)(.*)")
+ if k and not (sp == "" and sub(v, 1, 1) == "(") then
+ defs[k] = gsub(v, "%a[%w_]*", function(tok)
+ return defs[tok] or tok
+ end)
+ else
+ t[#t+1] = "#define "..def.."\n"
+ end
+ elseif pp ~= "undef" then
+ error("unexpected directive: "..pp.." "..def)
+ end
+ end
+ if on then t[#t+1] = txt end
+ end
+ return gsub(table.concat(t), "%a[%w_]*", function(tok)
+ return defs[tok] or tok
+ end)
+end
+
+local function merge_header(src, license)
+ local hdr = string.format([[
+/* This is a heavily customized and minimized copy of Lua %s. */
+/* It's only used to build LuaJIT. It does NOT have all standard functions! */
+]], LUA_VERSION)
+ return hdr..license..table.concat(head)..src
+end
+
+local function strip_unused1(src)
+ return gsub(src, '( {"?([%w_]+)"?,%s+%a[%w_]*},\n)', function(line, func)
+ return REMOVE_LIB[func] and "" or line
+ end)
+end
+
+local function strip_unused2(src)
+ return gsub(src, "Symbolic Execution.-}=", "")
+end
+
+local function strip_unused3(src)
+ src = gsub(src, "extern", "static")
+ src = gsub(src, "\nstatic([^\n]-)%(([^)]*)%)%(", "\nstatic%1 %2(")
+ src = gsub(src, "#define lua_assert[^\n]*\n", "")
+ src = gsub(src, "lua_assert%b();?", "")
+ src = gsub(src, "default:\n}", "default:;\n}")
+ src = gsub(src, "lua_lock%b();", "")
+ src = gsub(src, "lua_unlock%b();", "")
+ src = gsub(src, "luai_threadyield%b();", "")
+ src = gsub(src, "luai_userstateopen%b();", "{}")
+ src = gsub(src, "luai_userstate%w+%b();", "")
+ src = gsub(src, "%(%(c==.*luaY_parser%)", "luaY_parser")
+ src = gsub(src, "trydecpoint%(ls,seminfo%)",
+ "luaX_lexerror(ls,\"malformed number\",TK_NUMBER)")
+ src = gsub(src, "int c=luaZ_lookahead%b();", "")
+ src = gsub(src, "luaL_register%(L,[^,]*,co_funcs%);\nreturn 2;",
+ "return 1;")
+ src = gsub(src, "getfuncname%b():", "NULL:")
+ src = gsub(src, "getobjname%b():", "NULL:")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*%)\n[^\n]*\n", "")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*%)%b{}\n", "")
+ src = gsub(src, "if%([^\n]*hookmask[^\n]*&&\n[^\n]*%b{}\n", "")
+ src = gsub(src, "(twoto%b()%()", "%1(size_t)")
+ src = gsub(src, "i<sizenode", "i<(int)sizenode")
+ src = gsub(src, "cast%(unsigned int,key%-1%)", "cast(unsigned int,key)-1")
+ return gsub(src, "\n\n+", "\n")
+end
+
+local function strip_comments(src)
+ return gsub(src, "/%*.-%*/", " ")
+end
+
+local function strip_whitespace(src)
+ src = gsub(src, "^%s+", "")
+ src = gsub(src, "%s*\n%s*", "\n")
+ src = gsub(src, "[ \t]+", " ")
+ src = gsub(src, "(%W) ", "%1")
+ return gsub(src, " (%W)", "%1")
+end
+
+local function rename_tokens1(src)
+ src = gsub(src, "getline", "getline_")
+ src = gsub(src, "struct ([%w_]+)", "ZX%1")
+ return gsub(src, "union ([%w_]+)", "ZY%1")
+end
+
+local function rename_tokens2(src)
+ src = gsub(src, "ZX([%w_]+)", "struct %1")
+ return gsub(src, "ZY([%w_]+)", "union %1")
+end
+
+local function fix_bugs_and_warnings(src)
+ src = gsub(src, "(luaD_checkstack%(L,p%->maxstacksize)%)", "%1+p->numparams)")
+ src = gsub(src, "if%(sep==%-1%)(return'%[';)\nelse (luaX_lexerror%b();)", "if (sep!=-1)%2\n%1")
+ return gsub(src, "(default:{\nNode%*n=mainposition)", "/*fallthrough*/\n%1")
+end
+
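+-- Dead code elimination: func_gather() splits the source into top-level
+-- definitions, func_visit() marks everything transitively reachable from
+-- main(), and func_collect() drops the rest.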
+local function func_gather(src)
+ local nodes, list = {}, {}
+ local pos, len = 1, #src
+ while pos < len do
+ local d, w = match(src, "^(#define ([%w_]+)[^\n]*\n)", pos)
+ if d then
+ local n = #list+1
+ list[n] = d
+ nodes[w] = n
+ else
+ local s
+ d, w, s = match(src, "^(([%w_]+)[^\n]*([{;])\n)", pos)
+ if not d then
+ d, w, s = match(src, "^(([%w_]+)[^(]*%b()([{;])\n)", pos)
+ if not d then d = match(src, "^[^\n]*\n", pos) end
+ end
+ if s == "{" then
+ d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
+ if sub(d, -2) == "{\n" then
+ d = d..sub(match(src, "^%b{}[^;\n]*;?\n", pos+#d-2), 3)
+ end
+ end
+ local k, v = nil, d
+ if w == "typedef" then
+ if match(d, "^typedef enum") then
+ head[#head+1] = d
+ else
+ k = match(d, "([%w_]+);\n$")
+ if not k then k = match(d, "^.-%(.-([%w_]+)%)%(") end
+ end
+ elseif w == "enum" then
+ head[#head+1] = v
+ elseif w ~= nil then
+ k = match(d, "^[^\n]-([%w_]+)[(%[=]")
+ if k then
+ if w ~= "static" and k ~= "main" then v = "static "..d end
+ else
+ k = w
+ end
+ end
+ if w and k then
+ local o = nodes[k]
+ if o then nodes["*"..k] = o end
+ local n = #list+1
+ list[n] = v
+ nodes[k] = n
+ end
+ end
+ pos = pos + #d
+ end
+ return nodes, list
+end
+
+local function func_visit(nodes, list, used, n)
+ local i = nodes[n]
+ for m in string.gmatch(list[i], "[%w_]+") do
+ if nodes[m] then
+ local j = used[m]
+ if not j then
+ used[m] = i
+ func_visit(nodes, list, used, m)
+ elseif i < j then
+ used[m] = i
+ end
+ end
+ end
+end
+
+local function func_collect(src)
+ local nodes, list = func_gather(src)
+ local used = {}
+ func_visit(nodes, list, used, "main")
+ for n,i in pairs(nodes) do
+ local j = used[n]
+ if j and j < i then used["*"..n] = j end
+ end
+ for n,i in pairs(nodes) do
+ if not used[n] then list[i] = "" end
+ end
+ return table.concat(list)
+end
+
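+-- Driver: each pass below is a pure string -> string transform over the
+-- concatenated Lua sources.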
+find_sources()
+local src = read_sources()
+src = merge_includes(src)
+local license = get_license(src)
+src = fold_lines(src)
+src = strip_unused1(src)
+src = save_strings(src)
+src = strip_unused2(src)
+src = strip_comments(src)
+src = preprocess(src)
+src = strip_whitespace(src)
+src = strip_unused3(src)
+src = rename_tokens1(src)
+src = func_collect(src)
+src = rename_tokens2(src)
+src = restore_strings(src)
+src = fix_bugs_and_warnings(src)
+src = merge_header(src, license)
+io.write(src)
diff --git a/libs/luajit-cmake/luajit/src/host/minilua.c b/libs/luajit-cmake/luajit/src/host/minilua.c
new file mode 100644
index 0000000..76f32ae
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/host/minilua.c
@@ -0,0 +1,7771 @@
+/* This is a heavily customized and minimized copy of Lua 5.1.5. */
+/* It's only used to build LuaJIT. It does NOT have all standard functions! */
+/******************************************************************************
+* Copyright (C) 1994-2012 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+#ifdef _MSC_VER
+typedef unsigned __int64 U64;
+#else
+typedef unsigned long long U64;
+#endif
+int _CRT_glob = 0;
+#include <stddef.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <math.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <setjmp.h>
+#include <errno.h>
+#include <time.h>
+typedef enum{
+TM_INDEX,
+TM_NEWINDEX,
+TM_GC,
+TM_MODE,
+TM_EQ,
+TM_ADD,
+TM_SUB,
+TM_MUL,
+TM_DIV,
+TM_MOD,
+TM_POW,
+TM_UNM,
+TM_LEN,
+TM_LT,
+TM_LE,
+TM_CONCAT,
+TM_CALL,
+TM_N
+}TMS;
+enum OpMode{iABC,iABx,iAsBx};
+typedef enum{
+OP_MOVE,
+OP_LOADK,
+OP_LOADBOOL,
+OP_LOADNIL,
+OP_GETUPVAL,
+OP_GETGLOBAL,
+OP_GETTABLE,
+OP_SETGLOBAL,
+OP_SETUPVAL,
+OP_SETTABLE,
+OP_NEWTABLE,
+OP_SELF,
+OP_ADD,
+OP_SUB,
+OP_MUL,
+OP_DIV,
+OP_MOD,
+OP_POW,
+OP_UNM,
+OP_NOT,
+OP_LEN,
+OP_CONCAT,
+OP_JMP,
+OP_EQ,
+OP_LT,
+OP_LE,
+OP_TEST,
+OP_TESTSET,
+OP_CALL,
+OP_TAILCALL,
+OP_RETURN,
+OP_FORLOOP,
+OP_FORPREP,
+OP_TFORLOOP,
+OP_SETLIST,
+OP_CLOSE,
+OP_CLOSURE,
+OP_VARARG
+}OpCode;
+enum OpArgMask{
+OpArgN,
+OpArgU,
+OpArgR,
+OpArgK
+};
+typedef enum{
+VVOID,
+VNIL,
+VTRUE,
+VFALSE,
+VK,
+VKNUM,
+VLOCAL,
+VUPVAL,
+VGLOBAL,
+VINDEXED,
+VJMP,
+VRELOCABLE,
+VNONRELOC,
+VCALL,
+VVARARG
+}expkind;
+enum RESERVED{
+TK_AND=257,TK_BREAK,
+TK_DO,TK_ELSE,TK_ELSEIF,TK_END,TK_FALSE,TK_FOR,TK_FUNCTION,
+TK_IF,TK_IN,TK_LOCAL,TK_NIL,TK_NOT,TK_OR,TK_REPEAT,
+TK_RETURN,TK_THEN,TK_TRUE,TK_UNTIL,TK_WHILE,
+TK_CONCAT,TK_DOTS,TK_EQ,TK_GE,TK_LE,TK_NE,TK_NUMBER,
+TK_NAME,TK_STRING,TK_EOS
+};
+typedef enum BinOpr{
+OPR_ADD,OPR_SUB,OPR_MUL,OPR_DIV,OPR_MOD,OPR_POW,
+OPR_CONCAT,
+OPR_NE,OPR_EQ,
+OPR_LT,OPR_LE,OPR_GT,OPR_GE,
+OPR_AND,OPR_OR,
+OPR_NOBINOPR
+}BinOpr;
+typedef enum UnOpr{OPR_MINUS,OPR_NOT,OPR_LEN,OPR_NOUNOPR}UnOpr;
+#define LUA_QL(x)"'"x"'"
+#define luai_apicheck(L,o){(void)L;}
+#define lua_number2str(s,n)sprintf((s),"%.14g",(n))
+#define lua_str2number(s,p)strtod((s),(p))
+#define luai_numadd(a,b)((a)+(b))
+#define luai_numsub(a,b)((a)-(b))
+#define luai_nummul(a,b)((a)*(b))
+#define luai_numdiv(a,b)((a)/(b))
+#define luai_nummod(a,b)((a)-floor((a)/(b))*(b))
+#define luai_numpow(a,b)(pow(a,b))
+#define luai_numunm(a)(-(a))
+#define luai_numeq(a,b)((a)==(b))
+#define luai_numlt(a,b)((a)<(b))
+#define luai_numle(a,b)((a)<=(b))
+#define luai_numisnan(a)(!luai_numeq((a),(a)))
+#define lua_number2int(i,d)((i)=(int)(d))
+#define lua_number2integer(i,d)((i)=(lua_Integer)(d))
+#define LUAI_THROW(L,c)longjmp((c)->b,1)
+#define LUAI_TRY(L,c,a)if(setjmp((c)->b)==0){a}
+#define lua_pclose(L,file)((void)((void)L,file),0)
+#define lua_upvalueindex(i)((-10002)-(i))
+typedef struct lua_State lua_State;
+typedef int(*lua_CFunction)(lua_State*L);
+typedef const char*(*lua_Reader)(lua_State*L,void*ud,size_t*sz);
+typedef void*(*lua_Alloc)(void*ud,void*ptr,size_t osize,size_t nsize);
+typedef double lua_Number;
+typedef ptrdiff_t lua_Integer;
+static void lua_settop(lua_State*L,int idx);
+static int lua_type(lua_State*L,int idx);
+static const char* lua_tolstring(lua_State*L,int idx,size_t*len);
+static size_t lua_objlen(lua_State*L,int idx);
+static void lua_pushlstring(lua_State*L,const char*s,size_t l);
+static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n);
+static void lua_createtable(lua_State*L,int narr,int nrec);
+static void lua_setfield(lua_State*L,int idx,const char*k);
+#define lua_pop(L,n)lua_settop(L,-(n)-1)
+#define lua_newtable(L)lua_createtable(L,0,0)
+#define lua_pushcfunction(L,f)lua_pushcclosure(L,(f),0)
+#define lua_strlen(L,i)lua_objlen(L,(i))
+#define lua_isfunction(L,n)(lua_type(L,(n))==6)
+#define lua_istable(L,n)(lua_type(L,(n))==5)
+#define lua_isnil(L,n)(lua_type(L,(n))==0)
+#define lua_isboolean(L,n)(lua_type(L,(n))==1)
+#define lua_isnone(L,n)(lua_type(L,(n))==(-1))
+#define lua_isnoneornil(L,n)(lua_type(L,(n))<=0)
+#define lua_pushliteral(L,s)lua_pushlstring(L,""s,(sizeof(s)/sizeof(char))-1)
+#define lua_setglobal(L,s)lua_setfield(L,(-10002),(s))
+#define lua_tostring(L,i)lua_tolstring(L,(i),NULL)
+typedef struct lua_Debug lua_Debug;
+typedef void(*lua_Hook)(lua_State*L,lua_Debug*ar);
+struct lua_Debug{
+int event;
+const char*name;
+const char*namewhat;
+const char*what;
+const char*source;
+int currentline;
+int nups;
+int linedefined;
+int lastlinedefined;
+char short_src[60];
+int i_ci;
+};
+typedef unsigned int lu_int32;
+typedef size_t lu_mem;
+typedef ptrdiff_t l_mem;
+typedef unsigned char lu_byte;
+#define IntPoint(p)((unsigned int)(lu_mem)(p))
+typedef union{double u;void*s;long l;}L_Umaxalign;
+typedef double l_uacNumber;
+#define check_exp(c,e)(e)
+#define UNUSED(x)((void)(x))
+#define cast(t,exp)((t)(exp))
+#define cast_byte(i)cast(lu_byte,(i))
+#define cast_num(i)cast(lua_Number,(i))
+#define cast_int(i)cast(int,(i))
+typedef lu_int32 Instruction;
+#define condhardstacktests(x)((void)0)
+typedef union GCObject GCObject;
+typedef struct GCheader{
+GCObject*next;lu_byte tt;lu_byte marked;
+}GCheader;
+typedef union{
+GCObject*gc;
+void*p;
+lua_Number n;
+int b;
+}Value;
+typedef struct lua_TValue{
+Value value;int tt;
+}TValue;
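+/* Type tags appear as raw numbers below: 0 nil, 1 boolean,
+2 lightuserdata, 3 number, 4 string, 5 table, 6 function, 7 userdata,
+8 thread, 8+1 proto, 8+2 upval, 8+3 dead key (cf. luaT_typenames). */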
+#define ttisnil(o)(ttype(o)==0)
+#define ttisnumber(o)(ttype(o)==3)
+#define ttisstring(o)(ttype(o)==4)
+#define ttistable(o)(ttype(o)==5)
+#define ttisfunction(o)(ttype(o)==6)
+#define ttisboolean(o)(ttype(o)==1)
+#define ttisuserdata(o)(ttype(o)==7)
+#define ttisthread(o)(ttype(o)==8)
+#define ttislightuserdata(o)(ttype(o)==2)
+#define ttype(o)((o)->tt)
+#define gcvalue(o)check_exp(iscollectable(o),(o)->value.gc)
+#define pvalue(o)check_exp(ttislightuserdata(o),(o)->value.p)
+#define nvalue(o)check_exp(ttisnumber(o),(o)->value.n)
+#define rawtsvalue(o)check_exp(ttisstring(o),&(o)->value.gc->ts)
+#define tsvalue(o)(&rawtsvalue(o)->tsv)
+#define rawuvalue(o)check_exp(ttisuserdata(o),&(o)->value.gc->u)
+#define uvalue(o)(&rawuvalue(o)->uv)
+#define clvalue(o)check_exp(ttisfunction(o),&(o)->value.gc->cl)
+#define hvalue(o)check_exp(ttistable(o),&(o)->value.gc->h)
+#define bvalue(o)check_exp(ttisboolean(o),(o)->value.b)
+#define thvalue(o)check_exp(ttisthread(o),&(o)->value.gc->th)
+#define l_isfalse(o)(ttisnil(o)||(ttisboolean(o)&&bvalue(o)==0))
+#define checkconsistency(obj)
+#define checkliveness(g,obj)
+#define setnilvalue(obj)((obj)->tt=0)
+#define setnvalue(obj,x){TValue*i_o=(obj);i_o->value.n=(x);i_o->tt=3;}
+#define setbvalue(obj,x){TValue*i_o=(obj);i_o->value.b=(x);i_o->tt=1;}
+#define setsvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=4;checkliveness(G(L),i_o);}
+#define setuvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=7;checkliveness(G(L),i_o);}
+#define setthvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=8;checkliveness(G(L),i_o);}
+#define setclvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=6;checkliveness(G(L),i_o);}
+#define sethvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=5;checkliveness(G(L),i_o);}
+#define setptvalue(L,obj,x){TValue*i_o=(obj);i_o->value.gc=cast(GCObject*,(x));i_o->tt=(8+1);checkliveness(G(L),i_o);}
+#define setobj(L,obj1,obj2){const TValue*o2=(obj2);TValue*o1=(obj1);o1->value=o2->value;o1->tt=o2->tt;checkliveness(G(L),o1);}
+#define setttype(obj,tt)(ttype(obj)=(tt))
+#define iscollectable(o)(ttype(o)>=4)
+typedef TValue*StkId;
+typedef union TString{
+L_Umaxalign dummy;
+struct{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte reserved;
+unsigned int hash;
+size_t len;
+}tsv;
+}TString;
+#define getstr(ts)cast(const char*,(ts)+1)
+#define svalue(o)getstr(rawtsvalue(o))
+typedef union Udata{
+L_Umaxalign dummy;
+struct{
+GCObject*next;lu_byte tt;lu_byte marked;
+struct Table*metatable;
+struct Table*env;
+size_t len;
+}uv;
+}Udata;
+typedef struct Proto{
+GCObject*next;lu_byte tt;lu_byte marked;
+TValue*k;
+Instruction*code;
+struct Proto**p;
+int*lineinfo;
+struct LocVar*locvars;
+TString**upvalues;
+TString*source;
+int sizeupvalues;
+int sizek;
+int sizecode;
+int sizelineinfo;
+int sizep;
+int sizelocvars;
+int linedefined;
+int lastlinedefined;
+GCObject*gclist;
+lu_byte nups;
+lu_byte numparams;
+lu_byte is_vararg;
+lu_byte maxstacksize;
+}Proto;
+typedef struct LocVar{
+TString*varname;
+int startpc;
+int endpc;
+}LocVar;
+typedef struct UpVal{
+GCObject*next;lu_byte tt;lu_byte marked;
+TValue*v;
+union{
+TValue value;
+struct{
+struct UpVal*prev;
+struct UpVal*next;
+}l;
+}u;
+}UpVal;
+typedef struct CClosure{
+GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
+lua_CFunction f;
+TValue upvalue[1];
+}CClosure;
+typedef struct LClosure{
+GCObject*next;lu_byte tt;lu_byte marked;lu_byte isC;lu_byte nupvalues;GCObject*gclist;struct Table*env;
+struct Proto*p;
+UpVal*upvals[1];
+}LClosure;
+typedef union Closure{
+CClosure c;
+LClosure l;
+}Closure;
+#define iscfunction(o)(ttype(o)==6&&clvalue(o)->c.isC)
+typedef union TKey{
+struct{
+Value value;int tt;
+struct Node*next;
+}nk;
+TValue tvk;
+}TKey;
+typedef struct Node{
+TValue i_val;
+TKey i_key;
+}Node;
+typedef struct Table{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte flags;
+lu_byte lsizenode;
+struct Table*metatable;
+TValue*array;
+Node*node;
+Node*lastfree;
+GCObject*gclist;
+int sizearray;
+}Table;
+#define lmod(s,size)(check_exp((size&(size-1))==0,(cast(int,(s)&((size)-1)))))
+#define twoto(x)((size_t)1<<(x))
+#define sizenode(t)(twoto((t)->lsizenode))
+static const TValue luaO_nilobject_;
+#define ceillog2(x)(luaO_log2((x)-1)+1)
+static int luaO_log2(unsigned int x);
+#define gfasttm(g,et,e)((et)==NULL?NULL:((et)->flags&(1u<<(e)))?NULL:luaT_gettm(et,e,(g)->tmname[e]))
+#define fasttm(l,et,e)gfasttm(G(l),et,e)
+static const TValue*luaT_gettm(Table*events,TMS event,TString*ename);
+#define luaM_reallocv(L,b,on,n,e)((cast(size_t,(n)+1)<=((size_t)(~(size_t)0)-2)/(e))?luaM_realloc_(L,(b),(on)*(e),(n)*(e)):luaM_toobig(L))
+#define luaM_freemem(L,b,s)luaM_realloc_(L,(b),(s),0)
+#define luaM_free(L,b)luaM_realloc_(L,(b),sizeof(*(b)),0)
+#define luaM_freearray(L,b,n,t)luaM_reallocv(L,(b),n,0,sizeof(t))
+#define luaM_malloc(L,t)luaM_realloc_(L,NULL,0,(t))
+#define luaM_new(L,t)cast(t*,luaM_malloc(L,sizeof(t)))
+#define luaM_newvector(L,n,t)cast(t*,luaM_reallocv(L,NULL,0,n,sizeof(t)))
+#define luaM_growvector(L,v,nelems,size,t,limit,e)if((nelems)+1>(size))((v)=cast(t*,luaM_growaux_(L,v,&(size),sizeof(t),limit,e)))
+#define luaM_reallocvector(L,v,oldn,n,t)((v)=cast(t*,luaM_reallocv(L,v,oldn,n,sizeof(t))))
+static void*luaM_realloc_(lua_State*L,void*block,size_t oldsize,
+size_t size);
+static void*luaM_toobig(lua_State*L);
+static void*luaM_growaux_(lua_State*L,void*block,int*size,
+size_t size_elem,int limit,
+const char*errormsg);
+typedef struct Zio ZIO;
+#define char2int(c)cast(int,cast(unsigned char,(c)))
+#define zgetc(z)(((z)->n--)>0?char2int(*(z)->p++):luaZ_fill(z))
+typedef struct Mbuffer{
+char*buffer;
+size_t n;
+size_t buffsize;
+}Mbuffer;
+#define luaZ_initbuffer(L,buff)((buff)->buffer=NULL,(buff)->buffsize=0)
+#define luaZ_buffer(buff)((buff)->buffer)
+#define luaZ_sizebuffer(buff)((buff)->buffsize)
+#define luaZ_bufflen(buff)((buff)->n)
+#define luaZ_resetbuffer(buff)((buff)->n=0)
+#define luaZ_resizebuffer(L,buff,size)(luaM_reallocvector(L,(buff)->buffer,(buff)->buffsize,size,char),(buff)->buffsize=size)
+#define luaZ_freebuffer(L,buff)luaZ_resizebuffer(L,buff,0)
+struct Zio{
+size_t n;
+const char*p;
+lua_Reader reader;
+void*data;
+lua_State*L;
+};
+static int luaZ_fill(ZIO*z);
+struct lua_longjmp;
+#define gt(L)(&L->l_gt)
+#define registry(L)(&G(L)->l_registry)
+typedef struct stringtable{
+GCObject**hash;
+lu_int32 nuse;
+int size;
+}stringtable;
+typedef struct CallInfo{
+StkId base;
+StkId func;
+StkId top;
+const Instruction*savedpc;
+int nresults;
+int tailcalls;
+}CallInfo;
+#define curr_func(L)(clvalue(L->ci->func))
+#define ci_func(ci)(clvalue((ci)->func))
+#define f_isLua(ci)(!ci_func(ci)->c.isC)
+#define isLua(ci)(ttisfunction((ci)->func)&&f_isLua(ci))
+typedef struct global_State{
+stringtable strt;
+lua_Alloc frealloc;
+void*ud;
+lu_byte currentwhite;
+lu_byte gcstate;
+int sweepstrgc;
+GCObject*rootgc;
+GCObject**sweepgc;
+GCObject*gray;
+GCObject*grayagain;
+GCObject*weak;
+GCObject*tmudata;
+Mbuffer buff;
+lu_mem GCthreshold;
+lu_mem totalbytes;
+lu_mem estimate;
+lu_mem gcdept;
+int gcpause;
+int gcstepmul;
+lua_CFunction panic;
+TValue l_registry;
+struct lua_State*mainthread;
+UpVal uvhead;
+struct Table*mt[(8+1)];
+TString*tmname[TM_N];
+}global_State;
+struct lua_State{
+GCObject*next;lu_byte tt;lu_byte marked;
+lu_byte status;
+StkId top;
+StkId base;
+global_State*l_G;
+CallInfo*ci;
+const Instruction*savedpc;
+StkId stack_last;
+StkId stack;
+CallInfo*end_ci;
+CallInfo*base_ci;
+int stacksize;
+int size_ci;
+unsigned short nCcalls;
+unsigned short baseCcalls;
+lu_byte hookmask;
+lu_byte allowhook;
+int basehookcount;
+int hookcount;
+lua_Hook hook;
+TValue l_gt;
+TValue env;
+GCObject*openupval;
+GCObject*gclist;
+struct lua_longjmp*errorJmp;
+ptrdiff_t errfunc;
+};
+#define G(L)(L->l_G)
+union GCObject{
+GCheader gch;
+union TString ts;
+union Udata u;
+union Closure cl;
+struct Table h;
+struct Proto p;
+struct UpVal uv;
+struct lua_State th;
+};
+#define rawgco2ts(o)check_exp((o)->gch.tt==4,&((o)->ts))
+#define gco2ts(o)(&rawgco2ts(o)->tsv)
+#define rawgco2u(o)check_exp((o)->gch.tt==7,&((o)->u))
+#define gco2u(o)(&rawgco2u(o)->uv)
+#define gco2cl(o)check_exp((o)->gch.tt==6,&((o)->cl))
+#define gco2h(o)check_exp((o)->gch.tt==5,&((o)->h))
+#define gco2p(o)check_exp((o)->gch.tt==(8+1),&((o)->p))
+#define gco2uv(o)check_exp((o)->gch.tt==(8+2),&((o)->uv))
+#define ngcotouv(o)check_exp((o)==NULL||(o)->gch.tt==(8+2),&((o)->uv))
+#define gco2th(o)check_exp((o)->gch.tt==8,&((o)->th))
+#define obj2gco(v)(cast(GCObject*,(v)))
+static void luaE_freethread(lua_State*L,lua_State*L1);
+#define pcRel(pc,p)(cast(int,(pc)-(p)->code)-1)
+#define getline_(f,pc)(((f)->lineinfo)?(f)->lineinfo[pc]:0)
+#define resethookcount(L)(L->hookcount=L->basehookcount)
+static void luaG_typeerror(lua_State*L,const TValue*o,
+const char*opname);
+static void luaG_runerror(lua_State*L,const char*fmt,...);
+#define luaD_checkstack(L,n)if((char*)L->stack_last-(char*)L->top<=(n)*(int)sizeof(TValue))luaD_growstack(L,n);else condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));
+#define incr_top(L){luaD_checkstack(L,1);L->top++;}
+#define savestack(L,p)((char*)(p)-(char*)L->stack)
+#define restorestack(L,n)((TValue*)((char*)L->stack+(n)))
+#define saveci(L,p)((char*)(p)-(char*)L->base_ci)
+#define restoreci(L,n)((CallInfo*)((char*)L->base_ci+(n)))
+typedef void(*Pfunc)(lua_State*L,void*ud);
+static int luaD_poscall(lua_State*L,StkId firstResult);
+static void luaD_reallocCI(lua_State*L,int newsize);
+static void luaD_reallocstack(lua_State*L,int newsize);
+static void luaD_growstack(lua_State*L,int n);
+static void luaD_throw(lua_State*L,int errcode);
+static void*luaM_growaux_(lua_State*L,void*block,int*size,size_t size_elems,
+int limit,const char*errormsg){
+void*newblock;
+int newsize;
+if(*size>=limit/2){
+if(*size>=limit)
+luaG_runerror(L,errormsg);
+newsize=limit;
+}
+else{
+newsize=(*size)*2;
+if(newsize<4)
+newsize=4;
+}
+newblock=luaM_reallocv(L,block,*size,newsize,size_elems);
+*size=newsize;
+return newblock;
+}
+static void*luaM_toobig(lua_State*L){
+luaG_runerror(L,"memory allocation error: block too big");
+return NULL;
+}
+static void*luaM_realloc_(lua_State*L,void*block,size_t osize,size_t nsize){
+global_State*g=G(L);
+block=(*g->frealloc)(g->ud,block,osize,nsize);
+if(block==NULL&&nsize>0)
+luaD_throw(L,4);
+g->totalbytes=(g->totalbytes-osize)+nsize;
+return block;
+}
+#define resetbits(x,m)((x)&=cast(lu_byte,~(m)))
+#define setbits(x,m)((x)|=(m))
+#define testbits(x,m)((x)&(m))
+#define bitmask(b)(1<<(b))
+#define bit2mask(b1,b2)(bitmask(b1)|bitmask(b2))
+#define l_setbit(x,b)setbits(x,bitmask(b))
+#define resetbit(x,b)resetbits(x,bitmask(b))
+#define testbit(x,b)testbits(x,bitmask(b))
+#define set2bits(x,b1,b2)setbits(x,(bit2mask(b1,b2)))
+#define reset2bits(x,b1,b2)resetbits(x,(bit2mask(b1,b2)))
+#define test2bits(x,b1,b2)testbits(x,(bit2mask(b1,b2)))
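+/* GC mark bits: 0/1 current/other white, 2 black, 5 fixed (luaS_fix). */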
+#define iswhite(x)test2bits((x)->gch.marked,0,1)
+#define isblack(x)testbit((x)->gch.marked,2)
+#define isgray(x)(!isblack(x)&&!iswhite(x))
+#define otherwhite(g)(g->currentwhite^bit2mask(0,1))
+#define isdead(g,v)((v)->gch.marked&otherwhite(g)&bit2mask(0,1))
+#define changewhite(x)((x)->gch.marked^=bit2mask(0,1))
+#define gray2black(x)l_setbit((x)->gch.marked,2)
+#define valiswhite(x)(iscollectable(x)&&iswhite(gcvalue(x)))
+#define luaC_white(g)cast(lu_byte,(g)->currentwhite&bit2mask(0,1))
+#define luaC_checkGC(L){condhardstacktests(luaD_reallocstack(L,L->stacksize-5-1));if(G(L)->totalbytes>=G(L)->GCthreshold)luaC_step(L);}
+#define luaC_barrier(L,p,v){if(valiswhite(v)&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),gcvalue(v));}
+#define luaC_barriert(L,t,v){if(valiswhite(v)&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
+#define luaC_objbarrier(L,p,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(p)))luaC_barrierf(L,obj2gco(p),obj2gco(o));}
+#define luaC_objbarriert(L,t,o){if(iswhite(obj2gco(o))&&isblack(obj2gco(t)))luaC_barrierback(L,t);}
+static void luaC_step(lua_State*L);
+static void luaC_link(lua_State*L,GCObject*o,lu_byte tt);
+static void luaC_linkupval(lua_State*L,UpVal*uv);
+static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v);
+static void luaC_barrierback(lua_State*L,Table*t);
+#define sizestring(s)(sizeof(union TString)+((s)->len+1)*sizeof(char))
+#define sizeudata(u)(sizeof(union Udata)+(u)->len)
+#define luaS_new(L,s)(luaS_newlstr(L,s,strlen(s)))
+#define luaS_newliteral(L,s)(luaS_newlstr(L,""s,(sizeof(s)/sizeof(char))-1))
+#define luaS_fix(s)l_setbit((s)->tsv.marked,5)
+static TString*luaS_newlstr(lua_State*L,const char*str,size_t l);
+#define tostring(L,o)((ttype(o)==4)||(luaV_tostring(L,o)))
+#define tonumber(o,n)(ttype(o)==3||(((o)=luaV_tonumber(o,n))!=NULL))
+#define equalobj(L,o1,o2)(ttype(o1)==ttype(o2)&&luaV_equalval(L,o1,o2))
+static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2);
+static const TValue*luaV_tonumber(const TValue*obj,TValue*n);
+static int luaV_tostring(lua_State*L,StkId obj);
+static void luaV_execute(lua_State*L,int nexeccalls);
+static void luaV_concat(lua_State*L,int total,int last);
+static const TValue luaO_nilobject_={{NULL},0};
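+/* int2fb/fb2int convert between ints and the floating-point-byte format
+(eeeeexxx) used to encode table sizes in bytecode. */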
+static int luaO_int2fb(unsigned int x){
+int e=0;
+while(x>=16){
+x=(x+1)>>1;
+e++;
+}
+if(x<8)return x;
+else return((e+1)<<3)|(cast_int(x)-8);
+}
+static int luaO_fb2int(int x){
+int e=(x>>3)&31;
+if(e==0)return x;
+else return((x&7)+8)<<(e-1);
+}
+static int luaO_log2(unsigned int x){
+static const lu_byte log_2[256]={
+0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+};
+int l=-1;
+while(x>=256){l+=8;x>>=8;}
+return l+log_2[x];
+}
+static int luaO_rawequalObj(const TValue*t1,const TValue*t2){
+if(ttype(t1)!=ttype(t2))return 0;
+else switch(ttype(t1)){
+case 0:
+return 1;
+case 3:
+return luai_numeq(nvalue(t1),nvalue(t2));
+case 1:
+return bvalue(t1)==bvalue(t2);
+case 2:
+return pvalue(t1)==pvalue(t2);
+default:
+return gcvalue(t1)==gcvalue(t2);
+}
+}
+static int luaO_str2d(const char*s,lua_Number*result){
+char*endptr;
+*result=lua_str2number(s,&endptr);
+if(endptr==s)return 0;
+if(*endptr=='x'||*endptr=='X')
+*result=cast_num(strtoul(s,&endptr,16));
+if(*endptr=='\0')return 1;
+while(isspace(cast(unsigned char,*endptr)))endptr++;
+if(*endptr!='\0')return 0;
+return 1;
+}
+static void pushstr(lua_State*L,const char*str){
+setsvalue(L,L->top,luaS_new(L,str));
+incr_top(L);
+}
+static const char*luaO_pushvfstring(lua_State*L,const char*fmt,va_list argp){
+int n=1;
+pushstr(L,"");
+for(;;){
+const char*e=strchr(fmt,'%');
+if(e==NULL)break;
+setsvalue(L,L->top,luaS_newlstr(L,fmt,e-fmt));
+incr_top(L);
+switch(*(e+1)){
+case's':{
+const char*s=va_arg(argp,char*);
+if(s==NULL)s="(null)";
+pushstr(L,s);
+break;
+}
+case'c':{
+char buff[2];
+buff[0]=cast(char,va_arg(argp,int));
+buff[1]='\0';
+pushstr(L,buff);
+break;
+}
+case'd':{
+setnvalue(L->top,cast_num(va_arg(argp,int)));
+incr_top(L);
+break;
+}
+case'f':{
+setnvalue(L->top,cast_num(va_arg(argp,l_uacNumber)));
+incr_top(L);
+break;
+}
+case'p':{
+char buff[4*sizeof(void*)+8];
+sprintf(buff,"%p",va_arg(argp,void*));
+pushstr(L,buff);
+break;
+}
+case'%':{
+pushstr(L,"%");
+break;
+}
+default:{
+char buff[3];
+buff[0]='%';
+buff[1]=*(e+1);
+buff[2]='\0';
+pushstr(L,buff);
+break;
+}
+}
+n+=2;
+fmt=e+2;
+}
+pushstr(L,fmt);
+luaV_concat(L,n+1,cast_int(L->top-L->base)-1);
+L->top-=n;
+return svalue(L->top-1);
+}
+static const char*luaO_pushfstring(lua_State*L,const char*fmt,...){
+const char*msg;
+va_list argp;
+va_start(argp,fmt);
+msg=luaO_pushvfstring(L,fmt,argp);
+va_end(argp);
+return msg;
+}
+static void luaO_chunkid(char*out,const char*source,size_t bufflen){
+if(*source=='='){
+strncpy(out,source+1,bufflen);
+out[bufflen-1]='\0';
+}
+else{
+if(*source=='@'){
+size_t l;
+source++;
+bufflen-=sizeof(" '...' ");
+l=strlen(source);
+strcpy(out,"");
+if(l>bufflen){
+source+=(l-bufflen);
+strcat(out,"...");
+}
+strcat(out,source);
+}
+else{
+size_t len=strcspn(source,"\n\r");
+bufflen-=sizeof(" [string \"...\"] ");
+if(len>bufflen)len=bufflen;
+strcpy(out,"[string \"");
+if(source[len]!='\0'){
+strncat(out,source,len);
+strcat(out,"...");
+}
+else
+strcat(out,source);
+strcat(out,"\"]");
+}
+}
+}
+#define gnode(t,i)(&(t)->node[i])
+#define gkey(n)(&(n)->i_key.nk)
+#define gval(n)(&(n)->i_val)
+#define gnext(n)((n)->i_key.nk.next)
+#define key2tval(n)(&(n)->i_key.tvk)
+static TValue*luaH_setnum(lua_State*L,Table*t,int key);
+static const TValue*luaH_getstr(Table*t,TString*key);
+static TValue*luaH_set(lua_State*L,Table*t,const TValue*key);
+static const char*const luaT_typenames[]={
+"nil","boolean","userdata","number",
+"string","table","function","userdata","thread",
+"proto","upval"
+};
+static void luaT_init(lua_State*L){
+static const char*const luaT_eventname[]={
+"__index","__newindex",
+"__gc","__mode","__eq",
+"__add","__sub","__mul","__div","__mod",
+"__pow","__unm","__len","__lt","__le",
+"__concat","__call"
+};
+int i;
+for(i=0;i<TM_N;i++){
+G(L)->tmname[i]=luaS_new(L,luaT_eventname[i]);
+luaS_fix(G(L)->tmname[i]);
+}
+}
+static const TValue*luaT_gettm(Table*events,TMS event,TString*ename){
+const TValue*tm=luaH_getstr(events,ename);
+if(ttisnil(tm)){
+events->flags|=cast_byte(1u<<event);
+return NULL;
+}
+else return tm;
+}
+static const TValue*luaT_gettmbyobj(lua_State*L,const TValue*o,TMS event){
+Table*mt;
+switch(ttype(o)){
+case 5:
+mt=hvalue(o)->metatable;
+break;
+case 7:
+mt=uvalue(o)->metatable;
+break;
+default:
+mt=G(L)->mt[ttype(o)];
+}
+return(mt?luaH_getstr(mt,G(L)->tmname[event]):(&luaO_nilobject_));
+}
+#define sizeCclosure(n)(cast(int,sizeof(CClosure))+cast(int,sizeof(TValue)*((n)-1)))
+#define sizeLclosure(n)(cast(int,sizeof(LClosure))+cast(int,sizeof(TValue*)*((n)-1)))
+static Closure*luaF_newCclosure(lua_State*L,int nelems,Table*e){
+Closure*c=cast(Closure*,luaM_malloc(L,sizeCclosure(nelems)));
+luaC_link(L,obj2gco(c),6);
+c->c.isC=1;
+c->c.env=e;
+c->c.nupvalues=cast_byte(nelems);
+return c;
+}
+static Closure*luaF_newLclosure(lua_State*L,int nelems,Table*e){
+Closure*c=cast(Closure*,luaM_malloc(L,sizeLclosure(nelems)));
+luaC_link(L,obj2gco(c),6);
+c->l.isC=0;
+c->l.env=e;
+c->l.nupvalues=cast_byte(nelems);
+while(nelems--)c->l.upvals[nelems]=NULL;
+return c;
+}
+static UpVal*luaF_newupval(lua_State*L){
+UpVal*uv=luaM_new(L,UpVal);
+luaC_link(L,obj2gco(uv),(8+2));
+uv->v=&uv->u.value;
+setnilvalue(uv->v);
+return uv;
+}
+static UpVal*luaF_findupval(lua_State*L,StkId level){
+global_State*g=G(L);
+GCObject**pp=&L->openupval;
+UpVal*p;
+UpVal*uv;
+while(*pp!=NULL&&(p=ngcotouv(*pp))->v>=level){
+if(p->v==level){
+if(isdead(g,obj2gco(p)))
+changewhite(obj2gco(p));
+return p;
+}
+pp=&p->next;
+}
+uv=luaM_new(L,UpVal);
+uv->tt=(8+2);
+uv->marked=luaC_white(g);
+uv->v=level;
+uv->next=*pp;
+*pp=obj2gco(uv);
+uv->u.l.prev=&g->uvhead;
+uv->u.l.next=g->uvhead.u.l.next;
+uv->u.l.next->u.l.prev=uv;
+g->uvhead.u.l.next=uv;
+return uv;
+}
+static void unlinkupval(UpVal*uv){
+uv->u.l.next->u.l.prev=uv->u.l.prev;
+uv->u.l.prev->u.l.next=uv->u.l.next;
+}
+static void luaF_freeupval(lua_State*L,UpVal*uv){
+if(uv->v!=&uv->u.value)
+unlinkupval(uv);
+luaM_free(L,uv);
+}
+static void luaF_close(lua_State*L,StkId level){
+UpVal*uv;
+global_State*g=G(L);
+while(L->openupval!=NULL&&(uv=ngcotouv(L->openupval))->v>=level){
+GCObject*o=obj2gco(uv);
+L->openupval=uv->next;
+if(isdead(g,o))
+luaF_freeupval(L,uv);
+else{
+unlinkupval(uv);
+setobj(L,&uv->u.value,uv->v);
+uv->v=&uv->u.value;
+luaC_linkupval(L,uv);
+}
+}
+}
+static Proto*luaF_newproto(lua_State*L){
+Proto*f=luaM_new(L,Proto);
+luaC_link(L,obj2gco(f),(8+1));
+f->k=NULL;
+f->sizek=0;
+f->p=NULL;
+f->sizep=0;
+f->code=NULL;
+f->sizecode=0;
+f->sizelineinfo=0;
+f->sizeupvalues=0;
+f->nups=0;
+f->upvalues=NULL;
+f->numparams=0;
+f->is_vararg=0;
+f->maxstacksize=0;
+f->lineinfo=NULL;
+f->sizelocvars=0;
+f->locvars=NULL;
+f->linedefined=0;
+f->lastlinedefined=0;
+f->source=NULL;
+return f;
+}
+static void luaF_freeproto(lua_State*L,Proto*f){
+luaM_freearray(L,f->code,f->sizecode,Instruction);
+luaM_freearray(L,f->p,f->sizep,Proto*);
+luaM_freearray(L,f->k,f->sizek,TValue);
+luaM_freearray(L,f->lineinfo,f->sizelineinfo,int);
+luaM_freearray(L,f->locvars,f->sizelocvars,struct LocVar);
+luaM_freearray(L,f->upvalues,f->sizeupvalues,TString*);
+luaM_free(L,f);
+}
+static void luaF_freeclosure(lua_State*L,Closure*c){
+int size=(c->c.isC)?sizeCclosure(c->c.nupvalues):
+sizeLclosure(c->l.nupvalues);
+luaM_freemem(L,c,size);
+}
+#define MASK1(n,p)((~((~(Instruction)0)<<n))<<p)
+#define MASK0(n,p)(~MASK1(n,p))
+#define GET_OPCODE(i)(cast(OpCode,((i)>>0)&MASK1(6,0)))
+#define SET_OPCODE(i,o)((i)=(((i)&MASK0(6,0))|((cast(Instruction,o)<<0)&MASK1(6,0))))
+#define GETARG_A(i)(cast(int,((i)>>(0+6))&MASK1(8,0)))
+#define SETARG_A(i,u)((i)=(((i)&MASK0(8,(0+6)))|((cast(Instruction,u)<<(0+6))&MASK1(8,(0+6)))))
+#define GETARG_B(i)(cast(int,((i)>>(((0+6)+8)+9))&MASK1(9,0)))
+#define SETARG_B(i,b)((i)=(((i)&MASK0(9,(((0+6)+8)+9)))|((cast(Instruction,b)<<(((0+6)+8)+9))&MASK1(9,(((0+6)+8)+9)))))
+#define GETARG_C(i)(cast(int,((i)>>((0+6)+8))&MASK1(9,0)))
+#define SETARG_C(i,b)((i)=(((i)&MASK0(9,((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1(9,((0+6)+8)))))
+#define GETARG_Bx(i)(cast(int,((i)>>((0+6)+8))&MASK1((9+9),0)))
+#define SETARG_Bx(i,b)((i)=(((i)&MASK0((9+9),((0+6)+8)))|((cast(Instruction,b)<<((0+6)+8))&MASK1((9+9),((0+6)+8)))))
+#define GETARG_sBx(i)(GETARG_Bx(i)-(((1<<(9+9))-1)>>1))
+#define SETARG_sBx(i,b)SETARG_Bx((i),cast(unsigned int,(b)+(((1<<(9+9))-1)>>1)))
+#define CREATE_ABC(o,a,b,c)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,b)<<(((0+6)+8)+9))|(cast(Instruction,c)<<((0+6)+8)))
+#define CREATE_ABx(o,a,bc)((cast(Instruction,o)<<0)|(cast(Instruction,a)<<(0+6))|(cast(Instruction,bc)<<((0+6)+8)))
+#define ISK(x)((x)&(1<<(9-1)))
+#define INDEXK(r)((int)(r)&~(1<<(9-1)))
+#define RKASK(x)((x)|(1<<(9-1)))
+static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)];
+#define getBMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>4)&3))
+#define getCMode(m)(cast(enum OpArgMask,(luaP_opmodes[m]>>2)&3))
+#define testTMode(m)(luaP_opmodes[m]&(1<<7))
+typedef struct expdesc{
+expkind k;
+union{
+struct{int info,aux;}s;
+lua_Number nval;
+}u;
+int t;
+int f;
+}expdesc;
+typedef struct upvaldesc{
+lu_byte k;
+lu_byte info;
+}upvaldesc;
+struct BlockCnt;
+typedef struct FuncState{
+Proto*f;
+Table*h;
+struct FuncState*prev;
+struct LexState*ls;
+struct lua_State*L;
+struct BlockCnt*bl;
+int pc;
+int lasttarget;
+int jpc;
+int freereg;
+int nk;
+int np;
+short nlocvars;
+lu_byte nactvar;
+upvaldesc upvalues[60];
+unsigned short actvar[200];
+}FuncState;
+static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,
+const char*name);
+struct lua_longjmp{
+struct lua_longjmp*previous;
+jmp_buf b;
+volatile int status;
+};
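+/* Error status codes below: 2 LUA_ERRRUN, 3 LUA_ERRSYNTAX, 4 LUA_ERRMEM,
+5 LUA_ERRERR. */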
+static void luaD_seterrorobj(lua_State*L,int errcode,StkId oldtop){
+switch(errcode){
+case 4:{
+setsvalue(L,oldtop,luaS_newliteral(L,"not enough memory"));
+break;
+}
+case 5:{
+setsvalue(L,oldtop,luaS_newliteral(L,"error in error handling"));
+break;
+}
+case 3:
+case 2:{
+setobj(L,oldtop,L->top-1);
+break;
+}
+}
+L->top=oldtop+1;
+}
+static void restore_stack_limit(lua_State*L){
+if(L->size_ci>20000){
+int inuse=cast_int(L->ci-L->base_ci);
+if(inuse+1<20000)
+luaD_reallocCI(L,20000);
+}
+}
+static void resetstack(lua_State*L,int status){
+L->ci=L->base_ci;
+L->base=L->ci->base;
+luaF_close(L,L->base);
+luaD_seterrorobj(L,status,L->base);
+L->nCcalls=L->baseCcalls;
+L->allowhook=1;
+restore_stack_limit(L);
+L->errfunc=0;
+L->errorJmp=NULL;
+}
+static void luaD_throw(lua_State*L,int errcode){
+if(L->errorJmp){
+L->errorJmp->status=errcode;
+LUAI_THROW(L,L->errorJmp);
+}
+else{
+L->status=cast_byte(errcode);
+if(G(L)->panic){
+resetstack(L,errcode);
+G(L)->panic(L);
+}
+exit(EXIT_FAILURE);
+}
+}
+static int luaD_rawrunprotected(lua_State*L,Pfunc f,void*ud){
+struct lua_longjmp lj;
+lj.status=0;
+lj.previous=L->errorJmp;
+L->errorJmp=&lj;
+LUAI_TRY(L,&lj,
+(*f)(L,ud);
+);
+L->errorJmp=lj.previous;
+return lj.status;
+}
+static void correctstack(lua_State*L,TValue*oldstack){
+CallInfo*ci;
+GCObject*up;
+L->top=(L->top-oldstack)+L->stack;
+for(up=L->openupval;up!=NULL;up=up->gch.next)
+gco2uv(up)->v=(gco2uv(up)->v-oldstack)+L->stack;
+for(ci=L->base_ci;ci<=L->ci;ci++){
+ci->top=(ci->top-oldstack)+L->stack;
+ci->base=(ci->base-oldstack)+L->stack;
+ci->func=(ci->func-oldstack)+L->stack;
+}
+L->base=(L->base-oldstack)+L->stack;
+}
+static void luaD_reallocstack(lua_State*L,int newsize){
+TValue*oldstack=L->stack;
+int realsize=newsize+1+5;
+luaM_reallocvector(L,L->stack,L->stacksize,realsize,TValue);
+L->stacksize=realsize;
+L->stack_last=L->stack+newsize;
+correctstack(L,oldstack);
+}
+static void luaD_reallocCI(lua_State*L,int newsize){
+CallInfo*oldci=L->base_ci;
+luaM_reallocvector(L,L->base_ci,L->size_ci,newsize,CallInfo);
+L->size_ci=newsize;
+L->ci=(L->ci-oldci)+L->base_ci;
+L->end_ci=L->base_ci+L->size_ci-1;
+}
+static void luaD_growstack(lua_State*L,int n){
+if(n<=L->stacksize)
+luaD_reallocstack(L,2*L->stacksize);
+else
+luaD_reallocstack(L,L->stacksize+n);
+}
+static CallInfo*growCI(lua_State*L){
+if(L->size_ci>20000)
+luaD_throw(L,5);
+else{
+luaD_reallocCI(L,2*L->size_ci);
+if(L->size_ci>20000)
+luaG_runerror(L,"stack overflow");
+}
+return++L->ci;
+}
+static StkId adjust_varargs(lua_State*L,Proto*p,int actual){
+int i;
+int nfixargs=p->numparams;
+Table*htab=NULL;
+StkId base,fixed;
+for(;actual<nfixargs;++actual)
+setnilvalue(L->top++);
+fixed=L->top-actual;
+base=L->top;
+for(i=0;i<nfixargs;i++){
+setobj(L,L->top++,fixed+i);
+setnilvalue(fixed+i);
+}
+if(htab){
+sethvalue(L,L->top++,htab);
+}
+return base;
+}
+static StkId tryfuncTM(lua_State*L,StkId func){
+const TValue*tm=luaT_gettmbyobj(L,func,TM_CALL);
+StkId p;
+ptrdiff_t funcr=savestack(L,func);
+if(!ttisfunction(tm))
+luaG_typeerror(L,func,"call");
+for(p=L->top;p>func;p--)setobj(L,p,p-1);
+incr_top(L);
+func=restorestack(L,funcr);
+setobj(L,func,tm);
+return func;
+}
+#define inc_ci(L)((L->ci==L->end_ci)?growCI(L):(condhardstacktests(luaD_reallocCI(L,L->size_ci)),++L->ci))
+static int luaD_precall(lua_State*L,StkId func,int nresults){
+LClosure*cl;
+ptrdiff_t funcr;
+if(!ttisfunction(func))
+func=tryfuncTM(L,func);
+funcr=savestack(L,func);
+cl=&clvalue(func)->l;
+L->ci->savedpc=L->savedpc;
+if(!cl->isC){
+CallInfo*ci;
+StkId st,base;
+Proto*p=cl->p;
+luaD_checkstack(L,p->maxstacksize+p->numparams);
+func=restorestack(L,funcr);
+if(!p->is_vararg){
+base=func+1;
+if(L->top>base+p->numparams)
+L->top=base+p->numparams;
+}
+else{
+int nargs=cast_int(L->top-func)-1;
+base=adjust_varargs(L,p,nargs);
+func=restorestack(L,funcr);
+}
+ci=inc_ci(L);
+ci->func=func;
+L->base=ci->base=base;
+ci->top=L->base+p->maxstacksize;
+L->savedpc=p->code;
+ci->tailcalls=0;
+ci->nresults=nresults;
+for(st=L->top;st<ci->top;st++)
+setnilvalue(st);
+L->top=ci->top;
+return 0;
+}
+else{
+CallInfo*ci;
+int n;
+luaD_checkstack(L,20);
+ci=inc_ci(L);
+ci->func=restorestack(L,funcr);
+L->base=ci->base=ci->func+1;
+ci->top=L->top+20;
+ci->nresults=nresults;
+n=(*curr_func(L)->c.f)(L);
+if(n<0)
+return 2;
+else{
+luaD_poscall(L,L->top-n);
+return 1;
+}
+}
+}
+static int luaD_poscall(lua_State*L,StkId firstResult){
+StkId res;
+int wanted,i;
+CallInfo*ci;
+ci=L->ci--;
+res=ci->func;
+wanted=ci->nresults;
+L->base=(ci-1)->base;
+L->savedpc=(ci-1)->savedpc;
+for(i=wanted;i!=0&&firstResult<L->top;i--)
+setobj(L,res++,firstResult++);
+while(i-->0)
+setnilvalue(res++);
+L->top=res;
+return(wanted-(-1));
+}
+static void luaD_call(lua_State*L,StkId func,int nResults){
+if(++L->nCcalls>=200){
+if(L->nCcalls==200)
+luaG_runerror(L,"C stack overflow");
+else if(L->nCcalls>=(200+(200>>3)))
+luaD_throw(L,5);
+}
+if(luaD_precall(L,func,nResults)==0)
+luaV_execute(L,1);
+L->nCcalls--;
+luaC_checkGC(L);
+}
+static int luaD_pcall(lua_State*L,Pfunc func,void*u,
+ptrdiff_t old_top,ptrdiff_t ef){
+int status;
+unsigned short oldnCcalls=L->nCcalls;
+ptrdiff_t old_ci=saveci(L,L->ci);
+lu_byte old_allowhooks=L->allowhook;
+ptrdiff_t old_errfunc=L->errfunc;
+L->errfunc=ef;
+status=luaD_rawrunprotected(L,func,u);
+if(status!=0){
+StkId oldtop=restorestack(L,old_top);
+luaF_close(L,oldtop);
+luaD_seterrorobj(L,status,oldtop);
+L->nCcalls=oldnCcalls;
+L->ci=restoreci(L,old_ci);
+L->base=L->ci->base;
+L->savedpc=L->ci->savedpc;
+L->allowhook=old_allowhooks;
+restore_stack_limit(L);
+}
+L->errfunc=old_errfunc;
+return status;
+}
+struct SParser{
+ZIO*z;
+Mbuffer buff;
+const char*name;
+};
+static void f_parser(lua_State*L,void*ud){
+int i;
+Proto*tf;
+Closure*cl;
+struct SParser*p=cast(struct SParser*,ud);
+luaC_checkGC(L);
+tf=luaY_parser(L,p->z,
+&p->buff,p->name);
+cl=luaF_newLclosure(L,tf->nups,hvalue(gt(L)));
+cl->l.p=tf;
+for(i=0;i<tf->nups;i++)
+cl->l.upvals[i]=luaF_newupval(L);
+setclvalue(L,L->top,cl);
+incr_top(L);
+}
+static int luaD_protectedparser(lua_State*L,ZIO*z,const char*name){
+struct SParser p;
+int status;
+p.z=z;p.name=name;
+luaZ_initbuffer(L,&p.buff);
+status=luaD_pcall(L,f_parser,&p,savestack(L,L->top),L->errfunc);
+luaZ_freebuffer(L,&p.buff);
+return status;
+}
+static void luaS_resize(lua_State*L,int newsize){
+GCObject**newhash;
+stringtable*tb;
+int i;
+if(G(L)->gcstate==2)
+return;
+newhash=luaM_newvector(L,newsize,GCObject*);
+tb=&G(L)->strt;
+for(i=0;i<newsize;i++)newhash[i]=NULL;
+for(i=0;i<tb->size;i++){
+GCObject*p=tb->hash[i];
+while(p){
+GCObject*next=p->gch.next;
+unsigned int h=gco2ts(p)->hash;
+int h1=lmod(h,newsize);
+p->gch.next=newhash[h1];
+newhash[h1]=p;
+p=next;
+}
+}
+luaM_freearray(L,tb->hash,tb->size,TString*);
+tb->size=newsize;
+tb->hash=newhash;
+}
+static TString*newlstr(lua_State*L,const char*str,size_t l,
+unsigned int h){
+TString*ts;
+stringtable*tb;
+if(l+1>(((size_t)(~(size_t)0)-2)-sizeof(TString))/sizeof(char))
+luaM_toobig(L);
+ts=cast(TString*,luaM_malloc(L,(l+1)*sizeof(char)+sizeof(TString)));
+ts->tsv.len=l;
+ts->tsv.hash=h;
+ts->tsv.marked=luaC_white(G(L));
+ts->tsv.tt=4;
+ts->tsv.reserved=0;
+memcpy(ts+1,str,l*sizeof(char));
+((char*)(ts+1))[l]='\0';
+tb=&G(L)->strt;
+h=lmod(h,tb->size);
+ts->tsv.next=tb->hash[h];
+tb->hash[h]=obj2gco(ts);
+tb->nuse++;
+if(tb->nuse>cast(lu_int32,tb->size)&&tb->size<=(INT_MAX-2)/2)
+luaS_resize(L,tb->size*2);
+return ts;
+}
+static TString*luaS_newlstr(lua_State*L,const char*str,size_t l){
+GCObject*o;
+unsigned int h=cast(unsigned int,l);
+size_t step=(l>>5)+1;
+size_t l1;
+for(l1=l;l1>=step;l1-=step)
+h=h^((h<<5)+(h>>2)+cast(unsigned char,str[l1-1]));
+for(o=G(L)->strt.hash[lmod(h,G(L)->strt.size)];
+o!=NULL;
+o=o->gch.next){
+TString*ts=rawgco2ts(o);
+if(ts->tsv.len==l&&(memcmp(str,getstr(ts),l)==0)){
+if(isdead(G(L),o))changewhite(o);
+return ts;
+}
+}
+return newlstr(L,str,l,h);
+}
+static Udata*luaS_newudata(lua_State*L,size_t s,Table*e){
+Udata*u;
+if(s>((size_t)(~(size_t)0)-2)-sizeof(Udata))
+luaM_toobig(L);
+u=cast(Udata*,luaM_malloc(L,s+sizeof(Udata)));
+u->uv.marked=luaC_white(G(L));
+u->uv.tt=7;
+u->uv.len=s;
+u->uv.metatable=NULL;
+u->uv.env=e;
+u->uv.next=G(L)->mainthread->next;
+G(L)->mainthread->next=obj2gco(u);
+return u;
+}
+#define hashpow2(t,n)(gnode(t,lmod((n),sizenode(t))))
+#define hashstr(t,str)hashpow2(t,(str)->tsv.hash)
+#define hashboolean(t,p)hashpow2(t,p)
+#define hashmod(t,n)(gnode(t,((n)%((sizenode(t)-1)|1))))
+#define hashpointer(t,p)hashmod(t,IntPoint(p))
+static const Node dummynode_={
+{{NULL},0},
+{{{NULL},0,NULL}}
+};
+static Node*hashnum(const Table*t,lua_Number n){
+unsigned int a[cast_int(sizeof(lua_Number)/sizeof(int))];
+int i;
+if(luai_numeq(n,0))
+return gnode(t,0);
+memcpy(a,&n,sizeof(a));
+for(i=1;i<cast_int(sizeof(lua_Number)/sizeof(int));i++)a[0]+=a[i];
+return hashmod(t,a[0]);
+}
+static Node*mainposition(const Table*t,const TValue*key){
+switch(ttype(key)){
+case 3:
+return hashnum(t,nvalue(key));
+case 4:
+return hashstr(t,rawtsvalue(key));
+case 1:
+return hashboolean(t,bvalue(key));
+case 2:
+return hashpointer(t,pvalue(key));
+default:
+return hashpointer(t,gcvalue(key));
+}
+}
+static int arrayindex(const TValue*key){
+if(ttisnumber(key)){
+lua_Number n=nvalue(key);
+int k;
+lua_number2int(k,n);
+if(luai_numeq(cast_num(k),n))
+return k;
+}
+return-1;
+}
+static int findindex(lua_State*L,Table*t,StkId key){
+int i;
+if(ttisnil(key))return-1;
+i=arrayindex(key);
+if(0<i&&i<=t->sizearray)
+return i-1;
+else{
+Node*n=mainposition(t,key);
+do{
+if(luaO_rawequalObj(key2tval(n),key)||
+(ttype(gkey(n))==(8+3)&&iscollectable(key)&&
+gcvalue(gkey(n))==gcvalue(key))){
+i=cast_int(n-gnode(t,0));
+return i+t->sizearray;
+}
+else n=gnext(n);
+}while(n);
+luaG_runerror(L,"invalid key to "LUA_QL("next"));
+return 0;
+}
+}
+static int luaH_next(lua_State*L,Table*t,StkId key){
+int i=findindex(L,t,key);
+for(i++;i<t->sizearray;i++){
+if(!ttisnil(&t->array[i])){
+setnvalue(key,cast_num(i+1));
+setobj(L,key+1,&t->array[i]);
+return 1;
+}
+}
+for(i-=t->sizearray;i<(int)sizenode(t);i++){
+if(!ttisnil(gval(gnode(t,i)))){
+setobj(L,key,key2tval(gnode(t,i)));
+setobj(L,key+1,gval(gnode(t,i)));
+return 1;
+}
+}
+return 0;
+}
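+/* Array-part sizing heuristic: nums[i] counts candidate integer keys in
+** each slice (2^(i-1),2^i]; computesizes then picks the largest n=2^i
+** for which more than half of the slots 1..n would be in use. */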
+static int computesizes(int nums[],int*narray){
+int i;
+int twotoi;
+int a=0;
+int na=0;
+int n=0;
+for(i=0,twotoi=1;twotoi/2<*narray;i++,twotoi*=2){
+if(nums[i]>0){
+a+=nums[i];
+if(a>twotoi/2){
+n=twotoi;
+na=a;
+}
+}
+if(a==*narray)break;
+}
+*narray=n;
+return na;
+}
+static int countint(const TValue*key,int*nums){
+int k=arrayindex(key);
+if(0<k&&k<=(1<<(32-2))){
+nums[ceillog2(k)]++;
+return 1;
+}
+else
+return 0;
+}
+static int numusearray(const Table*t,int*nums){
+int lg;
+int ttlg;
+int ause=0;
+int i=1;
+for(lg=0,ttlg=1;lg<=(32-2);lg++,ttlg*=2){
+int lc=0;
+int lim=ttlg;
+if(lim>t->sizearray){
+lim=t->sizearray;
+if(i>lim)
+break;
+}
+for(;i<=lim;i++){
+if(!ttisnil(&t->array[i-1]))
+lc++;
+}
+nums[lg]+=lc;
+ause+=lc;
+}
+return ause;
+}
+static int numusehash(const Table*t,int*nums,int*pnasize){
+int totaluse=0;
+int ause=0;
+int i=sizenode(t);
+while(i--){
+Node*n=&t->node[i];
+if(!ttisnil(gval(n))){
+ause+=countint(key2tval(n),nums);
+totaluse++;
+}
+}
+*pnasize+=ause;
+return totaluse;
+}
+static void setarrayvector(lua_State*L,Table*t,int size){
+int i;
+luaM_reallocvector(L,t->array,t->sizearray,size,TValue);
+for(i=t->sizearray;i<size;i++)
+setnilvalue(&t->array[i]);
+t->sizearray=size;
+}
+static void setnodevector(lua_State*L,Table*t,int size){
+int lsize;
+if(size==0){
+t->node=cast(Node*,(&dummynode_));
+lsize=0;
+}
+else{
+int i;
+lsize=ceillog2(size);
+if(lsize>(32-2))
+luaG_runerror(L,"table overflow");
+size=twoto(lsize);
+t->node=luaM_newvector(L,size,Node);
+for(i=0;i<size;i++){
+Node*n=gnode(t,i);
+gnext(n)=NULL;
+setnilvalue(gkey(n));
+setnilvalue(gval(n));
+}
+}
+t->lsizenode=cast_byte(lsize);
+t->lastfree=gnode(t,size);
+}
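+/* A resize re-inserts: values displaced from a shrinking array part go
+** through luaH_setnum, then every node of the old hash part is rehashed
+** into the new one before it is freed (unless it was the dummy node). */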
+static void resize(lua_State*L,Table*t,int nasize,int nhsize){
+int i;
+int oldasize=t->sizearray;
+int oldhsize=t->lsizenode;
+Node*nold=t->node;
+if(nasize>oldasize)
+setarrayvector(L,t,nasize);
+setnodevector(L,t,nhsize);
+if(nasize<oldasize){
+t->sizearray=nasize;
+for(i=nasize;i<oldasize;i++){
+if(!ttisnil(&t->array[i]))
+setobj(L,luaH_setnum(L,t,i+1),&t->array[i]);
+}
+luaM_reallocvector(L,t->array,oldasize,nasize,TValue);
+}
+for(i=twoto(oldhsize)-1;i>=0;i--){
+Node*old=nold+i;
+if(!ttisnil(gval(old)))
+setobj(L,luaH_set(L,t,key2tval(old)),gval(old));
+}
+if(nold!=(&dummynode_))
+luaM_freearray(L,nold,twoto(oldhsize),Node);
+}
+static void luaH_resizearray(lua_State*L,Table*t,int nasize){
+int nsize=(t->node==(&dummynode_))?0:sizenode(t);
+resize(L,t,nasize,nsize);
+}
+static void rehash(lua_State*L,Table*t,const TValue*ek){
+int nasize,na;
+int nums[(32-2)+1];
+int i;
+int totaluse;
+for(i=0;i<=(32-2);i++)nums[i]=0;
+nasize=numusearray(t,nums);
+totaluse=nasize;
+totaluse+=numusehash(t,nums,&nasize);
+nasize+=countint(ek,nums);
+totaluse++;
+na=computesizes(nums,&nasize);
+resize(L,t,nasize,totaluse-na);
+}
+static Table*luaH_new(lua_State*L,int narray,int nhash){
+Table*t=luaM_new(L,Table);
+luaC_link(L,obj2gco(t),5);
+t->metatable=NULL;
+t->flags=cast_byte(~0);
+t->array=NULL;
+t->sizearray=0;
+t->lsizenode=0;
+t->node=cast(Node*,(&dummynode_));
+setarrayvector(L,t,narray);
+setnodevector(L,t,nhash);
+return t;
+}
+static void luaH_free(lua_State*L,Table*t){
+if(t->node!=(&dummynode_))
+luaM_freearray(L,t->node,sizenode(t),Node);
+luaM_freearray(L,t->array,t->sizearray,TValue);
+luaM_free(L,t);
+}
+static Node*getfreepos(Table*t){
+while(t->lastfree-->t->node){
+if(ttisnil(gkey(t->lastfree)))
+return t->lastfree;
+}
+return NULL;
+}
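+/* Collision handling (chained scatter with Brent-style relocation): a
+** colliding key that is not in its own main position is moved to a free
+** slot so the new key can take the main position; otherwise the new key
+** goes to the free slot and is linked after the colliding one. */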
+static TValue*newkey(lua_State*L,Table*t,const TValue*key){
+Node*mp=mainposition(t,key);
+if(!ttisnil(gval(mp))||mp==(&dummynode_)){
+Node*othern;
+Node*n=getfreepos(t);
+if(n==NULL){
+rehash(L,t,key);
+return luaH_set(L,t,key);
+}
+othern=mainposition(t,key2tval(mp));
+if(othern!=mp){
+while(gnext(othern)!=mp)othern=gnext(othern);
+gnext(othern)=n;
+*n=*mp;
+gnext(mp)=NULL;
+setnilvalue(gval(mp));
+}
+else{
+gnext(n)=gnext(mp);
+gnext(mp)=n;
+mp=n;
+}
+}
+gkey(mp)->value=key->value;gkey(mp)->tt=key->tt;
+luaC_barriert(L,t,key);
+return gval(mp);
+}
+static const TValue*luaH_getnum(Table*t,int key){
+if(cast(unsigned int,key)-1<cast(unsigned int,t->sizearray))
+return&t->array[key-1];
+else{
+lua_Number nk=cast_num(key);
+Node*n=hashnum(t,nk);
+do{
+if(ttisnumber(gkey(n))&&luai_numeq(nvalue(gkey(n)),nk))
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+}
+static const TValue*luaH_getstr(Table*t,TString*key){
+Node*n=hashstr(t,key);
+do{
+if(ttisstring(gkey(n))&&rawtsvalue(gkey(n))==key)
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+static const TValue*luaH_get(Table*t,const TValue*key){
+switch(ttype(key)){
+case 0:return(&luaO_nilobject_);
+case 4:return luaH_getstr(t,rawtsvalue(key));
+case 3:{
+int k;
+lua_Number n=nvalue(key);
+lua_number2int(k,n);
+if(luai_numeq(cast_num(k),nvalue(key)))
+return luaH_getnum(t,k);
+}
+/*fallthrough*/
+default:{
+Node*n=mainposition(t,key);
+do{
+if(luaO_rawequalObj(key2tval(n),key))
+return gval(n);
+else n=gnext(n);
+}while(n);
+return(&luaO_nilobject_);
+}
+}
+}
+static TValue*luaH_set(lua_State*L,Table*t,const TValue*key){
+const TValue*p=luaH_get(t,key);
+t->flags=0;
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+if(ttisnil(key))luaG_runerror(L,"table index is nil");
+else if(ttisnumber(key)&&luai_numisnan(nvalue(key)))
+luaG_runerror(L,"table index is NaN");
+return newkey(L,t,key);
+}
+}
+static TValue*luaH_setnum(lua_State*L,Table*t,int key){
+const TValue*p=luaH_getnum(t,key);
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+TValue k;
+setnvalue(&k,cast_num(key));
+return newkey(L,t,&k);
+}
+}
+static TValue*luaH_setstr(lua_State*L,Table*t,TString*key){
+const TValue*p=luaH_getstr(t,key);
+if(p!=(&luaO_nilobject_))
+return cast(TValue*,p);
+else{
+TValue k;
+setsvalue(L,&k,key);
+return newkey(L,t,&k);
+}
+}
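+/* Border search for '#': binary search inside the array part; when the
+** array part is full, unbound_search doubles an index into the hash part
+** and falls back to a linear scan if the index nears INT_MAX. */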
+static int unbound_search(Table*t,unsigned int j){
+unsigned int i=j;
+j++;
+while(!ttisnil(luaH_getnum(t,j))){
+i=j;
+j*=2;
+if(j>cast(unsigned int,(INT_MAX-2))){
+i=1;
+while(!ttisnil(luaH_getnum(t,i)))i++;
+return i-1;
+}
+}
+while(j-i>1){
+unsigned int m=(i+j)/2;
+if(ttisnil(luaH_getnum(t,m)))j=m;
+else i=m;
+}
+return i;
+}
+static int luaH_getn(Table*t){
+unsigned int j=t->sizearray;
+if(j>0&&ttisnil(&t->array[j-1])){
+unsigned int i=0;
+while(j-i>1){
+unsigned int m=(i+j)/2;
+if(ttisnil(&t->array[m-1]))j=m;
+else i=m;
+}
+return i;
+}
+else if(t->node==(&dummynode_))
+return j;
+else return unbound_search(t,j);
+}
+#define makewhite(g,x)((x)->gch.marked=cast_byte(((x)->gch.marked&cast_byte(~(bitmask(2)|bit2mask(0,1))))|luaC_white(g)))
+#define white2gray(x)reset2bits((x)->gch.marked,0,1)
+#define black2gray(x)resetbit((x)->gch.marked,2)
+#define stringmark(s)reset2bits((s)->tsv.marked,0,1)
+#define isfinalized(u)testbit((u)->marked,3)
+#define markfinalized(u)l_setbit((u)->marked,3)
+#define markvalue(g,o){checkconsistency(o);if(iscollectable(o)&&iswhite(gcvalue(o)))reallymarkobject(g,gcvalue(o));}
+#define markobject(g,t){if(iswhite(obj2gco(t)))reallymarkobject(g,obj2gco(t));}
+#define setthreshold(g)(g->GCthreshold=(g->estimate/100)*g->gcpause)
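+/* Tri-color marking helpers: objects move white -> gray -> black, and
+** removeentry turns the key of a cleared slot into a dead key (tt==8+3)
+** so that chains remain traversable by 'next'. */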
+static void removeentry(Node*n){
+if(iscollectable(gkey(n)))
+setttype(gkey(n),(8+3));
+}
+static void reallymarkobject(global_State*g,GCObject*o){
+white2gray(o);
+switch(o->gch.tt){
+case 4:{
+return;
+}
+case 7:{
+Table*mt=gco2u(o)->metatable;
+gray2black(o);
+if(mt)markobject(g,mt);
+markobject(g,gco2u(o)->env);
+return;
+}
+case(8+2):{
+UpVal*uv=gco2uv(o);
+markvalue(g,uv->v);
+if(uv->v==&uv->u.value)
+gray2black(o);
+return;
+}
+case 6:{
+gco2cl(o)->c.gclist=g->gray;
+g->gray=o;
+break;
+}
+case 5:{
+gco2h(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case 8:{
+gco2th(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+case(8+1):{
+gco2p(o)->gclist=g->gray;
+g->gray=o;
+break;
+}
+default:;
+}
+}
+static void marktmu(global_State*g){
+GCObject*u=g->tmudata;
+if(u){
+do{
+u=u->gch.next;
+makewhite(g,u);
+reallymarkobject(g,u);
+}while(u!=g->tmudata);
+}
+}
+static size_t luaC_separateudata(lua_State*L,int all){
+global_State*g=G(L);
+size_t deadmem=0;
+GCObject**p=&g->mainthread->next;
+GCObject*curr;
+while((curr=*p)!=NULL){
+if(!(iswhite(curr)||all)||isfinalized(gco2u(curr)))
+p=&curr->gch.next;
+else if(fasttm(L,gco2u(curr)->metatable,TM_GC)==NULL){
+markfinalized(gco2u(curr));
+p=&curr->gch.next;
+}
+else{
+deadmem+=sizeudata(gco2u(curr));
+markfinalized(gco2u(curr));
+*p=curr->gch.next;
+if(g->tmudata==NULL)
+g->tmudata=curr->gch.next=curr;
+else{
+curr->gch.next=g->tmudata->gch.next;
+g->tmudata->gch.next=curr;
+g->tmudata=curr;
+}
+}
+}
+return deadmem;
+}
+static int traversetable(global_State*g,Table*h){
+int i;
+int weakkey=0;
+int weakvalue=0;
+const TValue*mode;
+if(h->metatable)
+markobject(g,h->metatable);
+mode=gfasttm(g,h->metatable,TM_MODE);
+if(mode&&ttisstring(mode)){
+weakkey=(strchr(svalue(mode),'k')!=NULL);
+weakvalue=(strchr(svalue(mode),'v')!=NULL);
+if(weakkey||weakvalue){
+h->marked&=~(bitmask(3)|bitmask(4));
+h->marked|=cast_byte((weakkey<<3)|
+(weakvalue<<4));
+h->gclist=g->weak;
+g->weak=obj2gco(h);
+}
+}
+if(weakkey&&weakvalue)return 1;
+if(!weakvalue){
+i=h->sizearray;
+while(i--)
+markvalue(g,&h->array[i]);
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(ttisnil(gval(n)))
+removeentry(n);
+else{
+if(!weakkey)markvalue(g,gkey(n));
+if(!weakvalue)markvalue(g,gval(n));
+}
+}
+return weakkey||weakvalue;
+}
+static void traverseproto(global_State*g,Proto*f){
+int i;
+if(f->source)stringmark(f->source);
+for(i=0;i<f->sizek;i++)
+markvalue(g,&f->k[i]);
+for(i=0;i<f->sizeupvalues;i++){
+if(f->upvalues[i])
+stringmark(f->upvalues[i]);
+}
+for(i=0;i<f->sizep;i++){
+if(f->p[i])
+markobject(g,f->p[i]);
+}
+for(i=0;i<f->sizelocvars;i++){
+if(f->locvars[i].varname)
+stringmark(f->locvars[i].varname);
+}
+}
+static void traverseclosure(global_State*g,Closure*cl){
+markobject(g,cl->c.env);
+if(cl->c.isC){
+int i;
+for(i=0;i<cl->c.nupvalues;i++)
+markvalue(g,&cl->c.upvalue[i]);
+}
+else{
+int i;
+markobject(g,cl->l.p);
+for(i=0;i<cl->l.nupvalues;i++)
+markobject(g,cl->l.upvals[i]);
+}
+}
+static void checkstacksizes(lua_State*L,StkId max){
+int ci_used=cast_int(L->ci-L->base_ci);
+int s_used=cast_int(max-L->stack);
+if(L->size_ci>20000)
+return;
+if(4*ci_used<L->size_ci&&2*8<L->size_ci)
+luaD_reallocCI(L,L->size_ci/2);
+condhardstacktests(luaD_reallocCI(L,ci_used+1));
+if(4*s_used<L->stacksize&&
+2*((2*20)+5)<L->stacksize)
+luaD_reallocstack(L,L->stacksize/2);
+condhardstacktests(luaD_reallocstack(L,s_used));
+}
+static void traversestack(global_State*g,lua_State*l){
+StkId o,lim;
+CallInfo*ci;
+markvalue(g,gt(l));
+lim=l->top;
+for(ci=l->base_ci;ci<=l->ci;ci++){
+if(lim<ci->top)lim=ci->top;
+}
+for(o=l->stack;o<l->top;o++)
+markvalue(g,o);
+for(;o<=lim;o++)
+setnilvalue(o);
+checkstacksizes(l,lim);
+}
+static l_mem propagatemark(global_State*g){
+GCObject*o=g->gray;
+gray2black(o);
+switch(o->gch.tt){
+case 5:{
+Table*h=gco2h(o);
+g->gray=h->gclist;
+if(traversetable(g,h))
+black2gray(o);
+return sizeof(Table)+sizeof(TValue)*h->sizearray+
+sizeof(Node)*sizenode(h);
+}
+case 6:{
+Closure*cl=gco2cl(o);
+g->gray=cl->c.gclist;
+traverseclosure(g,cl);
+return(cl->c.isC)?sizeCclosure(cl->c.nupvalues):
+sizeLclosure(cl->l.nupvalues);
+}
+case 8:{
+lua_State*th=gco2th(o);
+g->gray=th->gclist;
+th->gclist=g->grayagain;
+g->grayagain=o;
+black2gray(o);
+traversestack(g,th);
+return sizeof(lua_State)+sizeof(TValue)*th->stacksize+
+sizeof(CallInfo)*th->size_ci;
+}
+case(8+1):{
+Proto*p=gco2p(o);
+g->gray=p->gclist;
+traverseproto(g,p);
+return sizeof(Proto)+sizeof(Instruction)*p->sizecode+
+sizeof(Proto*)*p->sizep+
+sizeof(TValue)*p->sizek+
+sizeof(int)*p->sizelineinfo+
+sizeof(LocVar)*p->sizelocvars+
+sizeof(TString*)*p->sizeupvalues;
+}
+default:return 0;
+}
+}
+static size_t propagateall(global_State*g){
+size_t m=0;
+while(g->gray)m+=propagatemark(g);
+return m;
+}
+static int iscleared(const TValue*o,int iskey){
+if(!iscollectable(o))return 0;
+if(ttisstring(o)){
+stringmark(rawtsvalue(o));
+return 0;
+}
+return iswhite(gcvalue(o))||
+(ttisuserdata(o)&&(!iskey&&isfinalized(uvalue(o))));
+}
+static void cleartable(GCObject*l){
+while(l){
+Table*h=gco2h(l);
+int i=h->sizearray;
+if(testbit(h->marked,4)){
+while(i--){
+TValue*o=&h->array[i];
+if(iscleared(o,0))
+setnilvalue(o);
+}
+}
+i=sizenode(h);
+while(i--){
+Node*n=gnode(h,i);
+if(!ttisnil(gval(n))&&
+(iscleared(key2tval(n),1)||iscleared(gval(n),0))){
+setnilvalue(gval(n));
+removeentry(n);
+}
+}
+l=h->gclist;
+}
+}
+static void freeobj(lua_State*L,GCObject*o){
+switch(o->gch.tt){
+case(8+1):luaF_freeproto(L,gco2p(o));break;
+case 6:luaF_freeclosure(L,gco2cl(o));break;
+case(8+2):luaF_freeupval(L,gco2uv(o));break;
+case 5:luaH_free(L,gco2h(o));break;
+case 8:{
+luaE_freethread(L,gco2th(o));
+break;
+}
+case 4:{
+G(L)->strt.nuse--;
+luaM_freemem(L,o,sizestring(gco2ts(o)));
+break;
+}
+case 7:{
+luaM_freemem(L,o,sizeudata(gco2u(o)));
+break;
+}
+default:;
+}
+}
+#define sweepwholelist(L,p)sweeplist(L,p,((lu_mem)(~(lu_mem)0)-2))
+static GCObject**sweeplist(lua_State*L,GCObject**p,lu_mem count){
+GCObject*curr;
+global_State*g=G(L);
+int deadmask=otherwhite(g);
+while((curr=*p)!=NULL&&count-->0){
+if(curr->gch.tt==8)
+sweepwholelist(L,&gco2th(curr)->openupval);
+if((curr->gch.marked^bit2mask(0,1))&deadmask){
+makewhite(g,curr);
+p=&curr->gch.next;
+}
+else{
+*p=curr->gch.next;
+if(curr==g->rootgc)
+g->rootgc=curr->gch.next;
+freeobj(L,curr);
+}
+}
+return p;
+}
+static void checkSizes(lua_State*L){
+global_State*g=G(L);
+if(g->strt.nuse<cast(lu_int32,g->strt.size/4)&&
+g->strt.size>32*2)
+luaS_resize(L,g->strt.size/2);
+if(luaZ_sizebuffer(&g->buff)>32*2){
+size_t newsize=luaZ_sizebuffer(&g->buff)/2;
+luaZ_resizebuffer(L,&g->buff,newsize);
+}
+}
+static void GCTM(lua_State*L){
+global_State*g=G(L);
+GCObject*o=g->tmudata->gch.next;
+Udata*udata=rawgco2u(o);
+const TValue*tm;
+if(o==g->tmudata)
+g->tmudata=NULL;
+else
+g->tmudata->gch.next=udata->uv.next;
+udata->uv.next=g->mainthread->next;
+g->mainthread->next=o;
+makewhite(g,o);
+tm=fasttm(L,udata->uv.metatable,TM_GC);
+if(tm!=NULL){
+lu_byte oldah=L->allowhook;
+lu_mem oldt=g->GCthreshold;
+L->allowhook=0;
+g->GCthreshold=2*g->totalbytes;
+setobj(L,L->top,tm);
+setuvalue(L,L->top+1,udata);
+L->top+=2;
+luaD_call(L,L->top-2,0);
+L->allowhook=oldah;
+g->GCthreshold=oldt;
+}
+}
+static void luaC_callGCTM(lua_State*L){
+while(G(L)->tmudata)
+GCTM(L);
+}
+static void luaC_freeall(lua_State*L){
+global_State*g=G(L);
+int i;
+g->currentwhite=bit2mask(0,1)|bitmask(6);
+sweepwholelist(L,&g->rootgc);
+for(i=0;i<g->strt.size;i++)
+sweepwholelist(L,&g->strt.hash[i]);
+}
+static void markmt(global_State*g){
+int i;
+for(i=0;i<(8+1);i++)
+if(g->mt[i])markobject(g,g->mt[i]);
+}
+static void markroot(lua_State*L){
+global_State*g=G(L);
+g->gray=NULL;
+g->grayagain=NULL;
+g->weak=NULL;
+markobject(g,g->mainthread);
+markvalue(g,gt(g->mainthread));
+markvalue(g,registry(L));
+markmt(g);
+g->gcstate=1;
+}
+static void remarkupvals(global_State*g){
+UpVal*uv;
+for(uv=g->uvhead.u.l.next;uv!=&g->uvhead;uv=uv->u.l.next){
+if(isgray(obj2gco(uv)))
+markvalue(g,uv->v);
+}
+}
+static void atomic(lua_State*L){
+global_State*g=G(L);
+size_t udsize;
+remarkupvals(g);
+propagateall(g);
+g->gray=g->weak;
+g->weak=NULL;
+markobject(g,L);
+markmt(g);
+propagateall(g);
+g->gray=g->grayagain;
+g->grayagain=NULL;
+propagateall(g);
+udsize=luaC_separateudata(L,0);
+marktmu(g);
+udsize+=propagateall(g);
+cleartable(g->weak);
+g->currentwhite=cast_byte(otherwhite(g));
+g->sweepstrgc=0;
+g->sweepgc=&g->rootgc;
+g->gcstate=2;
+g->estimate=g->totalbytes-udsize;
+}
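+/* GC state machine: 0 pause -> markroot, 1 propagate (with 'atomic' once
+** the gray list drains), 2 sweep strings, 3 sweep other objects, 4 run
+** userdata finalizers, then back to 0. Return values feed the step
+** budget consumed by luaC_step. */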
+static l_mem singlestep(lua_State*L){
+global_State*g=G(L);
+switch(g->gcstate){
+case 0:{
+markroot(L);
+return 0;
+}
+case 1:{
+if(g->gray)
+return propagatemark(g);
+else{
+atomic(L);
+return 0;
+}
+}
+case 2:{
+lu_mem old=g->totalbytes;
+sweepwholelist(L,&g->strt.hash[g->sweepstrgc++]);
+if(g->sweepstrgc>=g->strt.size)
+g->gcstate=3;
+g->estimate-=old-g->totalbytes;
+return 10;
+}
+case 3:{
+lu_mem old=g->totalbytes;
+g->sweepgc=sweeplist(L,g->sweepgc,40);
+if(*g->sweepgc==NULL){
+checkSizes(L);
+g->gcstate=4;
+}
+g->estimate-=old-g->totalbytes;
+return 40*10;
+}
+case 4:{
+if(g->tmudata){
+GCTM(L);
+if(g->estimate>100)
+g->estimate-=100;
+return 100;
+}
+else{
+g->gcstate=0;
+g->gcdept=0;
+return 0;
+}
+}
+default:return 0;
+}
+}
+static void luaC_step(lua_State*L){
+global_State*g=G(L);
+l_mem lim=(1024u/100)*g->gcstepmul;
+if(lim==0)
+lim=(((lu_mem)(~(lu_mem)0)-2)-1)/2;
+g->gcdept+=g->totalbytes-g->GCthreshold;
+do{
+lim-=singlestep(L);
+if(g->gcstate==0)
+break;
+}while(lim>0);
+if(g->gcstate!=0){
+if(g->gcdept<1024u)
+g->GCthreshold=g->totalbytes+1024u;
+else{
+g->gcdept-=1024u;
+g->GCthreshold=g->totalbytes;
+}
+}
+else{
+setthreshold(g);
+}
+}
+static void luaC_barrierf(lua_State*L,GCObject*o,GCObject*v){
+global_State*g=G(L);
+if(g->gcstate==1)
+reallymarkobject(g,v);
+else
+makewhite(g,o);
+}
+static void luaC_barrierback(lua_State*L,Table*t){
+global_State*g=G(L);
+GCObject*o=obj2gco(t);
+black2gray(o);
+t->gclist=g->grayagain;
+g->grayagain=o;
+}
+static void luaC_link(lua_State*L,GCObject*o,lu_byte tt){
+global_State*g=G(L);
+o->gch.next=g->rootgc;
+g->rootgc=o;
+o->gch.marked=luaC_white(g);
+o->gch.tt=tt;
+}
+static void luaC_linkupval(lua_State*L,UpVal*uv){
+global_State*g=G(L);
+GCObject*o=obj2gco(uv);
+o->gch.next=g->rootgc;
+g->rootgc=o;
+if(isgray(o)){
+if(g->gcstate==1){
+gray2black(o);
+luaC_barrier(L,uv,uv->v);
+}
+else{
+makewhite(g,o);
+}
+}
+}
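+/* Lexer state: one token of lookahead (lookahead.token==TK_EOS means
+** none pending); 'decpoint' is the decimal separator substituted into
+** numerals before conversion, fixed to '.' here. */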
+typedef union{
+lua_Number r;
+TString*ts;
+}SemInfo;
+typedef struct Token{
+int token;
+SemInfo seminfo;
+}Token;
+typedef struct LexState{
+int current;
+int linenumber;
+int lastline;
+Token t;
+Token lookahead;
+struct FuncState*fs;
+struct lua_State*L;
+ZIO*z;
+Mbuffer*buff;
+TString*source;
+char decpoint;
+}LexState;
+static void luaX_init(lua_State*L);
+static void luaX_lexerror(LexState*ls,const char*msg,int token);
+#define state_size(x)(sizeof(x)+0)
+#define fromstate(l)(cast(lu_byte*,(l))-0)
+#define tostate(l)(cast(lua_State*,cast(lu_byte*,l)+0))
+typedef struct LG{
+lua_State l;
+global_State g;
+}LG;
+static void stack_init(lua_State*L1,lua_State*L){
+L1->base_ci=luaM_newvector(L,8,CallInfo);
+L1->ci=L1->base_ci;
+L1->size_ci=8;
+L1->end_ci=L1->base_ci+L1->size_ci-1;
+L1->stack=luaM_newvector(L,(2*20)+5,TValue);
+L1->stacksize=(2*20)+5;
+L1->top=L1->stack;
+L1->stack_last=L1->stack+(L1->stacksize-5)-1;
+L1->ci->func=L1->top;
+setnilvalue(L1->top++);
+L1->base=L1->ci->base=L1->top;
+L1->ci->top=L1->top+20;
+}
+static void freestack(lua_State*L,lua_State*L1){
+luaM_freearray(L,L1->base_ci,L1->size_ci,CallInfo);
+luaM_freearray(L,L1->stack,L1->stacksize,TValue);
+}
+static void f_luaopen(lua_State*L,void*ud){
+global_State*g=G(L);
+UNUSED(ud);
+stack_init(L,L);
+sethvalue(L,gt(L),luaH_new(L,0,2));
+sethvalue(L,registry(L),luaH_new(L,0,2));
+luaS_resize(L,32);
+luaT_init(L);
+luaX_init(L);
+luaS_fix(luaS_newliteral(L,"not enough memory"));
+g->GCthreshold=4*g->totalbytes;
+}
+static void preinit_state(lua_State*L,global_State*g){
+G(L)=g;
+L->stack=NULL;
+L->stacksize=0;
+L->errorJmp=NULL;
+L->hook=NULL;
+L->hookmask=0;
+L->basehookcount=0;
+L->allowhook=1;
+resethookcount(L);
+L->openupval=NULL;
+L->size_ci=0;
+L->nCcalls=L->baseCcalls=0;
+L->status=0;
+L->base_ci=L->ci=NULL;
+L->savedpc=NULL;
+L->errfunc=0;
+setnilvalue(gt(L));
+}
+static void close_state(lua_State*L){
+global_State*g=G(L);
+luaF_close(L,L->stack);
+luaC_freeall(L);
+luaM_freearray(L,G(L)->strt.hash,G(L)->strt.size,TString*);
+luaZ_freebuffer(L,&g->buff);
+freestack(L,L);
+(*g->frealloc)(g->ud,fromstate(L),state_size(LG),0);
+}
+static void luaE_freethread(lua_State*L,lua_State*L1){
+luaF_close(L1,L1->stack);
+freestack(L,L1);
+luaM_freemem(L,fromstate(L1),state_size(lua_State));
+}
+static lua_State*lua_newstate(lua_Alloc f,void*ud){
+int i;
+lua_State*L;
+global_State*g;
+void*l=(*f)(ud,NULL,0,state_size(LG));
+if(l==NULL)return NULL;
+L=tostate(l);
+g=&((LG*)L)->g;
+L->next=NULL;
+L->tt=8;
+g->currentwhite=bit2mask(0,5);
+L->marked=luaC_white(g);
+set2bits(L->marked,5,6);
+preinit_state(L,g);
+g->frealloc=f;
+g->ud=ud;
+g->mainthread=L;
+g->uvhead.u.l.prev=&g->uvhead;
+g->uvhead.u.l.next=&g->uvhead;
+g->GCthreshold=0;
+g->strt.size=0;
+g->strt.nuse=0;
+g->strt.hash=NULL;
+setnilvalue(registry(L));
+luaZ_initbuffer(L,&g->buff);
+g->panic=NULL;
+g->gcstate=0;
+g->rootgc=obj2gco(L);
+g->sweepstrgc=0;
+g->sweepgc=&g->rootgc;
+g->gray=NULL;
+g->grayagain=NULL;
+g->weak=NULL;
+g->tmudata=NULL;
+g->totalbytes=sizeof(LG);
+g->gcpause=200;
+g->gcstepmul=200;
+g->gcdept=0;
+for(i=0;i<(8+1);i++)g->mt[i]=NULL;
+if(luaD_rawrunprotected(L,f_luaopen,NULL)!=0){
+close_state(L);
+L=NULL;
+}
+return L;
+}
+static void callallgcTM(lua_State*L,void*ud){
+UNUSED(ud);
+luaC_callGCTM(L);
+}
+static void lua_close(lua_State*L){
+L=G(L)->mainthread;
+luaF_close(L,L->stack);
+luaC_separateudata(L,1);
+L->errfunc=0;
+do{
+L->ci=L->base_ci;
+L->base=L->top=L->ci->base;
+L->nCcalls=L->baseCcalls=0;
+}while(luaD_rawrunprotected(L,callallgcTM,NULL)!=0);
+close_state(L);
+}
+#define getcode(fs,e)((fs)->f->code[(e)->u.s.info])
+#define luaK_codeAsBx(fs,o,A,sBx)luaK_codeABx(fs,o,A,(sBx)+(((1<<(9+9))-1)>>1))
+#define luaK_setmultret(fs,e)luaK_setreturns(fs,e,(-1))
+static int luaK_codeABx(FuncState*fs,OpCode o,int A,unsigned int Bx);
+static int luaK_codeABC(FuncState*fs,OpCode o,int A,int B,int C);
+static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults);
+static void luaK_patchtohere(FuncState*fs,int list);
+static void luaK_concat(FuncState*fs,int*l1,int l2);
+static int currentpc(lua_State*L,CallInfo*ci){
+if(!isLua(ci))return-1;
+if(ci==L->ci)
+ci->savedpc=L->savedpc;
+return pcRel(ci->savedpc,ci_func(ci)->l.p);
+}
+static int currentline(lua_State*L,CallInfo*ci){
+int pc=currentpc(L,ci);
+if(pc<0)
+return-1;
+else
+return getline_(ci_func(ci)->l.p,pc);
+}
+static int lua_getstack(lua_State*L,int level,lua_Debug*ar){
+int status;
+CallInfo*ci;
+for(ci=L->ci;level>0&&ci>L->base_ci;ci--){
+level--;
+if(f_isLua(ci))
+level-=ci->tailcalls;
+}
+if(level==0&&ci>L->base_ci){
+status=1;
+ar->i_ci=cast_int(ci-L->base_ci);
+}
+else if(level<0){
+status=1;
+ar->i_ci=0;
+}
+else status=0;
+return status;
+}
+static Proto*getluaproto(CallInfo*ci){
+return(isLua(ci)?ci_func(ci)->l.p:NULL);
+}
+static void funcinfo(lua_Debug*ar,Closure*cl){
+if(cl->c.isC){
+ar->source="=[C]";
+ar->linedefined=-1;
+ar->lastlinedefined=-1;
+ar->what="C";
+}
+else{
+ar->source=getstr(cl->l.p->source);
+ar->linedefined=cl->l.p->linedefined;
+ar->lastlinedefined=cl->l.p->lastlinedefined;
+ar->what=(ar->linedefined==0)?"main":"Lua";
+}
+luaO_chunkid(ar->short_src,ar->source,60);
+}
+static void info_tailcall(lua_Debug*ar){
+ar->name=ar->namewhat="";
+ar->what="tail";
+ar->lastlinedefined=ar->linedefined=ar->currentline=-1;
+ar->source="=(tail call)";
+luaO_chunkid(ar->short_src,ar->source,60);
+ar->nups=0;
+}
+static void collectvalidlines(lua_State*L,Closure*f){
+if(f==NULL||f->c.isC){
+setnilvalue(L->top);
+}
+else{
+Table*t=luaH_new(L,0,0);
+int*lineinfo=f->l.p->lineinfo;
+int i;
+for(i=0;i<f->l.p->sizelineinfo;i++)
+setbvalue(luaH_setnum(L,t,lineinfo[i]),1);
+sethvalue(L,L->top,t);
+}
+incr_top(L);
+}
+static int auxgetinfo(lua_State*L,const char*what,lua_Debug*ar,
+Closure*f,CallInfo*ci){
+int status=1;
+if(f==NULL){
+info_tailcall(ar);
+return status;
+}
+for(;*what;what++){
+switch(*what){
+case'S':{
+funcinfo(ar,f);
+break;
+}
+case'l':{
+ar->currentline=(ci)?currentline(L,ci):-1;
+break;
+}
+case'u':{
+ar->nups=f->c.nupvalues;
+break;
+}
+case'n':{
+/* getobjname appears stripped by the minifier: no name is recoverable */
+ar->namewhat="";
+ar->name=NULL;
+break;
+}
+case'L':
+case'f':
+break;
+default:status=0;
+}
+}
+return status;
+}
+static int lua_getinfo(lua_State*L,const char*what,lua_Debug*ar){
+int status;
+Closure*f=NULL;
+CallInfo*ci=NULL;
+if(*what=='>'){
+StkId func=L->top-1;
+luai_apicheck(L,ttisfunction(func));
+what++;
+f=clvalue(func);
+L->top--;
+}
+else if(ar->i_ci!=0){
+ci=L->base_ci+ar->i_ci;
+f=clvalue(ci->func);
+}
+status=auxgetinfo(L,what,ar,f,ci);
+if(strchr(what,'f')){
+if(f==NULL)setnilvalue(L->top);
+else setclvalue(L,L->top,f);
+incr_top(L);
+}
+if(strchr(what,'L'))
+collectvalidlines(L,f);
+return status;
+}
+static int isinstack(CallInfo*ci,const TValue*o){
+StkId p;
+for(p=ci->base;p<ci->top;p++)
+if(o==p)return 1;
+return 0;
+}
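+/* Type-error reporting: the variable-name lookup normally done by
+** getobjname appears to have been stripped in minification ('kind' is
+** always NULL), so only the generic message is ever produced. */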
+static void luaG_typeerror(lua_State*L,const TValue*o,const char*op){
+const char*name=NULL;
+const char*t=luaT_typenames[ttype(o)];
+const char*kind=(isinstack(L->ci,o))?
+NULL:
+NULL;
+if(kind)
+luaG_runerror(L,"attempt to %s %s "LUA_QL("%s")" (a %s value)",
+op,kind,name,t);
+else
+luaG_runerror(L,"attempt to %s a %s value",op,t);
+}
+static void luaG_concaterror(lua_State*L,StkId p1,StkId p2){
+if(ttisstring(p1)||ttisnumber(p1))p1=p2;
+luaG_typeerror(L,p1,"concatenate");
+}
+static void luaG_aritherror(lua_State*L,const TValue*p1,const TValue*p2){
+TValue temp;
+if(luaV_tonumber(p1,&temp)==NULL)
+p2=p1;
+luaG_typeerror(L,p2,"perform arithmetic on");
+}
+static int luaG_ordererror(lua_State*L,const TValue*p1,const TValue*p2){
+const char*t1=luaT_typenames[ttype(p1)];
+const char*t2=luaT_typenames[ttype(p2)];
+if(t1[2]==t2[2])
+luaG_runerror(L,"attempt to compare two %s values",t1);
+else
+luaG_runerror(L,"attempt to compare %s with %s",t1,t2);
+return 0;
+}
+static void addinfo(lua_State*L,const char*msg){
+CallInfo*ci=L->ci;
+if(isLua(ci)){
+char buff[60];
+int line=currentline(L,ci);
+luaO_chunkid(buff,getstr(getluaproto(ci)->source),60);
+luaO_pushfstring(L,"%s:%d: %s",buff,line,msg);
+}
+}
+static void luaG_errormsg(lua_State*L){
+if(L->errfunc!=0){
+StkId errfunc=restorestack(L,L->errfunc);
+if(!ttisfunction(errfunc))luaD_throw(L,5);
+setobj(L,L->top,L->top-1);
+setobj(L,L->top-1,errfunc);
+incr_top(L);
+luaD_call(L,L->top-2,1);
+}
+luaD_throw(L,2);
+}
+static void luaG_runerror(lua_State*L,const char*fmt,...){
+va_list argp;
+va_start(argp,fmt);
+addinfo(L,luaO_pushvfstring(L,fmt,argp));
+va_end(argp);
+luaG_errormsg(L);
+}
+static int luaZ_fill(ZIO*z){
+size_t size;
+lua_State*L=z->L;
+const char*buff;
+buff=z->reader(L,z->data,&size);
+if(buff==NULL||size==0)return(-1);
+z->n=size-1;
+z->p=buff;
+return char2int(*(z->p++));
+}
+static void luaZ_init(lua_State*L,ZIO*z,lua_Reader reader,void*data){
+z->L=L;
+z->reader=reader;
+z->data=data;
+z->n=0;
+z->p=NULL;
+}
+static char*luaZ_openspace(lua_State*L,Mbuffer*buff,size_t n){
+if(n>buff->buffsize){
+if(n<32)n=32;
+luaZ_resizebuffer(L,buff,n);
+}
+return buff->buffer;
+}
+#define opmode(t,a,b,c,m)(((t)<<7)|((a)<<6)|((b)<<4)|((c)<<2)|(m))
+static const lu_byte luaP_opmodes[(cast(int,OP_VARARG)+1)]={
+opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgK,OpArgN,iABx)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgU,OpArgN,iABC)
+,opmode(0,1,OpArgK,OpArgN,iABx)
+,opmode(0,1,OpArgR,OpArgK,iABC)
+,opmode(0,0,OpArgK,OpArgN,iABx)
+,opmode(0,0,OpArgU,OpArgN,iABC)
+,opmode(0,0,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgR,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgK,OpArgK,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgR,iABC)
+,opmode(0,0,OpArgR,OpArgN,iAsBx)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,0,OpArgK,OpArgK,iABC)
+,opmode(1,1,OpArgR,OpArgU,iABC)
+,opmode(1,1,OpArgR,OpArgU,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,1,OpArgU,OpArgU,iABC)
+,opmode(0,0,OpArgU,OpArgN,iABC)
+,opmode(0,1,OpArgR,OpArgN,iAsBx)
+,opmode(0,1,OpArgR,OpArgN,iAsBx)
+,opmode(1,0,OpArgN,OpArgU,iABC)
+,opmode(0,0,OpArgU,OpArgU,iABC)
+,opmode(0,0,OpArgN,OpArgN,iABC)
+,opmode(0,1,OpArgU,OpArgN,iABx)
+,opmode(0,1,OpArgU,OpArgN,iABC)
+};
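+/* Each opmode byte packs: test flag (bit 7), "sets register A" (bit 6),
+** B-argument mode (bits 4-5), C-argument mode (bits 2-3) and the
+** instruction format iABC/iABx/iAsBx (bits 0-1). */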
+#define next(ls)(ls->current=zgetc(ls->z))
+#define currIsNewline(ls)(ls->current=='\n'||ls->current=='\r')
+static const char*const luaX_tokens[]={
+"and","break","do","else","elseif",
+"end","false","for","function","if",
+"in","local","nil","not","or","repeat",
+"return","then","true","until","while",
+"..","...","==",">=","<=","~=",
+"<number>","<name>","<string>","<eof>",
+NULL
+};
+#define save_and_next(ls)(save(ls,ls->current),next(ls))
+static void save(LexState*ls,int c){
+Mbuffer*b=ls->buff;
+if(b->n+1>b->buffsize){
+size_t newsize;
+if(b->buffsize>=((size_t)(~(size_t)0)-2)/2)
+luaX_lexerror(ls,"lexical element too long",0);
+newsize=b->buffsize*2;
+luaZ_resizebuffer(ls->L,b,newsize);
+}
+b->buffer[b->n++]=cast(char,c);
+}
+static void luaX_init(lua_State*L){
+int i;
+for(i=0;i<(cast(int,TK_WHILE-257+1));i++){
+TString*ts=luaS_new(L,luaX_tokens[i]);
+luaS_fix(ts);
+ts->tsv.reserved=cast_byte(i+1);
+}
+}
+static const char*luaX_token2str(LexState*ls,int token){
+if(token<257){
+return(iscntrl(token))?luaO_pushfstring(ls->L,"char(%d)",token):
+luaO_pushfstring(ls->L,"%c",token);
+}
+else
+return luaX_tokens[token-257];
+}
+static const char*txtToken(LexState*ls,int token){
+switch(token){
+case TK_NAME:
+case TK_STRING:
+case TK_NUMBER:
+save(ls,'\0');
+return luaZ_buffer(ls->buff);
+default:
+return luaX_token2str(ls,token);
+}
+}
+static void luaX_lexerror(LexState*ls,const char*msg,int token){
+char buff[80];
+luaO_chunkid(buff,getstr(ls->source),80);
+msg=luaO_pushfstring(ls->L,"%s:%d: %s",buff,ls->linenumber,msg);
+if(token)
+luaO_pushfstring(ls->L,"%s near "LUA_QL("%s"),msg,txtToken(ls,token));
+luaD_throw(ls->L,3);
+}
+static void luaX_syntaxerror(LexState*ls,const char*msg){
+luaX_lexerror(ls,msg,ls->t.token);
+}
+static TString*luaX_newstring(LexState*ls,const char*str,size_t l){
+lua_State*L=ls->L;
+TString*ts=luaS_newlstr(L,str,l);
+TValue*o=luaH_setstr(L,ls->fs->h,ts);
+if(ttisnil(o)){
+setbvalue(o,1);
+luaC_checkGC(L);
+}
+return ts;
+}
+static void inclinenumber(LexState*ls){
+int old=ls->current;
+next(ls);
+if(currIsNewline(ls)&&ls->current!=old)
+next(ls);
+if(++ls->linenumber>=(INT_MAX-2))
+luaX_syntaxerror(ls,"chunk has too many lines");
+}
+static void luaX_setinput(lua_State*L,LexState*ls,ZIO*z,TString*source){
+ls->decpoint='.';
+ls->L=L;
+ls->lookahead.token=TK_EOS;
+ls->z=z;
+ls->fs=NULL;
+ls->linenumber=1;
+ls->lastline=1;
+ls->source=source;
+luaZ_resizebuffer(ls->L,ls->buff,32);
+next(ls);
+}
+static int check_next(LexState*ls,const char*set){
+if(!strchr(set,ls->current))
+return 0;
+save_and_next(ls);
+return 1;
+}
+static void buffreplace(LexState*ls,char from,char to){
+size_t n=luaZ_bufflen(ls->buff);
+char*p=luaZ_buffer(ls->buff);
+while(n--)
+if(p[n]==from)p[n]=to;
+}
+static void read_numeral(LexState*ls,SemInfo*seminfo){
+do{
+save_and_next(ls);
+}while(isdigit(ls->current)||ls->current=='.');
+if(check_next(ls,"Ee"))
+check_next(ls,"+-");
+while(isalnum(ls->current)||ls->current=='_')
+save_and_next(ls);
+save(ls,'\0');
+buffreplace(ls,'.',ls->decpoint);
+if(!luaO_str2d(luaZ_buffer(ls->buff),&seminfo->r))
+luaX_lexerror(ls,"malformed number",TK_NUMBER);
+}
+static int skip_sep(LexState*ls){
+int count=0;
+int s=ls->current;
+save_and_next(ls);
+while(ls->current=='='){
+save_and_next(ls);
+count++;
+}
+return(ls->current==s)?count:(-count)-1;
+}
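+/* Long brackets: skip_sep counts the '=' signs between the two opening
+** (or closing) brackets and returns that level, negative on a mismatch;
+** a long string only ends at a ']' run of the same level. */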
+static void read_long_string(LexState*ls,SemInfo*seminfo,int sep){
+int cont=0;
+(void)(cont);
+save_and_next(ls);
+if(currIsNewline(ls))
+inclinenumber(ls);
+for(;;){
+switch(ls->current){
+case(-1):
+luaX_lexerror(ls,(seminfo)?"unfinished long string":
+"unfinished long comment",TK_EOS);
+break;
+case']':{
+if(skip_sep(ls)==sep){
+save_and_next(ls);
+goto endloop;
+}
+break;
+}
+case'\n':
+case'\r':{
+save(ls,'\n');
+inclinenumber(ls);
+if(!seminfo)luaZ_resetbuffer(ls->buff);
+break;
+}
+default:{
+if(seminfo)save_and_next(ls);
+else next(ls);
+}
+}
+}endloop:
+if(seminfo)
+seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+(2+sep),
+luaZ_bufflen(ls->buff)-2*(2+sep));
+}
+static void read_string(LexState*ls,int del,SemInfo*seminfo){
+save_and_next(ls);
+while(ls->current!=del){
+switch(ls->current){
+case(-1):
+luaX_lexerror(ls,"unfinished string",TK_EOS);
+continue;
+case'\n':
+case'\r':
+luaX_lexerror(ls,"unfinished string",TK_STRING);
+continue;
+case'\\':{
+int c;
+next(ls);
+switch(ls->current){
+case'a':c='\a';break;
+case'b':c='\b';break;
+case'f':c='\f';break;
+case'n':c='\n';break;
+case'r':c='\r';break;
+case't':c='\t';break;
+case'v':c='\v';break;
+case'\n':
+case'\r':save(ls,'\n');inclinenumber(ls);continue;
+case(-1):continue;
+default:{
+if(!isdigit(ls->current))
+save_and_next(ls);
+else{
+int i=0;
+c=0;
+do{
+c=10*c+(ls->current-'0');
+next(ls);
+}while(++i<3&&isdigit(ls->current));
+if(c>UCHAR_MAX)
+luaX_lexerror(ls,"escape sequence too large",TK_STRING);
+save(ls,c);
+}
+continue;
+}
+}
+save(ls,c);
+next(ls);
+continue;
+}
+default:
+save_and_next(ls);
+}
+}
+save_and_next(ls);
+seminfo->ts=luaX_newstring(ls,luaZ_buffer(ls->buff)+1,
+luaZ_bufflen(ls->buff)-2);
+}
+static int llex(LexState*ls,SemInfo*seminfo){
+luaZ_resetbuffer(ls->buff);
+for(;;){
+switch(ls->current){
+case'\n':
+case'\r':{
+inclinenumber(ls);
+continue;
+}
+case'-':{
+next(ls);
+if(ls->current!='-')return'-';
+next(ls);
+if(ls->current=='['){
+int sep=skip_sep(ls);
+luaZ_resetbuffer(ls->buff);
+if(sep>=0){
+read_long_string(ls,NULL,sep);
+luaZ_resetbuffer(ls->buff);
+continue;
+}
+}
+while(!currIsNewline(ls)&&ls->current!=(-1))
+next(ls);
+continue;
+}
+case'[':{
+int sep=skip_sep(ls);
+if(sep>=0){
+read_long_string(ls,seminfo,sep);
+return TK_STRING;
+}
+else if(sep!=-1)luaX_lexerror(ls,"invalid long string delimiter",TK_STRING);
+return'[';
+}
+case'=':{
+next(ls);
+if(ls->current!='=')return'=';
+else{next(ls);return TK_EQ;}
+}
+case'<':{
+next(ls);
+if(ls->current!='=')return'<';
+else{next(ls);return TK_LE;}
+}
+case'>':{
+next(ls);
+if(ls->current!='=')return'>';
+else{next(ls);return TK_GE;}
+}
+case'~':{
+next(ls);
+if(ls->current!='=')return'~';
+else{next(ls);return TK_NE;}
+}
+case'"':
+case'\'':{
+read_string(ls,ls->current,seminfo);
+return TK_STRING;
+}
+case'.':{
+save_and_next(ls);
+if(check_next(ls,".")){
+if(check_next(ls,"."))
+return TK_DOTS;
+else return TK_CONCAT;
+}
+else if(!isdigit(ls->current))return'.';
+else{
+read_numeral(ls,seminfo);
+return TK_NUMBER;
+}
+}
+case(-1):{
+return TK_EOS;
+}
+default:{
+if(isspace(ls->current)){
+next(ls);
+continue;
+}
+else if(isdigit(ls->current)){
+read_numeral(ls,seminfo);
+return TK_NUMBER;
+}
+else if(isalpha(ls->current)||ls->current=='_'){
+TString*ts;
+do{
+save_and_next(ls);
+}while(isalnum(ls->current)||ls->current=='_');
+ts=luaX_newstring(ls,luaZ_buffer(ls->buff),
+luaZ_bufflen(ls->buff));
+if(ts->tsv.reserved>0)
+return ts->tsv.reserved-1+257;
+else{
+seminfo->ts=ts;
+return TK_NAME;
+}
+}
+else{
+int c=ls->current;
+next(ls);
+return c;
+}
+}
+}
+}
+}
+static void luaX_next(LexState*ls){
+ls->lastline=ls->linenumber;
+if(ls->lookahead.token!=TK_EOS){
+ls->t=ls->lookahead;
+ls->lookahead.token=TK_EOS;
+}
+else
+ls->t.token=llex(ls,&ls->t.seminfo);
+}
+static void luaX_lookahead(LexState*ls){
+ls->lookahead.token=llex(ls,&ls->lookahead.seminfo);
+}
+#define hasjumps(e)((e)->t!=(e)->f)
+static int isnumeral(expdesc*e){
+return(e->k==VKNUM&&e->t==(-1)&&e->f==(-1));
+}
+static void luaK_nil(FuncState*fs,int from,int n){
+Instruction*previous;
+if(fs->pc>fs->lasttarget){
+if(fs->pc==0){
+if(from>=fs->nactvar)
+return;
+}
+else{
+previous=&fs->f->code[fs->pc-1];
+if(GET_OPCODE(*previous)==OP_LOADNIL){
+int pfrom=GETARG_A(*previous);
+int pto=GETARG_B(*previous);
+if(pfrom<=from&&from<=pto+1){
+if(from+n-1>pto)
+SETARG_B(*previous,from+n-1);
+return;
+}
+}
+}
+}
+luaK_codeABC(fs,OP_LOADNIL,from,from+n-1,0);
+}
+static int luaK_jump(FuncState*fs){
+int jpc=fs->jpc;
+int j;
+fs->jpc=(-1);
+j=luaK_codeAsBx(fs,OP_JMP,0,(-1));
+luaK_concat(fs,&j,jpc);
+return j;
+}
+static void luaK_ret(FuncState*fs,int first,int nret){
+luaK_codeABC(fs,OP_RETURN,first,nret+1,0);
+}
+static int condjump(FuncState*fs,OpCode op,int A,int B,int C){
+luaK_codeABC(fs,op,A,B,C);
+return luaK_jump(fs);
+}
+static void fixjump(FuncState*fs,int pc,int dest){
+Instruction*jmp=&fs->f->code[pc];
+int offset=dest-(pc+1);
+if(abs(offset)>(((1<<(9+9))-1)>>1))
+luaX_syntaxerror(fs->ls,"control structure too long");
+SETARG_sBx(*jmp,offset);
+}
+static int luaK_getlabel(FuncState*fs){
+fs->lasttarget=fs->pc;
+return fs->pc;
+}
+static int getjump(FuncState*fs,int pc){
+int offset=GETARG_sBx(fs->f->code[pc]);
+if(offset==(-1))
+return(-1);
+else
+return(pc+1)+offset;
+}
+static Instruction*getjumpcontrol(FuncState*fs,int pc){
+Instruction*pi=&fs->f->code[pc];
+if(pc>=1&&testTMode(GET_OPCODE(*(pi-1))))
+return pi-1;
+else
+return pi;
+}
+static int need_value(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list)){
+Instruction i=*getjumpcontrol(fs,list);
+if(GET_OPCODE(i)!=OP_TESTSET)return 1;
+}
+return 0;
+}
+static int patchtestreg(FuncState*fs,int node,int reg){
+Instruction*i=getjumpcontrol(fs,node);
+if(GET_OPCODE(*i)!=OP_TESTSET)
+return 0;
+if(reg!=((1<<8)-1)&&reg!=GETARG_B(*i))
+SETARG_A(*i,reg);
+else
+*i=CREATE_ABC(OP_TEST,GETARG_B(*i),0,GETARG_C(*i));
+return 1;
+}
+static void removevalues(FuncState*fs,int list){
+for(;list!=(-1);list=getjump(fs,list))
+patchtestreg(fs,list,((1<<8)-1));
+}
+static void patchlistaux(FuncState*fs,int list,int vtarget,int reg,
+int dtarget){
+while(list!=(-1)){
+int next=getjump(fs,list);
+if(patchtestreg(fs,list,reg))
+fixjump(fs,list,vtarget);
+else
+fixjump(fs,list,dtarget);
+list=next;
+}
+}
+static void dischargejpc(FuncState*fs){
+patchlistaux(fs,fs->jpc,fs->pc,((1<<8)-1),fs->pc);
+fs->jpc=(-1);
+}
+static void luaK_patchlist(FuncState*fs,int list,int target){
+if(target==fs->pc)
+luaK_patchtohere(fs,list);
+else{
+patchlistaux(fs,list,target,((1<<8)-1),target);
+}
+}
+static void luaK_patchtohere(FuncState*fs,int list){
+luaK_getlabel(fs);
+luaK_concat(fs,&fs->jpc,list);
+}
+static void luaK_concat(FuncState*fs,int*l1,int l2){
+if(l2==(-1))return;
+else if(*l1==(-1))
+*l1=l2;
+else{
+int list=*l1;
+int next;
+while((next=getjump(fs,list))!=(-1))
+list=next;
+fixjump(fs,list,l2);
+}
+}
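+/* Pending jumps form linked lists threaded through the sBx field of the
+** JMP instructions themselves; -1 (NO_JUMP) ends a list, and
+** patchlistaux/fixjump later resolve each node to its real target. */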
+static void luaK_checkstack(FuncState*fs,int n){
+int newstack=fs->freereg+n;
+if(newstack>fs->f->maxstacksize){
+if(newstack>=250)
+luaX_syntaxerror(fs->ls,"function or expression too complex");
+fs->f->maxstacksize=cast_byte(newstack);
+}
+}
+static void luaK_reserveregs(FuncState*fs,int n){
+luaK_checkstack(fs,n);
+fs->freereg+=n;
+}
+static void freereg(FuncState*fs,int reg){
+if(!ISK(reg)&&reg>=fs->nactvar){
+fs->freereg--;
+}
+}
+static void freeexp(FuncState*fs,expdesc*e){
+if(e->k==VNONRELOC)
+freereg(fs,e->u.s.info);
+}
+static int addk(FuncState*fs,TValue*k,TValue*v){
+lua_State*L=fs->L;
+TValue*idx=luaH_set(L,fs->h,k);
+Proto*f=fs->f;
+int oldsize=f->sizek;
+if(ttisnumber(idx)){
+return cast_int(nvalue(idx));
+}
+else{
+setnvalue(idx,cast_num(fs->nk));
+luaM_growvector(L,f->k,fs->nk,f->sizek,TValue,
+((1<<(9+9))-1),"constant table overflow");
+while(oldsize<f->sizek)setnilvalue(&f->k[oldsize++]);
+setobj(L,&f->k[fs->nk],v);
+luaC_barrier(L,f,v);
+return fs->nk++;
+}
+}
+static int luaK_stringK(FuncState*fs,TString*s){
+TValue o;
+setsvalue(fs->L,&o,s);
+return addk(fs,&o,&o);
+}
+static int luaK_numberK(FuncState*fs,lua_Number r){
+TValue o;
+setnvalue(&o,r);
+return addk(fs,&o,&o);
+}
+static int boolK(FuncState*fs,int b){
+TValue o;
+setbvalue(&o,b);
+return addk(fs,&o,&o);
+}
+static int nilK(FuncState*fs){
+TValue k,v;
+setnilvalue(&v);
+sethvalue(fs->L,&k,fs->h);
+return addk(fs,&k,&v);
+}
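+/* Constants are deduplicated through the scratch table fs->h, mapping
+** each constant value to its index in f->k; since nil cannot be a table
+** key, nilK keys the nil entry by the table itself. */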
+static void luaK_setreturns(FuncState*fs,expdesc*e,int nresults){
+if(e->k==VCALL){
+SETARG_C(getcode(fs,e),nresults+1);
+}
+else if(e->k==VVARARG){
+SETARG_B(getcode(fs,e),nresults+1);
+SETARG_A(getcode(fs,e),fs->freereg);
+luaK_reserveregs(fs,1);
+}
+}
+static void luaK_setoneret(FuncState*fs,expdesc*e){
+if(e->k==VCALL){
+e->k=VNONRELOC;
+e->u.s.info=GETARG_A(getcode(fs,e));
+}
+else if(e->k==VVARARG){
+SETARG_B(getcode(fs,e),2);
+e->k=VRELOCABLE;
+}
+}
+static void luaK_dischargevars(FuncState*fs,expdesc*e){
+switch(e->k){
+case VLOCAL:{
+e->k=VNONRELOC;
+break;
+}
+case VUPVAL:{
+e->u.s.info=luaK_codeABC(fs,OP_GETUPVAL,0,e->u.s.info,0);
+e->k=VRELOCABLE;
+break;
+}
+case VGLOBAL:{
+e->u.s.info=luaK_codeABx(fs,OP_GETGLOBAL,0,e->u.s.info);
+e->k=VRELOCABLE;
+break;
+}
+case VINDEXED:{
+freereg(fs,e->u.s.aux);
+freereg(fs,e->u.s.info);
+e->u.s.info=luaK_codeABC(fs,OP_GETTABLE,0,e->u.s.info,e->u.s.aux);
+e->k=VRELOCABLE;
+break;
+}
+case VVARARG:
+case VCALL:{
+luaK_setoneret(fs,e);
+break;
+}
+default:break;
+}
+}
+static int code_label(FuncState*fs,int A,int b,int jump){
+luaK_getlabel(fs);
+return luaK_codeABC(fs,OP_LOADBOOL,A,b,jump);
+}
+static void discharge2reg(FuncState*fs,expdesc*e,int reg){
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:{
+luaK_nil(fs,reg,1);
+break;
+}
+case VFALSE:case VTRUE:{
+luaK_codeABC(fs,OP_LOADBOOL,reg,e->k==VTRUE,0);
+break;
+}
+case VK:{
+luaK_codeABx(fs,OP_LOADK,reg,e->u.s.info);
+break;
+}
+case VKNUM:{
+luaK_codeABx(fs,OP_LOADK,reg,luaK_numberK(fs,e->u.nval));
+break;
+}
+case VRELOCABLE:{
+Instruction*pc=&getcode(fs,e);
+SETARG_A(*pc,reg);
+break;
+}
+case VNONRELOC:{
+if(reg!=e->u.s.info)
+luaK_codeABC(fs,OP_MOVE,reg,e->u.s.info,0);
+break;
+}
+default:{
+return;
+}
+}
+e->u.s.info=reg;
+e->k=VNONRELOC;
+}
+static void discharge2anyreg(FuncState*fs,expdesc*e){
+if(e->k!=VNONRELOC){
+luaK_reserveregs(fs,1);
+discharge2reg(fs,e,fs->freereg-1);
+}
+}
+static void exp2reg(FuncState*fs,expdesc*e,int reg){
+discharge2reg(fs,e,reg);
+if(e->k==VJMP)
+luaK_concat(fs,&e->t,e->u.s.info);
+if(hasjumps(e)){
+int final;
+int p_f=(-1);
+int p_t=(-1);
+if(need_value(fs,e->t)||need_value(fs,e->f)){
+int fj=(e->k==VJMP)?(-1):luaK_jump(fs);
+p_f=code_label(fs,reg,0,1);
+p_t=code_label(fs,reg,1,0);
+luaK_patchtohere(fs,fj);
+}
+final=luaK_getlabel(fs);
+patchlistaux(fs,e->f,final,reg,p_f);
+patchlistaux(fs,e->t,final,reg,p_t);
+}
+e->f=e->t=(-1);
+e->u.s.info=reg;
+e->k=VNONRELOC;
+}
+static void luaK_exp2nextreg(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+freeexp(fs,e);
+luaK_reserveregs(fs,1);
+exp2reg(fs,e,fs->freereg-1);
+}
+static int luaK_exp2anyreg(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+if(e->k==VNONRELOC){
+if(!hasjumps(e))return e->u.s.info;
+if(e->u.s.info>=fs->nactvar){
+exp2reg(fs,e,e->u.s.info);
+return e->u.s.info;
+}
+}
+luaK_exp2nextreg(fs,e);
+return e->u.s.info;
+}
+static void luaK_exp2val(FuncState*fs,expdesc*e){
+if(hasjumps(e))
+luaK_exp2anyreg(fs,e);
+else
+luaK_dischargevars(fs,e);
+}
+static int luaK_exp2RK(FuncState*fs,expdesc*e){
+luaK_exp2val(fs,e);
+switch(e->k){
+case VKNUM:
+case VTRUE:
+case VFALSE:
+case VNIL:{
+if(fs->nk<=((1<<(9-1))-1)){
+e->u.s.info=(e->k==VNIL)?nilK(fs):
+(e->k==VKNUM)?luaK_numberK(fs,e->u.nval):
+boolK(fs,(e->k==VTRUE));
+e->k=VK;
+return RKASK(e->u.s.info);
+}
+else break;
+}
+case VK:{
+if(e->u.s.info<=((1<<(9-1))-1))
+return RKASK(e->u.s.info);
+else break;
+}
+default:break;
+}
+return luaK_exp2anyreg(fs,e);
+}
+static void luaK_storevar(FuncState*fs,expdesc*var,expdesc*ex){
+switch(var->k){
+case VLOCAL:{
+freeexp(fs,ex);
+exp2reg(fs,ex,var->u.s.info);
+return;
+}
+case VUPVAL:{
+int e=luaK_exp2anyreg(fs,ex);
+luaK_codeABC(fs,OP_SETUPVAL,e,var->u.s.info,0);
+break;
+}
+case VGLOBAL:{
+int e=luaK_exp2anyreg(fs,ex);
+luaK_codeABx(fs,OP_SETGLOBAL,e,var->u.s.info);
+break;
+}
+case VINDEXED:{
+int e=luaK_exp2RK(fs,ex);
+luaK_codeABC(fs,OP_SETTABLE,var->u.s.info,var->u.s.aux,e);
+break;
+}
+default:{
+break;
+}
+}
+freeexp(fs,ex);
+}
+static void luaK_self(FuncState*fs,expdesc*e,expdesc*key){
+int func;
+luaK_exp2anyreg(fs,e);
+freeexp(fs,e);
+func=fs->freereg;
+luaK_reserveregs(fs,2);
+luaK_codeABC(fs,OP_SELF,func,e->u.s.info,luaK_exp2RK(fs,key));
+freeexp(fs,key);
+e->u.s.info=func;
+e->k=VNONRELOC;
+}
+static void invertjump(FuncState*fs,expdesc*e){
+Instruction*pc=getjumpcontrol(fs,e->u.s.info);
+SETARG_A(*pc,!(GETARG_A(*pc)));
+}
+static int jumponcond(FuncState*fs,expdesc*e,int cond){
+if(e->k==VRELOCABLE){
+Instruction ie=getcode(fs,e);
+if(GET_OPCODE(ie)==OP_NOT){
+fs->pc--;
+return condjump(fs,OP_TEST,GETARG_B(ie),0,!cond);
+}
+}
+discharge2anyreg(fs,e);
+freeexp(fs,e);
+return condjump(fs,OP_TESTSET,((1<<8)-1),e->u.s.info,cond);
+}
+static void luaK_goiftrue(FuncState*fs,expdesc*e){
+int pc;
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VK:case VKNUM:case VTRUE:{
+pc=(-1);
+break;
+}
+case VJMP:{
+invertjump(fs,e);
+pc=e->u.s.info;
+break;
+}
+default:{
+pc=jumponcond(fs,e,0);
+break;
+}
+}
+luaK_concat(fs,&e->f,pc);
+luaK_patchtohere(fs,e->t);
+e->t=(-1);
+}
+static void luaK_goiffalse(FuncState*fs,expdesc*e){
+int pc;
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:case VFALSE:{
+pc=(-1);
+break;
+}
+case VJMP:{
+pc=e->u.s.info;
+break;
+}
+default:{
+pc=jumponcond(fs,e,1);
+break;
+}
+}
+luaK_concat(fs,&e->t,pc);
+luaK_patchtohere(fs,e->f);
+e->f=(-1);
+}
+static void codenot(FuncState*fs,expdesc*e){
+luaK_dischargevars(fs,e);
+switch(e->k){
+case VNIL:case VFALSE:{
+e->k=VTRUE;
+break;
+}
+case VK:case VKNUM:case VTRUE:{
+e->k=VFALSE;
+break;
+}
+case VJMP:{
+invertjump(fs,e);
+break;
+}
+case VRELOCABLE:
+case VNONRELOC:{
+discharge2anyreg(fs,e);
+freeexp(fs,e);
+e->u.s.info=luaK_codeABC(fs,OP_NOT,0,e->u.s.info,0);
+e->k=VRELOCABLE;
+break;
+}
+default:{
+break;
+}
+}
+{int temp=e->f;e->f=e->t;e->t=temp;}
+removevalues(fs,e->f);
+removevalues(fs,e->t);
+}
+static void luaK_indexed(FuncState*fs,expdesc*t,expdesc*k){
+t->u.s.aux=luaK_exp2RK(fs,k);
+t->k=VINDEXED;
+}
+static int constfolding(OpCode op,expdesc*e1,expdesc*e2){
+lua_Number v1,v2,r;
+if(!isnumeral(e1)||!isnumeral(e2))return 0;
+v1=e1->u.nval;
+v2=e2->u.nval;
+switch(op){
+case OP_ADD:r=luai_numadd(v1,v2);break;
+case OP_SUB:r=luai_numsub(v1,v2);break;
+case OP_MUL:r=luai_nummul(v1,v2);break;
+case OP_DIV:
+if(v2==0)return 0;
+r=luai_numdiv(v1,v2);break;
+case OP_MOD:
+if(v2==0)return 0;
+r=luai_nummod(v1,v2);break;
+case OP_POW:r=luai_numpow(v1,v2);break;
+case OP_UNM:r=luai_numunm(v1);break;
+case OP_LEN:return 0;
+default:r=0;break;
+}
+if(luai_numisnan(r))return 0;
+e1->u.nval=r;
+return 1;
+}
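+/* Compile-time folding of arithmetic on numeric literals; folding is
+** refused for division/modulus by zero and for NaN results, so those
+** keep their runtime semantics. */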
+static void codearith(FuncState*fs,OpCode op,expdesc*e1,expdesc*e2){
+if(constfolding(op,e1,e2))
+return;
+else{
+int o2=(op!=OP_UNM&&op!=OP_LEN)?luaK_exp2RK(fs,e2):0;
+int o1=luaK_exp2RK(fs,e1);
+if(o1>o2){
+freeexp(fs,e1);
+freeexp(fs,e2);
+}
+else{
+freeexp(fs,e2);
+freeexp(fs,e1);
+}
+e1->u.s.info=luaK_codeABC(fs,op,0,o1,o2);
+e1->k=VRELOCABLE;
+}
+}
+static void codecomp(FuncState*fs,OpCode op,int cond,expdesc*e1,
+expdesc*e2){
+int o1=luaK_exp2RK(fs,e1);
+int o2=luaK_exp2RK(fs,e2);
+freeexp(fs,e2);
+freeexp(fs,e1);
+if(cond==0&&op!=OP_EQ){
+int temp;
+temp=o1;o1=o2;o2=temp;
+cond=1;
+}
+e1->u.s.info=condjump(fs,op,cond,o1,o2);
+e1->k=VJMP;
+}
+static void luaK_prefix(FuncState*fs,UnOpr op,expdesc*e){
+expdesc e2;
+e2.t=e2.f=(-1);e2.k=VKNUM;e2.u.nval=0;
+switch(op){
+case OPR_MINUS:{
+if(!isnumeral(e))
+luaK_exp2anyreg(fs,e);
+codearith(fs,OP_UNM,e,&e2);
+break;
+}
+case OPR_NOT:codenot(fs,e);break;
+case OPR_LEN:{
+luaK_exp2anyreg(fs,e);
+codearith(fs,OP_LEN,e,&e2);
+break;
+}
+default:;
+}
+}
+static void luaK_infix(FuncState*fs,BinOpr op,expdesc*v){
+switch(op){
+case OPR_AND:{
+luaK_goiftrue(fs,v);
+break;
+}
+case OPR_OR:{
+luaK_goiffalse(fs,v);
+break;
+}
+case OPR_CONCAT:{
+luaK_exp2nextreg(fs,v);
+break;
+}
+case OPR_ADD:case OPR_SUB:case OPR_MUL:case OPR_DIV:
+case OPR_MOD:case OPR_POW:{
+if(!isnumeral(v))luaK_exp2RK(fs,v);
+break;
+}
+default:{
+luaK_exp2RK(fs,v);
+break;
+}
+}
+}
+static void luaK_posfix(FuncState*fs,BinOpr op,expdesc*e1,expdesc*e2){
+switch(op){
+case OPR_AND:{
+luaK_dischargevars(fs,e2);
+luaK_concat(fs,&e2->f,e1->f);
+*e1=*e2;
+break;
+}
+case OPR_OR:{
+luaK_dischargevars(fs,e2);
+luaK_concat(fs,&e2->t,e1->t);
+*e1=*e2;
+break;
+}
+case OPR_CONCAT:{
+luaK_exp2val(fs,e2);
+if(e2->k==VRELOCABLE&&GET_OPCODE(getcode(fs,e2))==OP_CONCAT){
+freeexp(fs,e1);
+SETARG_B(getcode(fs,e2),e1->u.s.info);
+e1->k=VRELOCABLE;e1->u.s.info=e2->u.s.info;
+}
+else{
+luaK_exp2nextreg(fs,e2);
+codearith(fs,OP_CONCAT,e1,e2);
+}
+break;
+}
+case OPR_ADD:codearith(fs,OP_ADD,e1,e2);break;
+case OPR_SUB:codearith(fs,OP_SUB,e1,e2);break;
+case OPR_MUL:codearith(fs,OP_MUL,e1,e2);break;
+case OPR_DIV:codearith(fs,OP_DIV,e1,e2);break;
+case OPR_MOD:codearith(fs,OP_MOD,e1,e2);break;
+case OPR_POW:codearith(fs,OP_POW,e1,e2);break;
+case OPR_EQ:codecomp(fs,OP_EQ,1,e1,e2);break;
+case OPR_NE:codecomp(fs,OP_EQ,0,e1,e2);break;
+case OPR_LT:codecomp(fs,OP_LT,1,e1,e2);break;
+case OPR_LE:codecomp(fs,OP_LE,1,e1,e2);break;
+case OPR_GT:codecomp(fs,OP_LT,0,e1,e2);break;
+case OPR_GE:codecomp(fs,OP_LE,0,e1,e2);break;
+default:;
+}
+}
+static void luaK_fixline(FuncState*fs,int line){
+fs->f->lineinfo[fs->pc-1]=line;
+}
+static int luaK_code(FuncState*fs,Instruction i,int line){
+Proto*f=fs->f;
+dischargejpc(fs);
+luaM_growvector(fs->L,f->code,fs->pc,f->sizecode,Instruction,
+(INT_MAX-2),"code size overflow");
+f->code[fs->pc]=i;
+luaM_growvector(fs->L,f->lineinfo,fs->pc,f->sizelineinfo,int,
+(INT_MAX-2),"code size overflow");
+f->lineinfo[fs->pc]=line;
+return fs->pc++;
+}
+static int luaK_codeABC(FuncState*fs,OpCode o,int a,int b,int c){
+return luaK_code(fs,CREATE_ABC(o,a,b,c),fs->ls->lastline);
+}
+static int luaK_codeABx(FuncState*fs,OpCode o,int a,unsigned int bc){
+return luaK_code(fs,CREATE_ABx(o,a,bc),fs->ls->lastline);
+}
+static void luaK_setlist(FuncState*fs,int base,int nelems,int tostore){
+int c=(nelems-1)/50+1;
+int b=(tostore==(-1))?0:tostore;
+if(c<=((1<<9)-1))
+luaK_codeABC(fs,OP_SETLIST,base,b,c);
+else{
+luaK_codeABC(fs,OP_SETLIST,base,b,0);
+luaK_code(fs,cast(Instruction,c),fs->ls->lastline);
+}
+fs->freereg=base+1;
+}
+#define hasmultret(k)((k)==VCALL||(k)==VVARARG)
+#define getlocvar(fs,i)((fs)->f->locvars[(fs)->actvar[i]])
+#define luaY_checklimit(fs,v,l,m)if((v)>(l))errorlimit(fs,l,m)
+typedef struct BlockCnt{
+struct BlockCnt*previous;
+int breaklist;
+lu_byte nactvar;
+lu_byte upval;
+lu_byte isbreakable;
+}BlockCnt;
+static void chunk(LexState*ls);
+static void expr(LexState*ls,expdesc*v);
+static void anchor_token(LexState*ls){
+if(ls->t.token==TK_NAME||ls->t.token==TK_STRING){
+TString*ts=ls->t.seminfo.ts;
+luaX_newstring(ls,getstr(ts),ts->tsv.len);
+}
+}
+static void error_expected(LexState*ls,int token){
+luaX_syntaxerror(ls,
+luaO_pushfstring(ls->L,LUA_QL("%s")" expected",luaX_token2str(ls,token)));
+}
+static void errorlimit(FuncState*fs,int limit,const char*what){
+const char*msg=(fs->f->linedefined==0)?
+luaO_pushfstring(fs->L,"main function has more than %d %s",limit,what):
+luaO_pushfstring(fs->L,"function at line %d has more than %d %s",
+fs->f->linedefined,limit,what);
+luaX_lexerror(fs->ls,msg,0);
+}
+static int testnext(LexState*ls,int c){
+if(ls->t.token==c){
+luaX_next(ls);
+return 1;
+}
+else return 0;
+}
+static void check(LexState*ls,int c){
+if(ls->t.token!=c)
+error_expected(ls,c);
+}
+static void checknext(LexState*ls,int c){
+check(ls,c);
+luaX_next(ls);
+}
+#define check_condition(ls,c,msg){if(!(c))luaX_syntaxerror(ls,msg);}
+static void check_match(LexState*ls,int what,int who,int where){
+if(!testnext(ls,what)){
+if(where==ls->linenumber)
+error_expected(ls,what);
+else{
+luaX_syntaxerror(ls,luaO_pushfstring(ls->L,
+LUA_QL("%s")" expected (to close "LUA_QL("%s")" at line %d)",
+luaX_token2str(ls,what),luaX_token2str(ls,who),where));
+}
+}
+}
+static TString*str_checkname(LexState*ls){
+TString*ts;
+check(ls,TK_NAME);
+ts=ls->t.seminfo.ts;
+luaX_next(ls);
+return ts;
+}
+static void init_exp(expdesc*e,expkind k,int i){
+e->f=e->t=(-1);
+e->k=k;
+e->u.s.info=i;
+}
+static void codestring(LexState*ls,expdesc*e,TString*s){
+init_exp(e,VK,luaK_stringK(ls->fs,s));
+}
+static void checkname(LexState*ls,expdesc*e){
+codestring(ls,e,str_checkname(ls));
+}
+static int registerlocalvar(LexState*ls,TString*varname){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int oldsize=f->sizelocvars;
+luaM_growvector(ls->L,f->locvars,fs->nlocvars,f->sizelocvars,
+LocVar,SHRT_MAX,"too many local variables");
+while(oldsize<f->sizelocvars)f->locvars[oldsize++].varname=NULL;
+f->locvars[fs->nlocvars].varname=varname;
+luaC_objbarrier(ls->L,f,varname);
+return fs->nlocvars++;
+}
+#define new_localvarliteral(ls,v,n)new_localvar(ls,luaX_newstring(ls,""v,(sizeof(v)/sizeof(char))-1),n)
+static void new_localvar(LexState*ls,TString*name,int n){
+FuncState*fs=ls->fs;
+luaY_checklimit(fs,fs->nactvar+n+1,200,"local variables");
+fs->actvar[fs->nactvar+n]=cast(unsigned short,registerlocalvar(ls,name));
+}
+static void adjustlocalvars(LexState*ls,int nvars){
+FuncState*fs=ls->fs;
+fs->nactvar=cast_byte(fs->nactvar+nvars);
+for(;nvars;nvars--){
+getlocvar(fs,fs->nactvar-nvars).startpc=fs->pc;
+}
+}
+static void removevars(LexState*ls,int tolevel){
+FuncState*fs=ls->fs;
+while(fs->nactvar>tolevel)
+getlocvar(fs,--fs->nactvar).endpc=fs->pc;
+}
+static int indexupvalue(FuncState*fs,TString*name,expdesc*v){
+int i;
+Proto*f=fs->f;
+int oldsize=f->sizeupvalues;
+for(i=0;i<f->nups;i++){
+if(fs->upvalues[i].k==v->k&&fs->upvalues[i].info==v->u.s.info){
+return i;
+}
+}
+luaY_checklimit(fs,f->nups+1,60,"upvalues");
+luaM_growvector(fs->L,f->upvalues,f->nups,f->sizeupvalues,
+TString*,(INT_MAX-2),"");
+while(oldsize<f->sizeupvalues)f->upvalues[oldsize++]=NULL;
+f->upvalues[f->nups]=name;
+luaC_objbarrier(fs->L,f,name);
+fs->upvalues[f->nups].k=cast_byte(v->k);
+fs->upvalues[f->nups].info=cast_byte(v->u.s.info);
+return f->nups++;
+}
+static int searchvar(FuncState*fs,TString*n){
+int i;
+for(i=fs->nactvar-1;i>=0;i--){
+if(n==getlocvar(fs,i).varname)
+return i;
+}
+return-1;
+}
+static void markupval(FuncState*fs,int level){
+BlockCnt*bl=fs->bl;
+while(bl&&bl->nactvar>level)bl=bl->previous;
+if(bl)bl->upval=1;
+}
+static int singlevaraux(FuncState*fs,TString*n,expdesc*var,int base){
+if(fs==NULL){
+init_exp(var,VGLOBAL,((1<<8)-1));
+return VGLOBAL;
+}
+else{
+int v=searchvar(fs,n);
+if(v>=0){
+init_exp(var,VLOCAL,v);
+if(!base)
+markupval(fs,v);
+return VLOCAL;
+}
+else{
+if(singlevaraux(fs->prev,n,var,0)==VGLOBAL)
+return VGLOBAL;
+var->u.s.info=indexupvalue(fs,n,var);
+var->k=VUPVAL;
+return VUPVAL;
+}
+}
+}
+static void singlevar(LexState*ls,expdesc*var){
+TString*varname=str_checkname(ls);
+FuncState*fs=ls->fs;
+if(singlevaraux(fs,varname,var,1)==VGLOBAL)
+var->u.s.info=luaK_stringK(fs,varname);
+}
+static void adjust_assign(LexState*ls,int nvars,int nexps,expdesc*e){
+FuncState*fs=ls->fs;
+int extra=nvars-nexps;
+if(hasmultret(e->k)){
+extra++;
+if(extra<0)extra=0;
+luaK_setreturns(fs,e,extra);
+if(extra>1)luaK_reserveregs(fs,extra-1);
+}
+else{
+if(e->k!=VVOID)luaK_exp2nextreg(fs,e);
+if(extra>0){
+int reg=fs->freereg;
+luaK_reserveregs(fs,extra);
+luaK_nil(fs,reg,extra);
+}
+}
+}
+static void enterlevel(LexState*ls){
+if(++ls->L->nCcalls>200)
+luaX_lexerror(ls,"chunk has too many syntax levels",0);
+}
+#define leavelevel(ls)((ls)->L->nCcalls--)
+static void enterblock(FuncState*fs,BlockCnt*bl,lu_byte isbreakable){
+bl->breaklist=(-1);
+bl->isbreakable=isbreakable;
+bl->nactvar=fs->nactvar;
+bl->upval=0;
+bl->previous=fs->bl;
+fs->bl=bl;
+}
+static void leaveblock(FuncState*fs){
+BlockCnt*bl=fs->bl;
+fs->bl=bl->previous;
+removevars(fs->ls,bl->nactvar);
+if(bl->upval)
+luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
+fs->freereg=fs->nactvar;
+luaK_patchtohere(fs,bl->breaklist);
+}
+static void pushclosure(LexState*ls,FuncState*func,expdesc*v){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int oldsize=f->sizep;
+int i;
+luaM_growvector(ls->L,f->p,fs->np,f->sizep,Proto*,
+((1<<(9+9))-1),"constant table overflow");
+while(oldsize<f->sizep)f->p[oldsize++]=NULL;
+f->p[fs->np++]=func->f;
+luaC_objbarrier(ls->L,f,func->f);
+init_exp(v,VRELOCABLE,luaK_codeABx(fs,OP_CLOSURE,0,fs->np-1));
+for(i=0;i<func->f->nups;i++){
+OpCode o=(func->upvalues[i].k==VLOCAL)?OP_MOVE:OP_GETUPVAL;
+luaK_codeABC(fs,o,0,func->upvalues[i].info,0);
+}
+}
+static void open_func(LexState*ls,FuncState*fs){
+lua_State*L=ls->L;
+Proto*f=luaF_newproto(L);
+fs->f=f;
+fs->prev=ls->fs;
+fs->ls=ls;
+fs->L=L;
+ls->fs=fs;
+fs->pc=0;
+fs->lasttarget=-1;
+fs->jpc=(-1);
+fs->freereg=0;
+fs->nk=0;
+fs->np=0;
+fs->nlocvars=0;
+fs->nactvar=0;
+fs->bl=NULL;
+f->source=ls->source;
+f->maxstacksize=2;
+fs->h=luaH_new(L,0,0);
+sethvalue(L,L->top,fs->h);
+incr_top(L);
+setptvalue(L,L->top,f);
+incr_top(L);
+}
+static void close_func(LexState*ls){
+lua_State*L=ls->L;
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+removevars(ls,0);
+luaK_ret(fs,0,0);
+luaM_reallocvector(L,f->code,f->sizecode,fs->pc,Instruction);
+f->sizecode=fs->pc;
+luaM_reallocvector(L,f->lineinfo,f->sizelineinfo,fs->pc,int);
+f->sizelineinfo=fs->pc;
+luaM_reallocvector(L,f->k,f->sizek,fs->nk,TValue);
+f->sizek=fs->nk;
+luaM_reallocvector(L,f->p,f->sizep,fs->np,Proto*);
+f->sizep=fs->np;
+luaM_reallocvector(L,f->locvars,f->sizelocvars,fs->nlocvars,LocVar);
+f->sizelocvars=fs->nlocvars;
+luaM_reallocvector(L,f->upvalues,f->sizeupvalues,f->nups,TString*);
+f->sizeupvalues=f->nups;
+ls->fs=fs->prev;
+if(fs)anchor_token(ls);
+L->top-=2;
+}
+static Proto*luaY_parser(lua_State*L,ZIO*z,Mbuffer*buff,const char*name){
+struct LexState lexstate;
+struct FuncState funcstate;
+lexstate.buff=buff;
+luaX_setinput(L,&lexstate,z,luaS_new(L,name));
+open_func(&lexstate,&funcstate);
+funcstate.f->is_vararg=2;
+luaX_next(&lexstate);
+chunk(&lexstate);
+check(&lexstate,TK_EOS);
+close_func(&lexstate);
+return funcstate.f;
+}
+static void field(LexState*ls,expdesc*v){
+FuncState*fs=ls->fs;
+expdesc key;
+luaK_exp2anyreg(fs,v);
+luaX_next(ls);
+checkname(ls,&key);
+luaK_indexed(fs,v,&key);
+}
+static void yindex(LexState*ls,expdesc*v){
+luaX_next(ls);
+expr(ls,v);
+luaK_exp2val(ls->fs,v);
+checknext(ls,']');
+}
+struct ConsControl{
+expdesc v;
+expdesc*t;
+int nh;
+int na;
+int tostore;
+};
+static void recfield(LexState*ls,struct ConsControl*cc){
+FuncState*fs=ls->fs;
+int reg=ls->fs->freereg;
+expdesc key,val;
+int rkkey;
+if(ls->t.token==TK_NAME){
+luaY_checklimit(fs,cc->nh,(INT_MAX-2),"items in a constructor");
+checkname(ls,&key);
+}
+else
+yindex(ls,&key);
+cc->nh++;
+checknext(ls,'=');
+rkkey=luaK_exp2RK(fs,&key);
+expr(ls,&val);
+luaK_codeABC(fs,OP_SETTABLE,cc->t->u.s.info,rkkey,luaK_exp2RK(fs,&val));
+fs->freereg=reg;
+}
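+/* Constructor helpers: pending array items are flushed to the new table in
+** batches via OP_SETLIST; the literal 50 is the inlined LFIELDS_PER_FLUSH
+** constant from lopcodes.h. */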
+static void closelistfield(FuncState*fs,struct ConsControl*cc){
+if(cc->v.k==VVOID)return;
+luaK_exp2nextreg(fs,&cc->v);
+cc->v.k=VVOID;
+if(cc->tostore==50){
+luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
+cc->tostore=0;
+}
+}
+static void lastlistfield(FuncState*fs,struct ConsControl*cc){
+if(cc->tostore==0)return;
+if(hasmultret(cc->v.k)){
+luaK_setmultret(fs,&cc->v);
+luaK_setlist(fs,cc->t->u.s.info,cc->na,(-1));
+cc->na--;
+}
+else{
+if(cc->v.k!=VVOID)
+luaK_exp2nextreg(fs,&cc->v);
+luaK_setlist(fs,cc->t->u.s.info,cc->na,cc->tostore);
+}
+}
+static void listfield(LexState*ls,struct ConsControl*cc){
+expr(ls,&cc->v);
+luaY_checklimit(ls->fs,cc->na,(INT_MAX-2),"items in a constructor");
+cc->na++;
+cc->tostore++;
+}
+static void constructor(LexState*ls,expdesc*t){
+FuncState*fs=ls->fs;
+int line=ls->linenumber;
+int pc=luaK_codeABC(fs,OP_NEWTABLE,0,0,0);
+struct ConsControl cc;
+cc.na=cc.nh=cc.tostore=0;
+cc.t=t;
+init_exp(t,VRELOCABLE,pc);
+init_exp(&cc.v,VVOID,0);
+luaK_exp2nextreg(ls->fs,t);
+checknext(ls,'{');
+do{
+if(ls->t.token=='}')break;
+closelistfield(fs,&cc);
+switch(ls->t.token){
+case TK_NAME:{
+luaX_lookahead(ls);
+if(ls->lookahead.token!='=')
+listfield(ls,&cc);
+else
+recfield(ls,&cc);
+break;
+}
+case'[':{
+recfield(ls,&cc);
+break;
+}
+default:{
+listfield(ls,&cc);
+break;
+}
+}
+}while(testnext(ls,',')||testnext(ls,';'));
+check_match(ls,'}','{',line);
+lastlistfield(fs,&cc);
+SETARG_B(fs->f->code[pc],luaO_int2fb(cc.na));
+SETARG_C(fs->f->code[pc],luaO_int2fb(cc.nh));
+}
+static void parlist(LexState*ls){
+FuncState*fs=ls->fs;
+Proto*f=fs->f;
+int nparams=0;
+f->is_vararg=0;
+if(ls->t.token!=')'){
+do{
+switch(ls->t.token){
+case TK_NAME:{
+new_localvar(ls,str_checkname(ls),nparams++);
+break;
+}
+case TK_DOTS:{
+luaX_next(ls);
+f->is_vararg|=2;
+break;
+}
+default:luaX_syntaxerror(ls,"<name> or "LUA_QL("...")" expected");
+}
+}while(!f->is_vararg&&testnext(ls,','));
+}
+adjustlocalvars(ls,nparams);
+f->numparams=cast_byte(fs->nactvar-(f->is_vararg&1));
+luaK_reserveregs(fs,fs->nactvar);
+}
+static void body(LexState*ls,expdesc*e,int needself,int line){
+FuncState new_fs;
+open_func(ls,&new_fs);
+new_fs.f->linedefined=line;
+checknext(ls,'(');
+if(needself){
+new_localvarliteral(ls,"self",0);
+adjustlocalvars(ls,1);
+}
+parlist(ls);
+checknext(ls,')');
+chunk(ls);
+new_fs.f->lastlinedefined=ls->linenumber;
+check_match(ls,TK_END,TK_FUNCTION,line);
+close_func(ls);
+pushclosure(ls,&new_fs,e);
+}
+static int explist1(LexState*ls,expdesc*v){
+int n=1;
+expr(ls,v);
+while(testnext(ls,',')){
+luaK_exp2nextreg(ls->fs,v);
+expr(ls,v);
+n++;
+}
+return n;
+}
+static void funcargs(LexState*ls,expdesc*f){
+FuncState*fs=ls->fs;
+expdesc args;
+int base,nparams;
+int line=ls->linenumber;
+switch(ls->t.token){
+case'(':{
+if(line!=ls->lastline)
+luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)");
+luaX_next(ls);
+if(ls->t.token==')')
+args.k=VVOID;
+else{
+explist1(ls,&args);
+luaK_setmultret(fs,&args);
+}
+check_match(ls,')','(',line);
+break;
+}
+case'{':{
+constructor(ls,&args);
+break;
+}
+case TK_STRING:{
+codestring(ls,&args,ls->t.seminfo.ts);
+luaX_next(ls);
+break;
+}
+default:{
+luaX_syntaxerror(ls,"function arguments expected");
+return;
+}
+}
+base=f->u.s.info;
+if(hasmultret(args.k))
+nparams=(-1);
+else{
+if(args.k!=VVOID)
+luaK_exp2nextreg(fs,&args);
+nparams=fs->freereg-(base+1);
+}
+init_exp(f,VCALL,luaK_codeABC(fs,OP_CALL,base,nparams+1,2));
+luaK_fixline(fs,line);
+fs->freereg=base+1;
+}
+static void prefixexp(LexState*ls,expdesc*v){
+switch(ls->t.token){
+case'(':{
+int line=ls->linenumber;
+luaX_next(ls);
+expr(ls,v);
+check_match(ls,')','(',line);
+luaK_dischargevars(ls->fs,v);
+return;
+}
+case TK_NAME:{
+singlevar(ls,v);
+return;
+}
+default:{
+luaX_syntaxerror(ls,"unexpected symbol");
+return;
+}
+}
+}
+static void primaryexp(LexState*ls,expdesc*v){
+FuncState*fs=ls->fs;
+prefixexp(ls,v);
+for(;;){
+switch(ls->t.token){
+case'.':{
+field(ls,v);
+break;
+}
+case'[':{
+expdesc key;
+luaK_exp2anyreg(fs,v);
+yindex(ls,&key);
+luaK_indexed(fs,v,&key);
+break;
+}
+case':':{
+expdesc key;
+luaX_next(ls);
+checkname(ls,&key);
+luaK_self(fs,v,&key);
+funcargs(ls,v);
+break;
+}
+case'(':case TK_STRING:case'{':{
+luaK_exp2nextreg(fs,v);
+funcargs(ls,v);
+break;
+}
+default:return;
+}
+}
+}
+static void simpleexp(LexState*ls,expdesc*v){
+switch(ls->t.token){
+case TK_NUMBER:{
+init_exp(v,VKNUM,0);
+v->u.nval=ls->t.seminfo.r;
+break;
+}
+case TK_STRING:{
+codestring(ls,v,ls->t.seminfo.ts);
+break;
+}
+case TK_NIL:{
+init_exp(v,VNIL,0);
+break;
+}
+case TK_TRUE:{
+init_exp(v,VTRUE,0);
+break;
+}
+case TK_FALSE:{
+init_exp(v,VFALSE,0);
+break;
+}
+case TK_DOTS:{
+FuncState*fs=ls->fs;
+check_condition(ls,fs->f->is_vararg,
+"cannot use "LUA_QL("...")" outside a vararg function");
+fs->f->is_vararg&=~4;
+init_exp(v,VVARARG,luaK_codeABC(fs,OP_VARARG,0,1,0));
+break;
+}
+case'{':{
+constructor(ls,v);
+return;
+}
+case TK_FUNCTION:{
+luaX_next(ls);
+body(ls,v,0,ls->linenumber);
+return;
+}
+default:{
+primaryexp(ls,v);
+return;
+}
+}
+luaX_next(ls);
+}
+static UnOpr getunopr(int op){
+switch(op){
+case TK_NOT:return OPR_NOT;
+case'-':return OPR_MINUS;
+case'#':return OPR_LEN;
+default:return OPR_NOUNOPR;
+}
+}
+static BinOpr getbinopr(int op){
+switch(op){
+case'+':return OPR_ADD;
+case'-':return OPR_SUB;
+case'*':return OPR_MUL;
+case'/':return OPR_DIV;
+case'%':return OPR_MOD;
+case'^':return OPR_POW;
+case TK_CONCAT:return OPR_CONCAT;
+case TK_NE:return OPR_NE;
+case TK_EQ:return OPR_EQ;
+case'<':return OPR_LT;
+case TK_LE:return OPR_LE;
+case'>':return OPR_GT;
+case TK_GE:return OPR_GE;
+case TK_AND:return OPR_AND;
+case TK_OR:return OPR_OR;
+default:return OPR_NOBINOPR;
+}
+}
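+/* Binding powers for subexpr(), indexed in BinOpr order (ADD..OR). The
+** asymmetric rows {10,9} and {5,4} make `^` and `..` right-associative;
+** the literal 8 passed to subexpr() for unary operands is the inlined
+** unary-operator priority. */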
+static const struct{
+lu_byte left;
+lu_byte right;
+}priority[]={
+{6,6},{6,6},{7,7},{7,7},{7,7},
+{10,9},{5,4},
+{3,3},{3,3},
+{3,3},{3,3},{3,3},{3,3},
+{2,2},{1,1}
+};
+static BinOpr subexpr(LexState*ls,expdesc*v,unsigned int limit){
+BinOpr op;
+UnOpr uop;
+enterlevel(ls);
+uop=getunopr(ls->t.token);
+if(uop!=OPR_NOUNOPR){
+luaX_next(ls);
+subexpr(ls,v,8);
+luaK_prefix(ls->fs,uop,v);
+}
+else simpleexp(ls,v);
+op=getbinopr(ls->t.token);
+while(op!=OPR_NOBINOPR&&priority[op].left>limit){
+expdesc v2;
+BinOpr nextop;
+luaX_next(ls);
+luaK_infix(ls->fs,op,v);
+nextop=subexpr(ls,&v2,priority[op].right);
+luaK_posfix(ls->fs,op,v,&v2);
+op=nextop;
+}
+leavelevel(ls);
+return op;
+}
+static void expr(LexState*ls,expdesc*v){
+subexpr(ls,v,0);
+}
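+/* Statement parsing follows; block_follow() recognizes the tokens that may
+** legitimately terminate a block (else/elseif/end/until/<eof>). */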
+static int block_follow(int token){
+switch(token){
+case TK_ELSE:case TK_ELSEIF:case TK_END:
+case TK_UNTIL:case TK_EOS:
+return 1;
+default:return 0;
+}
+}
+static void block(LexState*ls){
+FuncState*fs=ls->fs;
+BlockCnt bl;
+enterblock(fs,&bl,0);
+chunk(ls);
+leaveblock(fs);
+}
+struct LHS_assign{
+struct LHS_assign*prev;
+expdesc v;
+};
+static void check_conflict(LexState*ls,struct LHS_assign*lh,expdesc*v){
+FuncState*fs=ls->fs;
+int extra=fs->freereg;
+int conflict=0;
+for(;lh;lh=lh->prev){
+if(lh->v.k==VINDEXED){
+if(lh->v.u.s.info==v->u.s.info){
+conflict=1;
+lh->v.u.s.info=extra;
+}
+if(lh->v.u.s.aux==v->u.s.info){
+conflict=1;
+lh->v.u.s.aux=extra;
+}
+}
+}
+if(conflict){
+luaK_codeABC(fs,OP_MOVE,fs->freereg,v->u.s.info,0);
+luaK_reserveregs(fs,1);
+}
+}
+static void assignment(LexState*ls,struct LHS_assign*lh,int nvars){
+expdesc e;
+check_condition(ls,VLOCAL<=lh->v.k&&lh->v.k<=VINDEXED,
+"syntax error");
+if(testnext(ls,',')){
+struct LHS_assign nv;
+nv.prev=lh;
+primaryexp(ls,&nv.v);
+if(nv.v.k==VLOCAL)
+check_conflict(ls,lh,&nv.v);
+luaY_checklimit(ls->fs,nvars,200-ls->L->nCcalls,
+"variables in assignment");
+assignment(ls,&nv,nvars+1);
+}
+else{
+int nexps;
+checknext(ls,'=');
+nexps=explist1(ls,&e);
+if(nexps!=nvars){
+adjust_assign(ls,nvars,nexps,&e);
+if(nexps>nvars)
+ls->fs->freereg-=nexps-nvars;
+}
+else{
+luaK_setoneret(ls->fs,&e);
+luaK_storevar(ls->fs,&lh->v,&e);
+return;
+}
+}
+init_exp(&e,VNONRELOC,ls->fs->freereg-1);
+luaK_storevar(ls->fs,&lh->v,&e);
+}
+static int cond(LexState*ls){
+expdesc v;
+expr(ls,&v);
+if(v.k==VNIL)v.k=VFALSE;
+luaK_goiftrue(ls->fs,&v);
+return v.f;
+}
+static void breakstat(LexState*ls){
+FuncState*fs=ls->fs;
+BlockCnt*bl=fs->bl;
+int upval=0;
+while(bl&&!bl->isbreakable){
+upval|=bl->upval;
+bl=bl->previous;
+}
+if(!bl)
+luaX_syntaxerror(ls,"no loop to break");
+if(upval)
+luaK_codeABC(fs,OP_CLOSE,bl->nactvar,0,0);
+luaK_concat(fs,&bl->breaklist,luaK_jump(fs));
+}
+static void whilestat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+int whileinit;
+int condexit;
+BlockCnt bl;
+luaX_next(ls);
+whileinit=luaK_getlabel(fs);
+condexit=cond(ls);
+enterblock(fs,&bl,1);
+checknext(ls,TK_DO);
+block(ls);
+luaK_patchlist(fs,luaK_jump(fs),whileinit);
+check_match(ls,TK_END,TK_WHILE,line);
+leaveblock(fs);
+luaK_patchtohere(fs,condexit);
+}
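+/* repeatstat() opens two blocks: the outer one is the breakable loop, the
+** inner one scopes locals that the `until` condition may still reference.
+** If the body created upvalues, the slow path breaks out via breakstat()
+** (closing them) before jumping back to the loop start. */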
+static void repeatstat(LexState*ls,int line){
+int condexit;
+FuncState*fs=ls->fs;
+int repeat_init=luaK_getlabel(fs);
+BlockCnt bl1,bl2;
+enterblock(fs,&bl1,1);
+enterblock(fs,&bl2,0);
+luaX_next(ls);
+chunk(ls);
+check_match(ls,TK_UNTIL,TK_REPEAT,line);
+condexit=cond(ls);
+if(!bl2.upval){
+leaveblock(fs);
+luaK_patchlist(ls->fs,condexit,repeat_init);
+}
+else{
+breakstat(ls);
+luaK_patchtohere(ls->fs,condexit);
+leaveblock(fs);
+luaK_patchlist(ls->fs,luaK_jump(fs),repeat_init);
+}
+leaveblock(fs);
+}
+static int exp1(LexState*ls){
+expdesc e;
+int k;
+expr(ls,&e);
+k=e.k;
+luaK_exp2nextreg(ls->fs,&e);
+return k;
+}
+static void forbody(LexState*ls,int base,int line,int nvars,int isnum){
+BlockCnt bl;
+FuncState*fs=ls->fs;
+int prep,endfor;
+adjustlocalvars(ls,3);
+checknext(ls,TK_DO);
+prep=isnum?luaK_codeAsBx(fs,OP_FORPREP,base,(-1)):luaK_jump(fs);
+enterblock(fs,&bl,0);
+adjustlocalvars(ls,nvars);
+luaK_reserveregs(fs,nvars);
+block(ls);
+leaveblock(fs);
+luaK_patchtohere(fs,prep);
+endfor=(isnum)?luaK_codeAsBx(fs,OP_FORLOOP,base,(-1)):
+luaK_codeABC(fs,OP_TFORLOOP,base,0,nvars);
+luaK_fixline(fs,line);
+luaK_patchlist(fs,(isnum?endfor:luaK_jump(fs)),prep+1);
+}
+static void fornum(LexState*ls,TString*varname,int line){
+FuncState*fs=ls->fs;
+int base=fs->freereg;
+new_localvarliteral(ls,"(for index)",0);
+new_localvarliteral(ls,"(for limit)",1);
+new_localvarliteral(ls,"(for step)",2);
+new_localvar(ls,varname,3);
+checknext(ls,'=');
+exp1(ls);
+checknext(ls,',');
+exp1(ls);
+if(testnext(ls,','))
+exp1(ls);
+else{
+luaK_codeABx(fs,OP_LOADK,fs->freereg,luaK_numberK(fs,1));
+luaK_reserveregs(fs,1);
+}
+forbody(ls,base,line,1,1);
+}
+static void forlist(LexState*ls,TString*indexname){
+FuncState*fs=ls->fs;
+expdesc e;
+int nvars=0;
+int line;
+int base=fs->freereg;
+new_localvarliteral(ls,"(for generator)",nvars++);
+new_localvarliteral(ls,"(for state)",nvars++);
+new_localvarliteral(ls,"(for control)",nvars++);
+new_localvar(ls,indexname,nvars++);
+while(testnext(ls,','))
+new_localvar(ls,str_checkname(ls),nvars++);
+checknext(ls,TK_IN);
+line=ls->linenumber;
+adjust_assign(ls,3,explist1(ls,&e),&e);
+luaK_checkstack(fs,3);
+forbody(ls,base,line,nvars-3,0);
+}
+static void forstat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+TString*varname;
+BlockCnt bl;
+enterblock(fs,&bl,1);
+luaX_next(ls);
+varname=str_checkname(ls);
+switch(ls->t.token){
+case'=':fornum(ls,varname,line);break;
+case',':case TK_IN:forlist(ls,varname);break;
+default:luaX_syntaxerror(ls,LUA_QL("=")" or "LUA_QL("in")" expected");
+}
+check_match(ls,TK_END,TK_FOR,line);
+leaveblock(fs);
+}
+static int test_then_block(LexState*ls){
+int condexit;
+luaX_next(ls);
+condexit=cond(ls);
+checknext(ls,TK_THEN);
+block(ls);
+return condexit;
+}
+static void ifstat(LexState*ls,int line){
+FuncState*fs=ls->fs;
+int flist;
+int escapelist=(-1);
+flist=test_then_block(ls);
+while(ls->t.token==TK_ELSEIF){
+luaK_concat(fs,&escapelist,luaK_jump(fs));
+luaK_patchtohere(fs,flist);
+flist=test_then_block(ls);
+}
+if(ls->t.token==TK_ELSE){
+luaK_concat(fs,&escapelist,luaK_jump(fs));
+luaK_patchtohere(fs,flist);
+luaX_next(ls);
+block(ls);
+}
+else
+luaK_concat(fs,&escapelist,flist);
+luaK_patchtohere(fs,escapelist);
+check_match(ls,TK_END,TK_IF,line);
+}
+static void localfunc(LexState*ls){
+expdesc v,b;
+FuncState*fs=ls->fs;
+new_localvar(ls,str_checkname(ls),0);
+init_exp(&v,VLOCAL,fs->freereg);
+luaK_reserveregs(fs,1);
+adjustlocalvars(ls,1);
+body(ls,&b,0,ls->linenumber);
+luaK_storevar(fs,&v,&b);
+getlocvar(fs,fs->nactvar-1).startpc=fs->pc;
+}
+static void localstat(LexState*ls){
+int nvars=0;
+int nexps;
+expdesc e;
+do{
+new_localvar(ls,str_checkname(ls),nvars++);
+}while(testnext(ls,','));
+if(testnext(ls,'='))
+nexps=explist1(ls,&e);
+else{
+e.k=VVOID;
+nexps=0;
+}
+adjust_assign(ls,nvars,nexps,&e);
+adjustlocalvars(ls,nvars);
+}
+static int funcname(LexState*ls,expdesc*v){
+int needself=0;
+singlevar(ls,v);
+while(ls->t.token=='.')
+field(ls,v);
+if(ls->t.token==':'){
+needself=1;
+field(ls,v);
+}
+return needself;
+}
+static void funcstat(LexState*ls,int line){
+int needself;
+expdesc v,b;
+luaX_next(ls);
+needself=funcname(ls,&v);
+body(ls,&b,needself,line);
+luaK_storevar(ls->fs,&v,&b);
+luaK_fixline(ls->fs,line);
+}
+static void exprstat(LexState*ls){
+FuncState*fs=ls->fs;
+struct LHS_assign v;
+primaryexp(ls,&v.v);
+if(v.v.k==VCALL)
+SETARG_C(getcode(fs,&v.v),1);
+else{
+v.prev=NULL;
+assignment(ls,&v,1);
+}
+}
+static void retstat(LexState*ls){
+FuncState*fs=ls->fs;
+expdesc e;
+int first,nret;
+luaX_next(ls);
+if(block_follow(ls->t.token)||ls->t.token==';')
+first=nret=0;
+else{
+nret=explist1(ls,&e);
+if(hasmultret(e.k)){
+luaK_setmultret(fs,&e);
+if(e.k==VCALL&&nret==1){
+SET_OPCODE(getcode(fs,&e),OP_TAILCALL);
+}
+first=fs->nactvar;
+nret=(-1);
+}
+else{
+if(nret==1)
+first=luaK_exp2anyreg(fs,&e);
+else{
+luaK_exp2nextreg(fs,&e);
+first=fs->nactvar;
+}
+}
+}
+luaK_ret(fs,first,nret);
+}
+static int statement(LexState*ls){
+int line=ls->linenumber;
+switch(ls->t.token){
+case TK_IF:{
+ifstat(ls,line);
+return 0;
+}
+case TK_WHILE:{
+whilestat(ls,line);
+return 0;
+}
+case TK_DO:{
+luaX_next(ls);
+block(ls);
+check_match(ls,TK_END,TK_DO,line);
+return 0;
+}
+case TK_FOR:{
+forstat(ls,line);
+return 0;
+}
+case TK_REPEAT:{
+repeatstat(ls,line);
+return 0;
+}
+case TK_FUNCTION:{
+funcstat(ls,line);
+return 0;
+}
+case TK_LOCAL:{
+luaX_next(ls);
+if(testnext(ls,TK_FUNCTION))
+localfunc(ls);
+else
+localstat(ls);
+return 0;
+}
+case TK_RETURN:{
+retstat(ls);
+return 1;
+}
+case TK_BREAK:{
+luaX_next(ls);
+breakstat(ls);
+return 1;
+}
+default:{
+exprstat(ls);
+return 0;
+}
+}
+}
+static void chunk(LexState*ls){
+int islast=0;
+enterlevel(ls);
+while(!islast&&!block_follow(ls->t.token)){
+islast=statement(ls);
+testnext(ls,';');
+ls->fs->freereg=ls->fs->nactvar;
+}
+leavelevel(ls);
+}
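+/* End of the parser. What follows is the minified runtime core (lvm.c):
+** coercions, metamethod dispatch and the bytecode interpreter. */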
+static const TValue*luaV_tonumber(const TValue*obj,TValue*n){
+lua_Number num;
+if(ttisnumber(obj))return obj;
+if(ttisstring(obj)&&luaO_str2d(svalue(obj),&num)){
+setnvalue(n,num);
+return n;
+}
+else
+return NULL;
+}
+static int luaV_tostring(lua_State*L,StkId obj){
+if(!ttisnumber(obj))
+return 0;
+else{
+char s[32];
+lua_Number n=nvalue(obj);
+lua_number2str(s,n);
+setsvalue(L,obj,luaS_new(L,s));
+return 1;
+}
+}
+static void callTMres(lua_State*L,StkId res,const TValue*f,
+const TValue*p1,const TValue*p2){
+ptrdiff_t result=savestack(L,res);
+setobj(L,L->top,f);
+setobj(L,L->top+1,p1);
+setobj(L,L->top+2,p2);
+luaD_checkstack(L,3);
+L->top+=3;
+luaD_call(L,L->top-3,1);
+res=restorestack(L,result);
+L->top--;
+setobj(L,res,L->top);
+}
+static void callTM(lua_State*L,const TValue*f,const TValue*p1,
+const TValue*p2,const TValue*p3){
+setobj(L,L->top,f);
+setobj(L,L->top+1,p1);
+setobj(L,L->top+2,p2);
+setobj(L,L->top+3,p3);
+luaD_checkstack(L,4);
+L->top+=4;
+luaD_call(L,L->top-4,0);
+}
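+/* gettable/settable walk __index/__newindex metamethod chains for at most
+** 100 steps (the inlined MAXTAGLOOP) before reporting a loop error. */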
+static void luaV_gettable(lua_State*L,const TValue*t,TValue*key,StkId val){
+int loop;
+for(loop=0;loop<100;loop++){
+const TValue*tm;
+if(ttistable(t)){
+Table*h=hvalue(t);
+const TValue*res=luaH_get(h,key);
+if(!ttisnil(res)||
+(tm=fasttm(L,h->metatable,TM_INDEX))==NULL){
+setobj(L,val,res);
+return;
+}
+}
+else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_INDEX)))
+luaG_typeerror(L,t,"index");
+if(ttisfunction(tm)){
+callTMres(L,val,tm,t,key);
+return;
+}
+t=tm;
+}
+luaG_runerror(L,"loop in gettable");
+}
+static void luaV_settable(lua_State*L,const TValue*t,TValue*key,StkId val){
+int loop;
+TValue temp;
+for(loop=0;loop<100;loop++){
+const TValue*tm;
+if(ttistable(t)){
+Table*h=hvalue(t);
+TValue*oldval=luaH_set(L,h,key);
+if(!ttisnil(oldval)||
+(tm=fasttm(L,h->metatable,TM_NEWINDEX))==NULL){
+setobj(L,oldval,val);
+h->flags=0;
+luaC_barriert(L,h,val);
+return;
+}
+}
+else if(ttisnil(tm=luaT_gettmbyobj(L,t,TM_NEWINDEX)))
+luaG_typeerror(L,t,"index");
+if(ttisfunction(tm)){
+callTM(L,tm,t,key,val);
+return;
+}
+setobj(L,&temp,tm);
+t=&temp;
+}
+luaG_runerror(L,"loop in settable");
+}
+static int call_binTM(lua_State*L,const TValue*p1,const TValue*p2,
+StkId res,TMS event){
+const TValue*tm=luaT_gettmbyobj(L,p1,event);
+if(ttisnil(tm))
+tm=luaT_gettmbyobj(L,p2,event);
+if(ttisnil(tm))return 0;
+callTMres(L,res,tm,p1,p2);
+return 1;
+}
+static const TValue*get_compTM(lua_State*L,Table*mt1,Table*mt2,
+TMS event){
+const TValue*tm1=fasttm(L,mt1,event);
+const TValue*tm2;
+if(tm1==NULL)return NULL;
+if(mt1==mt2)return tm1;
+tm2=fasttm(L,mt2,event);
+if(tm2==NULL)return NULL;
+if(luaO_rawequalObj(tm1,tm2))
+return tm1;
+return NULL;
+}
+static int call_orderTM(lua_State*L,const TValue*p1,const TValue*p2,
+TMS event){
+const TValue*tm1=luaT_gettmbyobj(L,p1,event);
+const TValue*tm2;
+if(ttisnil(tm1))return-1;
+tm2=luaT_gettmbyobj(L,p2,event);
+if(!luaO_rawequalObj(tm1,tm2))
+return-1;
+callTMres(L,L->top,tm1,p1,p2);
+return!l_isfalse(L->top);
+}
+static int l_strcmp(const TString*ls,const TString*rs){
+const char*l=getstr(ls);
+size_t ll=ls->tsv.len;
+const char*r=getstr(rs);
+size_t lr=rs->tsv.len;
+for(;;){
+int temp=strcoll(l,r);
+if(temp!=0)return temp;
+else{
+size_t len=strlen(l);
+if(len==lr)
+return(len==ll)?0:1;
+else if(len==ll)
+return-1;
+len++;
+l+=len;ll-=len;r+=len;lr-=len;
+}
+}
+}
+static int luaV_lessthan(lua_State*L,const TValue*l,const TValue*r){
+int res;
+if(ttype(l)!=ttype(r))
+return luaG_ordererror(L,l,r);
+else if(ttisnumber(l))
+return luai_numlt(nvalue(l),nvalue(r));
+else if(ttisstring(l))
+return l_strcmp(rawtsvalue(l),rawtsvalue(r))<0;
+else if((res=call_orderTM(L,l,r,TM_LT))!=-1)
+return res;
+return luaG_ordererror(L,l,r);
+}
+static int lessequal(lua_State*L,const TValue*l,const TValue*r){
+int res;
+if(ttype(l)!=ttype(r))
+return luaG_ordererror(L,l,r);
+else if(ttisnumber(l))
+return luai_numle(nvalue(l),nvalue(r));
+else if(ttisstring(l))
+return l_strcmp(rawtsvalue(l),rawtsvalue(r))<=0;
+else if((res=call_orderTM(L,l,r,TM_LE))!=-1)
+return res;
+else if((res=call_orderTM(L,r,l,TM_LT))!=-1)
+return!res;
+return luaG_ordererror(L,l,r);
+}
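+/* Note: the numeric case labels here and throughout are the inlined LUA_T*
+** type tags: 0=nil, 1=boolean, 2=lightuserdata, 3=number, 4=string,
+** 5=table, 6=function, 7=userdata, 8=thread. */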
+static int luaV_equalval(lua_State*L,const TValue*t1,const TValue*t2){
+const TValue*tm;
+switch(ttype(t1)){
+case 0:return 1;
+case 3:return luai_numeq(nvalue(t1),nvalue(t2));
+case 1:return bvalue(t1)==bvalue(t2);
+case 2:return pvalue(t1)==pvalue(t2);
+case 7:{
+if(uvalue(t1)==uvalue(t2))return 1;
+tm=get_compTM(L,uvalue(t1)->metatable,uvalue(t2)->metatable,
+TM_EQ);
+break;
+}
+case 5:{
+if(hvalue(t1)==hvalue(t2))return 1;
+tm=get_compTM(L,hvalue(t1)->metatable,hvalue(t2)->metatable,TM_EQ);
+break;
+}
+default:return gcvalue(t1)==gcvalue(t2);
+}
+if(tm==NULL)return 0;
+callTMres(L,L->top,tm,t1,t2);
+return!l_isfalse(L->top);
+}
+static void luaV_concat(lua_State*L,int total,int last){
+do{
+StkId top=L->base+last+1;
+int n=2;
+if(!(ttisstring(top-2)||ttisnumber(top-2))||!tostring(L,top-1)){
+if(!call_binTM(L,top-2,top-1,top-2,TM_CONCAT))
+luaG_concaterror(L,top-2,top-1);
+}else if(tsvalue(top-1)->len==0)
+(void)tostring(L,top-2);
+else{
+size_t tl=tsvalue(top-1)->len;
+char*buffer;
+int i;
+for(n=1;n<total&&tostring(L,top-n-1);n++){
+size_t l=tsvalue(top-n-1)->len;
+if(l>=((size_t)(~(size_t)0)-2)-tl)luaG_runerror(L,"string length overflow");
+tl+=l;
+}
+buffer=luaZ_openspace(L,&G(L)->buff,tl);
+tl=0;
+for(i=n;i>0;i--){
+size_t l=tsvalue(top-i)->len;
+memcpy(buffer+tl,svalue(top-i),l);
+tl+=l;
+}
+setsvalue(L,top-n,luaS_newlstr(L,buffer,tl));
+}
+total-=n-1;
+last-=n-1;
+}while(total>1);
+}
+static void Arith(lua_State*L,StkId ra,const TValue*rb,
+const TValue*rc,TMS op){
+TValue tempb,tempc;
+const TValue*b,*c;
+if((b=luaV_tonumber(rb,&tempb))!=NULL&&
+(c=luaV_tonumber(rc,&tempc))!=NULL){
+lua_Number nb=nvalue(b),nc=nvalue(c);
+switch(op){
+case TM_ADD:setnvalue(ra,luai_numadd(nb,nc));break;
+case TM_SUB:setnvalue(ra,luai_numsub(nb,nc));break;
+case TM_MUL:setnvalue(ra,luai_nummul(nb,nc));break;
+case TM_DIV:setnvalue(ra,luai_numdiv(nb,nc));break;
+case TM_MOD:setnvalue(ra,luai_nummod(nb,nc));break;
+case TM_POW:setnvalue(ra,luai_numpow(nb,nc));break;
+case TM_UNM:setnvalue(ra,luai_numunm(nb));break;
+default:break;
+}
+}
+else if(!call_binTM(L,rb,rc,ra,op))
+luaG_aritherror(L,rb,rc);
+}
+#define runtime_check(L,c){if(!(c))break;}
+#define RA(i)(base+GETARG_A(i))
+#define RB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgR,base+GETARG_B(i))
+#define RKB(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_B(i))?k+INDEXK(GETARG_B(i)):base+GETARG_B(i))
+#define RKC(i)check_exp(getCMode(GET_OPCODE(i))==OpArgK,ISK(GETARG_C(i))?k+INDEXK(GETARG_C(i)):base+GETARG_C(i))
+#define KBx(i)check_exp(getBMode(GET_OPCODE(i))==OpArgK,k+GETARG_Bx(i))
+#define dojump(L,pc,i){(pc)+=(i);}
+#define Protect(x){L->savedpc=pc;{x;};base=L->base;}
+#define arith_op(op,tm){TValue*rb=RKB(i);TValue*rc=RKC(i);if(ttisnumber(rb)&&ttisnumber(rc)){lua_Number nb=nvalue(rb),nc=nvalue(rc);setnvalue(ra,op(nb,nc));}else Protect(Arith(L,ra,rb,rc,tm));}
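+/* luaV_execute() is the interpreter proper: one switch arm per opcode.
+** Protect() saves the program counter around any operation that may
+** reallocate the stack or raise an error, then reloads `base`. */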
+static void luaV_execute(lua_State*L,int nexeccalls){
+LClosure*cl;
+StkId base;
+TValue*k;
+const Instruction*pc;
+reentry:
+pc=L->savedpc;
+cl=&clvalue(L->ci->func)->l;
+base=L->base;
+k=cl->p->k;
+for(;;){
+const Instruction i=*pc++;
+StkId ra;
+ra=RA(i);
+switch(GET_OPCODE(i)){
+case OP_MOVE:{
+setobj(L,ra,RB(i));
+continue;
+}
+case OP_LOADK:{
+setobj(L,ra,KBx(i));
+continue;
+}
+case OP_LOADBOOL:{
+setbvalue(ra,GETARG_B(i));
+if(GETARG_C(i))pc++;
+continue;
+}
+case OP_LOADNIL:{
+TValue*rb=RB(i);
+do{
+setnilvalue(rb--);
+}while(rb>=ra);
+continue;
+}
+case OP_GETUPVAL:{
+int b=GETARG_B(i);
+setobj(L,ra,cl->upvals[b]->v);
+continue;
+}
+case OP_GETGLOBAL:{
+TValue g;
+TValue*rb=KBx(i);
+sethvalue(L,&g,cl->env);
+Protect(luaV_gettable(L,&g,rb,ra));
+continue;
+}
+case OP_GETTABLE:{
+Protect(luaV_gettable(L,RB(i),RKC(i),ra));
+continue;
+}
+case OP_SETGLOBAL:{
+TValue g;
+sethvalue(L,&g,cl->env);
+Protect(luaV_settable(L,&g,KBx(i),ra));
+continue;
+}
+case OP_SETUPVAL:{
+UpVal*uv=cl->upvals[GETARG_B(i)];
+setobj(L,uv->v,ra);
+luaC_barrier(L,uv,ra);
+continue;
+}
+case OP_SETTABLE:{
+Protect(luaV_settable(L,ra,RKB(i),RKC(i)));
+continue;
+}
+case OP_NEWTABLE:{
+int b=GETARG_B(i);
+int c=GETARG_C(i);
+sethvalue(L,ra,luaH_new(L,luaO_fb2int(b),luaO_fb2int(c)));
+Protect(luaC_checkGC(L));
+continue;
+}
+case OP_SELF:{
+StkId rb=RB(i);
+setobj(L,ra+1,rb);
+Protect(luaV_gettable(L,rb,RKC(i),ra));
+continue;
+}
+case OP_ADD:{
+arith_op(luai_numadd,TM_ADD);
+continue;
+}
+case OP_SUB:{
+arith_op(luai_numsub,TM_SUB);
+continue;
+}
+case OP_MUL:{
+arith_op(luai_nummul,TM_MUL);
+continue;
+}
+case OP_DIV:{
+arith_op(luai_numdiv,TM_DIV);
+continue;
+}
+case OP_MOD:{
+arith_op(luai_nummod,TM_MOD);
+continue;
+}
+case OP_POW:{
+arith_op(luai_numpow,TM_POW);
+continue;
+}
+case OP_UNM:{
+TValue*rb=RB(i);
+if(ttisnumber(rb)){
+lua_Number nb=nvalue(rb);
+setnvalue(ra,luai_numunm(nb));
+}
+else{
+Protect(Arith(L,ra,rb,rb,TM_UNM));
+}
+continue;
+}
+case OP_NOT:{
+int res=l_isfalse(RB(i));
+setbvalue(ra,res);
+continue;
+}
+case OP_LEN:{
+const TValue*rb=RB(i);
+switch(ttype(rb)){
+case 5:{
+setnvalue(ra,cast_num(luaH_getn(hvalue(rb))));
+break;
+}
+case 4:{
+setnvalue(ra,cast_num(tsvalue(rb)->len));
+break;
+}
+default:{
+Protect(
+if(!call_binTM(L,rb,(&luaO_nilobject_),ra,TM_LEN))
+luaG_typeerror(L,rb,"get length of");
+)
+}
+}
+continue;
+}
+case OP_CONCAT:{
+int b=GETARG_B(i);
+int c=GETARG_C(i);
+Protect(luaV_concat(L,c-b+1,c);luaC_checkGC(L));
+setobj(L,RA(i),base+b);
+continue;
+}
+case OP_JMP:{
+dojump(L,pc,GETARG_sBx(i));
+continue;
+}
+case OP_EQ:{
+TValue*rb=RKB(i);
+TValue*rc=RKC(i);
+Protect(
+if(equalobj(L,rb,rc)==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_LT:{
+Protect(
+if(luaV_lessthan(L,RKB(i),RKC(i))==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_LE:{
+Protect(
+if(lessequal(L,RKB(i),RKC(i))==GETARG_A(i))
+dojump(L,pc,GETARG_sBx(*pc));
+)
+pc++;
+continue;
+}
+case OP_TEST:{
+if(l_isfalse(ra)!=GETARG_C(i))
+dojump(L,pc,GETARG_sBx(*pc));
+pc++;
+continue;
+}
+case OP_TESTSET:{
+TValue*rb=RB(i);
+if(l_isfalse(rb)!=GETARG_C(i)){
+setobj(L,ra,rb);
+dojump(L,pc,GETARG_sBx(*pc));
+}
+pc++;
+continue;
+}
+case OP_CALL:{
+int b=GETARG_B(i);
+int nresults=GETARG_C(i)-1;
+if(b!=0)L->top=ra+b;
+L->savedpc=pc;
+switch(luaD_precall(L,ra,nresults)){
+case 0:{
+nexeccalls++;
+goto reentry;
+}
+case 1:{
+if(nresults>=0)L->top=L->ci->top;
+base=L->base;
+continue;
+}
+default:{
+return;
+}
+}
+}
+case OP_TAILCALL:{
+int b=GETARG_B(i);
+if(b!=0)L->top=ra+b;
+L->savedpc=pc;
+switch(luaD_precall(L,ra,(-1))){
+case 0:{
+CallInfo*ci=L->ci-1;
+int aux;
+StkId func=ci->func;
+StkId pfunc=(ci+1)->func;
+if(L->openupval)luaF_close(L,ci->base);
+L->base=ci->base=ci->func+((ci+1)->base-pfunc);
+for(aux=0;pfunc+aux<L->top;aux++)
+setobj(L,func+aux,pfunc+aux);
+ci->top=L->top=func+aux;
+ci->savedpc=L->savedpc;
+ci->tailcalls++;
+L->ci--;
+goto reentry;
+}
+case 1:{
+base=L->base;
+continue;
+}
+default:{
+return;
+}
+}
+}
+case OP_RETURN:{
+int b=GETARG_B(i);
+if(b!=0)L->top=ra+b-1;
+if(L->openupval)luaF_close(L,base);
+L->savedpc=pc;
+b=luaD_poscall(L,ra);
+if(--nexeccalls==0)
+return;
+else{
+if(b)L->top=L->ci->top;
+goto reentry;
+}
+}
+case OP_FORLOOP:{
+lua_Number step=nvalue(ra+2);
+lua_Number idx=luai_numadd(nvalue(ra),step);
+lua_Number limit=nvalue(ra+1);
+if(luai_numlt(0,step)?luai_numle(idx,limit)
+:luai_numle(limit,idx)){
+dojump(L,pc,GETARG_sBx(i));
+setnvalue(ra,idx);
+setnvalue(ra+3,idx);
+}
+continue;
+}
+case OP_FORPREP:{
+const TValue*init=ra;
+const TValue*plimit=ra+1;
+const TValue*pstep=ra+2;
+L->savedpc=pc;
+if(!tonumber(init,ra))
+luaG_runerror(L,LUA_QL("for")" initial value must be a number");
+else if(!tonumber(plimit,ra+1))
+luaG_runerror(L,LUA_QL("for")" limit must be a number");
+else if(!tonumber(pstep,ra+2))
+luaG_runerror(L,LUA_QL("for")" step must be a number");
+setnvalue(ra,luai_numsub(nvalue(ra),nvalue(pstep)));
+dojump(L,pc,GETARG_sBx(i));
+continue;
+}
+case OP_TFORLOOP:{
+StkId cb=ra+3;
+setobj(L,cb+2,ra+2);
+setobj(L,cb+1,ra+1);
+setobj(L,cb,ra);
+L->top=cb+3;
+Protect(luaD_call(L,cb,GETARG_C(i)));
+L->top=L->ci->top;
+cb=RA(i)+3;
+if(!ttisnil(cb)){
+setobj(L,cb-1,cb);
+dojump(L,pc,GETARG_sBx(*pc));
+}
+pc++;
+continue;
+}
+case OP_SETLIST:{
+int n=GETARG_B(i);
+int c=GETARG_C(i);
+int last;
+Table*h;
+if(n==0){
+n=cast_int(L->top-ra)-1;
+L->top=L->ci->top;
+}
+if(c==0)c=cast_int(*pc++);
+runtime_check(L,ttistable(ra));
+h=hvalue(ra);
+last=((c-1)*50)+n;
+if(last>h->sizearray)
+luaH_resizearray(L,h,last);
+for(;n>0;n--){
+TValue*val=ra+n;
+setobj(L,luaH_setnum(L,h,last--),val);
+luaC_barriert(L,h,val);
+}
+continue;
+}
+case OP_CLOSE:{
+luaF_close(L,ra);
+continue;
+}
+case OP_CLOSURE:{
+Proto*p;
+Closure*ncl;
+int nup,j;
+p=cl->p->p[GETARG_Bx(i)];
+nup=p->nups;
+ncl=luaF_newLclosure(L,nup,cl->env);
+ncl->l.p=p;
+for(j=0;j<nup;j++,pc++){
+if(GET_OPCODE(*pc)==OP_GETUPVAL)
+ncl->l.upvals[j]=cl->upvals[GETARG_B(*pc)];
+else{
+ncl->l.upvals[j]=luaF_findupval(L,base+GETARG_B(*pc));
+}
+}
+setclvalue(L,ra,ncl);
+Protect(luaC_checkGC(L));
+continue;
+}
+case OP_VARARG:{
+int b=GETARG_B(i)-1;
+int j;
+CallInfo*ci=L->ci;
+int n=cast_int(ci->base-ci->func)-cl->p->numparams-1;
+if(b==(-1)){
+Protect(luaD_checkstack(L,n));
+ra=RA(i);
+b=n;
+L->top=ra+n;
+}
+for(j=0;j<b;j++){
+if(j<n){
+setobj(L,ra+j,ci->base-n+j);
+}
+else{
+setnilvalue(ra+j);
+}
+}
+continue;
+}
+}
+}
+}
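+/* The rest of this hunk is the minified C API (lapi.c). The magic indices
+** below are the inlined pseudo-index defaults: -10000=LUA_REGISTRYINDEX,
+** -10001=LUA_ENVIRONINDEX, -10002=LUA_GLOBALSINDEX; the 8000 in
+** lua_checkstack() is LUAI_MAXCSTACK. */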
+#define api_checknelems(L,n)luai_apicheck(L,(n)<=(L->top-L->base))
+#define api_checkvalidindex(L,i)luai_apicheck(L,(i)!=(&luaO_nilobject_))
+#define api_incr_top(L){luai_apicheck(L,L->top<L->ci->top);L->top++;}
+static TValue*index2adr(lua_State*L,int idx){
+if(idx>0){
+TValue*o=L->base+(idx-1);
+luai_apicheck(L,idx<=L->ci->top-L->base);
+if(o>=L->top)return cast(TValue*,(&luaO_nilobject_));
+else return o;
+}
+else if(idx>(-10000)){
+luai_apicheck(L,idx!=0&&-idx<=L->top-L->base);
+return L->top+idx;
+}
+else switch(idx){
+case(-10000):return registry(L);
+case(-10001):{
+Closure*func=curr_func(L);
+sethvalue(L,&L->env,func->c.env);
+return&L->env;
+}
+case(-10002):return gt(L);
+default:{
+Closure*func=curr_func(L);
+idx=(-10002)-idx;
+return(idx<=func->c.nupvalues)
+?&func->c.upvalue[idx-1]
+:cast(TValue*,(&luaO_nilobject_));
+}
+}
+}
+static Table*getcurrenv(lua_State*L){
+if(L->ci==L->base_ci)
+return hvalue(gt(L));
+else{
+Closure*func=curr_func(L);
+return func->c.env;
+}
+}
+static int lua_checkstack(lua_State*L,int size){
+int res=1;
+if(size>8000||(L->top-L->base+size)>8000)
+res=0;
+else if(size>0){
+luaD_checkstack(L,size);
+if(L->ci->top<L->top+size)
+L->ci->top=L->top+size;
+}
+return res;
+}
+static lua_CFunction lua_atpanic(lua_State*L,lua_CFunction panicf){
+lua_CFunction old;
+old=G(L)->panic;
+G(L)->panic=panicf;
+return old;
+}
+static int lua_gettop(lua_State*L){
+return cast_int(L->top-L->base);
+}
+static void lua_settop(lua_State*L,int idx){
+if(idx>=0){
+luai_apicheck(L,idx<=L->stack_last-L->base);
+while(L->top<L->base+idx)
+setnilvalue(L->top++);
+L->top=L->base+idx;
+}
+else{
+luai_apicheck(L,-(idx+1)<=(L->top-L->base));
+L->top+=idx+1;
+}
+}
+static void lua_remove(lua_State*L,int idx){
+StkId p;
+p=index2adr(L,idx);
+api_checkvalidindex(L,p);
+while(++p<L->top)setobj(L,p-1,p);
+L->top--;
+}
+static void lua_insert(lua_State*L,int idx){
+StkId p;
+StkId q;
+p=index2adr(L,idx);
+api_checkvalidindex(L,p);
+for(q=L->top;q>p;q--)setobj(L,q,q-1);
+setobj(L,p,L->top);
+}
+static void lua_replace(lua_State*L,int idx){
+StkId o;
+if(idx==(-10001)&&L->ci==L->base_ci)
+luaG_runerror(L,"no calling environment");
+api_checknelems(L,1);
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+if(idx==(-10001)){
+Closure*func=curr_func(L);
+luai_apicheck(L,ttistable(L->top-1));
+func->c.env=hvalue(L->top-1);
+luaC_barrier(L,func,L->top-1);
+}
+else{
+setobj(L,o,L->top-1);
+if(idx<(-10002))
+luaC_barrier(L,curr_func(L),L->top-1);
+}
+L->top--;
+}
+static void lua_pushvalue(lua_State*L,int idx){
+setobj(L,L->top,index2adr(L,idx));
+api_incr_top(L);
+}
+static int lua_type(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return(o==(&luaO_nilobject_))?(-1):ttype(o);
+}
+static const char*lua_typename(lua_State*L,int t){
+UNUSED(L);
+return(t==(-1))?"no value":luaT_typenames[t];
+}
+static int lua_iscfunction(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return iscfunction(o);
+}
+static int lua_isnumber(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+return tonumber(o,&n);
+}
+static int lua_isstring(lua_State*L,int idx){
+int t=lua_type(L,idx);
+return(t==4||t==3);
+}
+static int lua_rawequal(lua_State*L,int index1,int index2){
+StkId o1=index2adr(L,index1);
+StkId o2=index2adr(L,index2);
+return(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
+:luaO_rawequalObj(o1,o2);
+}
+static int lua_lessthan(lua_State*L,int index1,int index2){
+StkId o1,o2;
+int i;
+o1=index2adr(L,index1);
+o2=index2adr(L,index2);
+i=(o1==(&luaO_nilobject_)||o2==(&luaO_nilobject_))?0
+:luaV_lessthan(L,o1,o2);
+return i;
+}
+static lua_Number lua_tonumber(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+if(tonumber(o,&n))
+return nvalue(o);
+else
+return 0;
+}
+static lua_Integer lua_tointeger(lua_State*L,int idx){
+TValue n;
+const TValue*o=index2adr(L,idx);
+if(tonumber(o,&n)){
+lua_Integer res;
+lua_Number num=nvalue(o);
+lua_number2integer(res,num);
+return res;
+}
+else
+return 0;
+}
+static int lua_toboolean(lua_State*L,int idx){
+const TValue*o=index2adr(L,idx);
+return!l_isfalse(o);
+}
+static const char*lua_tolstring(lua_State*L,int idx,size_t*len){
+StkId o=index2adr(L,idx);
+if(!ttisstring(o)){
+if(!luaV_tostring(L,o)){
+if(len!=NULL)*len=0;
+return NULL;
+}
+luaC_checkGC(L);
+o=index2adr(L,idx);
+}
+if(len!=NULL)*len=tsvalue(o)->len;
+return svalue(o);
+}
+static size_t lua_objlen(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+switch(ttype(o)){
+case 4:return tsvalue(o)->len;
+case 7:return uvalue(o)->len;
+case 5:return luaH_getn(hvalue(o));
+case 3:{
+size_t l;
+l=(luaV_tostring(L,o)?tsvalue(o)->len:0);
+return l;
+}
+default:return 0;
+}
+}
+static lua_CFunction lua_tocfunction(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+return(!iscfunction(o))?NULL:clvalue(o)->c.f;
+}
+static void*lua_touserdata(lua_State*L,int idx){
+StkId o=index2adr(L,idx);
+switch(ttype(o)){
+case 7:return(rawuvalue(o)+1);
+case 2:return pvalue(o);
+default:return NULL;
+}
+}
+static void lua_pushnil(lua_State*L){
+setnilvalue(L->top);
+api_incr_top(L);
+}
+static void lua_pushnumber(lua_State*L,lua_Number n){
+setnvalue(L->top,n);
+api_incr_top(L);
+}
+static void lua_pushinteger(lua_State*L,lua_Integer n){
+setnvalue(L->top,cast_num(n));
+api_incr_top(L);
+}
+static void lua_pushlstring(lua_State*L,const char*s,size_t len){
+luaC_checkGC(L);
+setsvalue(L,L->top,luaS_newlstr(L,s,len));
+api_incr_top(L);
+}
+static void lua_pushstring(lua_State*L,const char*s){
+if(s==NULL)
+lua_pushnil(L);
+else
+lua_pushlstring(L,s,strlen(s));
+}
+static const char*lua_pushvfstring(lua_State*L,const char*fmt,
+va_list argp){
+const char*ret;
+luaC_checkGC(L);
+ret=luaO_pushvfstring(L,fmt,argp);
+return ret;
+}
+static const char*lua_pushfstring(lua_State*L,const char*fmt,...){
+const char*ret;
+va_list argp;
+luaC_checkGC(L);
+va_start(argp,fmt);
+ret=luaO_pushvfstring(L,fmt,argp);
+va_end(argp);
+return ret;
+}
+static void lua_pushcclosure(lua_State*L,lua_CFunction fn,int n){
+Closure*cl;
+luaC_checkGC(L);
+api_checknelems(L,n);
+cl=luaF_newCclosure(L,n,getcurrenv(L));
+cl->c.f=fn;
+L->top-=n;
+while(n--)
+setobj(L,&cl->c.upvalue[n],L->top+n);
+setclvalue(L,L->top,cl);
+api_incr_top(L);
+}
+static void lua_pushboolean(lua_State*L,int b){
+setbvalue(L->top,(b!=0));
+api_incr_top(L);
+}
+static int lua_pushthread(lua_State*L){
+setthvalue(L,L->top,L);
+api_incr_top(L);
+return(G(L)->mainthread==L);
+}
+static void lua_gettable(lua_State*L,int idx){
+StkId t;
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+luaV_gettable(L,t,L->top-1,L->top-1);
+}
+static void lua_getfield(lua_State*L,int idx,const char*k){
+StkId t;
+TValue key;
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+setsvalue(L,&key,luaS_new(L,k));
+luaV_gettable(L,t,&key,L->top);
+api_incr_top(L);
+}
+static void lua_rawget(lua_State*L,int idx){
+StkId t;
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+setobj(L,L->top-1,luaH_get(hvalue(t),L->top-1));
+}
+static void lua_rawgeti(lua_State*L,int idx,int n){
+StkId o;
+o=index2adr(L,idx);
+luai_apicheck(L,ttistable(o));
+setobj(L,L->top,luaH_getnum(hvalue(o),n));
+api_incr_top(L);
+}
+static void lua_createtable(lua_State*L,int narray,int nrec){
+luaC_checkGC(L);
+sethvalue(L,L->top,luaH_new(L,narray,nrec));
+api_incr_top(L);
+}
+static int lua_getmetatable(lua_State*L,int objindex){
+const TValue*obj;
+Table*mt=NULL;
+int res;
+obj=index2adr(L,objindex);
+switch(ttype(obj)){
+case 5:
+mt=hvalue(obj)->metatable;
+break;
+case 7:
+mt=uvalue(obj)->metatable;
+break;
+default:
+mt=G(L)->mt[ttype(obj)];
+break;
+}
+if(mt==NULL)
+res=0;
+else{
+sethvalue(L,L->top,mt);
+api_incr_top(L);
+res=1;
+}
+return res;
+}
+static void lua_getfenv(lua_State*L,int idx){
+StkId o;
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+switch(ttype(o)){
+case 6:
+sethvalue(L,L->top,clvalue(o)->c.env);
+break;
+case 7:
+sethvalue(L,L->top,uvalue(o)->env);
+break;
+case 8:
+setobj(L,L->top,gt(thvalue(o)));
+break;
+default:
+setnilvalue(L->top);
+break;
+}
+api_incr_top(L);
+}
+static void lua_settable(lua_State*L,int idx){
+StkId t;
+api_checknelems(L,2);
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+luaV_settable(L,t,L->top-2,L->top-1);
+L->top-=2;
+}
+static void lua_setfield(lua_State*L,int idx,const char*k){
+StkId t;
+TValue key;
+api_checknelems(L,1);
+t=index2adr(L,idx);
+api_checkvalidindex(L,t);
+setsvalue(L,&key,luaS_new(L,k));
+luaV_settable(L,t,&key,L->top-1);
+L->top--;
+}
+static void lua_rawset(lua_State*L,int idx){
+StkId t;
+api_checknelems(L,2);
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+setobj(L,luaH_set(L,hvalue(t),L->top-2),L->top-1);
+luaC_barriert(L,hvalue(t),L->top-1);
+L->top-=2;
+}
+static void lua_rawseti(lua_State*L,int idx,int n){
+StkId o;
+api_checknelems(L,1);
+o=index2adr(L,idx);
+luai_apicheck(L,ttistable(o));
+setobj(L,luaH_setnum(L,hvalue(o),n),L->top-1);
+luaC_barriert(L,hvalue(o),L->top-1);
+L->top--;
+}
+static int lua_setmetatable(lua_State*L,int objindex){
+TValue*obj;
+Table*mt;
+api_checknelems(L,1);
+obj=index2adr(L,objindex);
+api_checkvalidindex(L,obj);
+if(ttisnil(L->top-1))
+mt=NULL;
+else{
+luai_apicheck(L,ttistable(L->top-1));
+mt=hvalue(L->top-1);
+}
+switch(ttype(obj)){
+case 5:{
+hvalue(obj)->metatable=mt;
+if(mt)
+luaC_objbarriert(L,hvalue(obj),mt);
+break;
+}
+case 7:{
+uvalue(obj)->metatable=mt;
+if(mt)
+luaC_objbarrier(L,rawuvalue(obj),mt);
+break;
+}
+default:{
+G(L)->mt[ttype(obj)]=mt;
+break;
+}
+}
+L->top--;
+return 1;
+}
+static int lua_setfenv(lua_State*L,int idx){
+StkId o;
+int res=1;
+api_checknelems(L,1);
+o=index2adr(L,idx);
+api_checkvalidindex(L,o);
+luai_apicheck(L,ttistable(L->top-1));
+switch(ttype(o)){
+case 6:
+clvalue(o)->c.env=hvalue(L->top-1);
+break;
+case 7:
+uvalue(o)->env=hvalue(L->top-1);
+break;
+case 8:
+sethvalue(L,gt(thvalue(o)),hvalue(L->top-1));
+break;
+default:
+res=0;
+break;
+}
+if(res)luaC_objbarrier(L,gcvalue(o),hvalue(L->top-1));
+L->top--;
+return res;
+}
+#define adjustresults(L,nres){if(nres==(-1)&&L->top>=L->ci->top)L->ci->top=L->top;}
+#define checkresults(L,na,nr)luai_apicheck(L,(nr)==(-1)||(L->ci->top-L->top>=(nr)-(na)))
+static void lua_call(lua_State*L,int nargs,int nresults){
+StkId func;
+api_checknelems(L,nargs+1);
+checkresults(L,nargs,nresults);
+func=L->top-(nargs+1);
+luaD_call(L,func,nresults);
+adjustresults(L,nresults);
+}
+struct CallS{
+StkId func;
+int nresults;
+};
+static void f_call(lua_State*L,void*ud){
+struct CallS*c=cast(struct CallS*,ud);
+luaD_call(L,c->func,c->nresults);
+}
+static int lua_pcall(lua_State*L,int nargs,int nresults,int errfunc){
+struct CallS c;
+int status;
+ptrdiff_t func;
+api_checknelems(L,nargs+1);
+checkresults(L,nargs,nresults);
+if(errfunc==0)
+func=0;
+else{
+StkId o=index2adr(L,errfunc);
+api_checkvalidindex(L,o);
+func=savestack(L,o);
+}
+c.func=L->top-(nargs+1);
+c.nresults=nresults;
+status=luaD_pcall(L,f_call,&c,savestack(L,c.func),func);
+adjustresults(L,nresults);
+return status;
+}
+static int lua_load(lua_State*L,lua_Reader reader,void*data,
+const char*chunkname){
+ZIO z;
+int status;
+if(!chunkname)chunkname="?";
+luaZ_init(L,&z,reader,data);
+status=luaD_protectedparser(L,&z,chunkname);
+return status;
+}
+static int lua_error(lua_State*L){
+api_checknelems(L,1);
+luaG_errormsg(L);
+return 0;
+}
+static int lua_next(lua_State*L,int idx){
+StkId t;
+int more;
+t=index2adr(L,idx);
+luai_apicheck(L,ttistable(t));
+more=luaH_next(L,hvalue(t),L->top-1);
+if(more){
+api_incr_top(L);
+}
+else
+L->top-=1;
+return more;
+}
+static void lua_concat(lua_State*L,int n){
+api_checknelems(L,n);
+if(n>=2){
+luaC_checkGC(L);
+luaV_concat(L,n,cast_int(L->top-L->base)-1);
+L->top-=(n-1);
+}
+else if(n==0){
+setsvalue(L,L->top,luaS_newlstr(L,"",0));
+api_incr_top(L);
+}
+}
+static void*lua_newuserdata(lua_State*L,size_t size){
+Udata*u;
+luaC_checkGC(L);
+u=luaS_newudata(L,size,getcurrenv(L));
+setuvalue(L,L->top,u);
+api_incr_top(L);
+return u+1;
+}
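+/* Auxiliary library (lauxlib.c): argument checking, string buffers and the
+** file/string chunk loaders. */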
+#define luaL_getn(L,i)((int)lua_objlen(L,i))
+#define luaL_setn(L,i,j)((void)0)
+typedef struct luaL_Reg{
+const char*name;
+lua_CFunction func;
+}luaL_Reg;
+static void luaI_openlib(lua_State*L,const char*libname,
+const luaL_Reg*l,int nup);
+static int luaL_argerror(lua_State*L,int numarg,const char*extramsg);
+static const char* luaL_checklstring(lua_State*L,int numArg,
+size_t*l);
+static const char* luaL_optlstring(lua_State*L,int numArg,
+const char*def,size_t*l);
+static lua_Integer luaL_checkinteger(lua_State*L,int numArg);
+static lua_Integer luaL_optinteger(lua_State*L,int nArg,
+lua_Integer def);
+static int luaL_error(lua_State*L,const char*fmt,...);
+static const char* luaL_findtable(lua_State*L,int idx,
+const char*fname,int szhint);
+#define luaL_argcheck(L,cond,numarg,extramsg)((void)((cond)||luaL_argerror(L,(numarg),(extramsg))))
+#define luaL_checkstring(L,n)(luaL_checklstring(L,(n),NULL))
+#define luaL_optstring(L,n,d)(luaL_optlstring(L,(n),(d),NULL))
+#define luaL_checkint(L,n)((int)luaL_checkinteger(L,(n)))
+#define luaL_optint(L,n,d)((int)luaL_optinteger(L,(n),(d)))
+#define luaL_typename(L,i)lua_typename(L,lua_type(L,(i)))
+#define luaL_getmetatable(L,n)(lua_getfield(L,(-10000),(n)))
+#define luaL_opt(L,f,n,d)(lua_isnoneornil(L,(n))?(d):f(L,(n)))
+typedef struct luaL_Buffer{
+char*p;
+int lvl;
+lua_State*L;
+char buffer[BUFSIZ];
+}luaL_Buffer;
+#define luaL_addchar(B,c)((void)((B)->p<((B)->buffer+BUFSIZ)||luaL_prepbuffer(B)),(*(B)->p++=(char)(c)))
+#define luaL_addsize(B,n)((B)->p+=(n))
+static char* luaL_prepbuffer(luaL_Buffer*B);
+static int luaL_argerror(lua_State*L,int narg,const char*extramsg){
+lua_Debug ar;
+if(!lua_getstack(L,0,&ar))
+return luaL_error(L,"bad argument #%d (%s)",narg,extramsg);
+lua_getinfo(L,"n",&ar);
+if(strcmp(ar.namewhat,"method")==0){
+narg--;
+if(narg==0)
+return luaL_error(L,"calling "LUA_QL("%s")" on bad self (%s)",
+ar.name,extramsg);
+}
+if(ar.name==NULL)
+ar.name="?";
+return luaL_error(L,"bad argument #%d to "LUA_QL("%s")" (%s)",
+narg,ar.name,extramsg);
+}
+static int luaL_typerror(lua_State*L,int narg,const char*tname){
+const char*msg=lua_pushfstring(L,"%s expected, got %s",
+tname,luaL_typename(L,narg));
+return luaL_argerror(L,narg,msg);
+}
+static void tag_error(lua_State*L,int narg,int tag){
+luaL_typerror(L,narg,lua_typename(L,tag));
+}
+static void luaL_where(lua_State*L,int level){
+lua_Debug ar;
+if(lua_getstack(L,level,&ar)){
+lua_getinfo(L,"Sl",&ar);
+if(ar.currentline>0){
+lua_pushfstring(L,"%s:%d: ",ar.short_src,ar.currentline);
+return;
+}
+}
+lua_pushliteral(L,"");
+}
+static int luaL_error(lua_State*L,const char*fmt,...){
+va_list argp;
+va_start(argp,fmt);
+luaL_where(L,1);
+lua_pushvfstring(L,fmt,argp);
+va_end(argp);
+lua_concat(L,2);
+return lua_error(L);
+}
+static int luaL_newmetatable(lua_State*L,const char*tname){
+lua_getfield(L,(-10000),tname);
+if(!lua_isnil(L,-1))
+return 0;
+lua_pop(L,1);
+lua_newtable(L);
+lua_pushvalue(L,-1);
+lua_setfield(L,(-10000),tname);
+return 1;
+}
+static void*luaL_checkudata(lua_State*L,int ud,const char*tname){
+void*p=lua_touserdata(L,ud);
+if(p!=NULL){
+if(lua_getmetatable(L,ud)){
+lua_getfield(L,(-10000),tname);
+if(lua_rawequal(L,-1,-2)){
+lua_pop(L,2);
+return p;
+}
+}
+}
+luaL_typerror(L,ud,tname);
+return NULL;
+}
+static void luaL_checkstack(lua_State*L,int space,const char*mes){
+if(!lua_checkstack(L,space))
+luaL_error(L,"stack overflow (%s)",mes);
+}
+static void luaL_checktype(lua_State*L,int narg,int t){
+if(lua_type(L,narg)!=t)
+tag_error(L,narg,t);
+}
+static void luaL_checkany(lua_State*L,int narg){
+if(lua_type(L,narg)==(-1))
+luaL_argerror(L,narg,"value expected");
+}
+static const char*luaL_checklstring(lua_State*L,int narg,size_t*len){
+const char*s=lua_tolstring(L,narg,len);
+if(!s)tag_error(L,narg,4);
+return s;
+}
+static const char*luaL_optlstring(lua_State*L,int narg,
+const char*def,size_t*len){
+if(lua_isnoneornil(L,narg)){
+if(len)
+*len=(def?strlen(def):0);
+return def;
+}
+else return luaL_checklstring(L,narg,len);
+}
+static lua_Number luaL_checknumber(lua_State*L,int narg){
+lua_Number d=lua_tonumber(L,narg);
+if(d==0&&!lua_isnumber(L,narg))
+tag_error(L,narg,3);
+return d;
+}
+static lua_Integer luaL_checkinteger(lua_State*L,int narg){
+lua_Integer d=lua_tointeger(L,narg);
+if(d==0&&!lua_isnumber(L,narg))
+tag_error(L,narg,3);
+return d;
+}
+static lua_Integer luaL_optinteger(lua_State*L,int narg,
+lua_Integer def){
+return luaL_opt(L,luaL_checkinteger,narg,def);
+}
+static int luaL_getmetafield(lua_State*L,int obj,const char*event){
+if(!lua_getmetatable(L,obj))
+return 0;
+lua_pushstring(L,event);
+lua_rawget(L,-2);
+if(lua_isnil(L,-1)){
+lua_pop(L,2);
+return 0;
+}
+else{
+lua_remove(L,-2);
+return 1;
+}
+}
+static void luaL_register(lua_State*L,const char*libname,
+const luaL_Reg*l){
+luaI_openlib(L,libname,l,0);
+}
+static int libsize(const luaL_Reg*l){
+int size=0;
+for(;l->name;l++)size++;
+return size;
+}
+static void luaI_openlib(lua_State*L,const char*libname,
+const luaL_Reg*l,int nup){
+if(libname){
+int size=libsize(l);
+luaL_findtable(L,(-10000),"_LOADED",1);
+lua_getfield(L,-1,libname);
+if(!lua_istable(L,-1)){
+lua_pop(L,1);
+if(luaL_findtable(L,(-10002),libname,size)!=NULL)
+luaL_error(L,"name conflict for module "LUA_QL("%s"),libname);
+lua_pushvalue(L,-1);
+lua_setfield(L,-3,libname);
+}
+lua_remove(L,-2);
+lua_insert(L,-(nup+1));
+}
+for(;l->name;l++){
+int i;
+for(i=0;i<nup;i++)
+lua_pushvalue(L,-nup);
+lua_pushcclosure(L,l->func,nup);
+lua_setfield(L,-(nup+2),l->name);
+}
+lua_pop(L,nup);
+}
+static const char*luaL_findtable(lua_State*L,int idx,
+const char*fname,int szhint){
+const char*e;
+lua_pushvalue(L,idx);
+do{
+e=strchr(fname,'.');
+if(e==NULL)e=fname+strlen(fname);
+lua_pushlstring(L,fname,e-fname);
+lua_rawget(L,-2);
+if(lua_isnil(L,-1)){
+lua_pop(L,1);
+lua_createtable(L,0,(*e=='.'?1:szhint));
+lua_pushlstring(L,fname,e-fname);
+lua_pushvalue(L,-2);
+lua_settable(L,-4);
+}
+else if(!lua_istable(L,-1)){
+lua_pop(L,2);
+return fname;
+}
+lua_remove(L,-2);
+fname=e+1;
+}while(*e=='.');
+return NULL;
+}
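+/* luaL_Buffer: data accumulates in the fixed char array and spills onto the
+** Lua stack as intermediate strings (B->lvl counts them); adjuststack()
+** concatenates those pieces lazily to keep the stack shallow. */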
+#define bufflen(B)((B)->p-(B)->buffer)
+#define bufffree(B)((size_t)(BUFSIZ-bufflen(B)))
+static int emptybuffer(luaL_Buffer*B){
+size_t l=bufflen(B);
+if(l==0)return 0;
+else{
+lua_pushlstring(B->L,B->buffer,l);
+B->p=B->buffer;
+B->lvl++;
+return 1;
+}
+}
+static void adjuststack(luaL_Buffer*B){
+if(B->lvl>1){
+lua_State*L=B->L;
+int toget=1;
+size_t toplen=lua_strlen(L,-1);
+do{
+size_t l=lua_strlen(L,-(toget+1));
+if(B->lvl-toget+1>=(20/2)||toplen>l){
+toplen+=l;
+toget++;
+}
+else break;
+}while(toget<B->lvl);
+lua_concat(L,toget);
+B->lvl=B->lvl-toget+1;
+}
+}
+static char*luaL_prepbuffer(luaL_Buffer*B){
+if(emptybuffer(B))
+adjuststack(B);
+return B->buffer;
+}
+static void luaL_addlstring(luaL_Buffer*B,const char*s,size_t l){
+while(l--)
+luaL_addchar(B,*s++);
+}
+static void luaL_pushresult(luaL_Buffer*B){
+emptybuffer(B);
+lua_concat(B->L,B->lvl);
+B->lvl=1;
+}
+static void luaL_addvalue(luaL_Buffer*B){
+lua_State*L=B->L;
+size_t vl;
+const char*s=lua_tolstring(L,-1,&vl);
+if(vl<=bufffree(B)){
+memcpy(B->p,s,vl);
+B->p+=vl;
+lua_pop(L,1);
+}
+else{
+if(emptybuffer(B))
+lua_insert(L,-2);
+B->lvl++;
+adjuststack(B);
+}
+}
+static void luaL_buffinit(lua_State*L,luaL_Buffer*B){
+B->L=L;
+B->p=B->buffer;
+B->lvl=0;
+}
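+/* Chunk readers for lua_load(). "\033Lua" is the precompiled-chunk
+** signature (LUA_SIGNATURE); errfile()'s return value (5+1) is the inlined
+** LUA_ERRFILE status code. */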
+typedef struct LoadF{
+int extraline;
+FILE*f;
+char buff[BUFSIZ];
+}LoadF;
+static const char*getF(lua_State*L,void*ud,size_t*size){
+LoadF*lf=(LoadF*)ud;
+(void)L;
+if(lf->extraline){
+lf->extraline=0;
+*size=1;
+return"\n";
+}
+if(feof(lf->f))return NULL;
+*size=fread(lf->buff,1,sizeof(lf->buff),lf->f);
+return(*size>0)?lf->buff:NULL;
+}
+static int errfile(lua_State*L,const char*what,int fnameindex){
+const char*serr=strerror(errno);
+const char*filename=lua_tostring(L,fnameindex)+1;
+lua_pushfstring(L,"cannot %s %s: %s",what,filename,serr);
+lua_remove(L,fnameindex);
+return(5+1);
+}
+static int luaL_loadfile(lua_State*L,const char*filename){
+LoadF lf;
+int status,readstatus;
+int c;
+int fnameindex=lua_gettop(L)+1;
+lf.extraline=0;
+if(filename==NULL){
+lua_pushliteral(L,"=stdin");
+lf.f=stdin;
+}
+else{
+lua_pushfstring(L,"@%s",filename);
+lf.f=fopen(filename,"r");
+if(lf.f==NULL)return errfile(L,"open",fnameindex);
+}
+c=getc(lf.f);
+if(c=='#'){
+lf.extraline=1;
+while((c=getc(lf.f))!=EOF&&c!='\n');
+if(c=='\n')c=getc(lf.f);
+}
+if(c=="\033Lua"[0]&&filename){
+lf.f=freopen(filename,"rb",lf.f);
+if(lf.f==NULL)return errfile(L,"reopen",fnameindex);
+while((c=getc(lf.f))!=EOF&&c!="\033Lua"[0]);
+lf.extraline=0;
+}
+ungetc(c,lf.f);
+status=lua_load(L,getF,&lf,lua_tostring(L,-1));
+readstatus=ferror(lf.f);
+if(filename)fclose(lf.f);
+if(readstatus){
+lua_settop(L,fnameindex);
+return errfile(L,"read",fnameindex);
+}
+lua_remove(L,fnameindex);
+return status;
+}
+typedef struct LoadS{
+const char*s;
+size_t size;
+}LoadS;
+static const char*getS(lua_State*L,void*ud,size_t*size){
+LoadS*ls=(LoadS*)ud;
+(void)L;
+if(ls->size==0)return NULL;
+*size=ls->size;
+ls->size=0;
+return ls->s;
+}
+static int luaL_loadbuffer(lua_State*L,const char*buff,size_t size,
+const char*name){
+LoadS ls;
+ls.s=buff;
+ls.size=size;
+return lua_load(L,getS,&ls,name);
+}
+static void*l_alloc(void*ud,void*ptr,size_t osize,size_t nsize){
+(void)ud;
+(void)osize;
+if(nsize==0){
+free(ptr);
+return NULL;
+}
+else
+return realloc(ptr,nsize);
+}
+static int panic(lua_State*L){
+(void)L;
+fprintf(stderr,"PANIC: unprotected error in call to Lua API (%s)\n",
+lua_tostring(L,-1));
+return 0;
+}
+static lua_State*luaL_newstate(void){
+lua_State*L=lua_newstate(l_alloc,NULL);
+if(L)lua_atpanic(L,&panic);
+return L;
+}
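+/* Base library (lbaselib.c), apparently stripped by genminilua.lua to the
+** subset the build scripts use; note the inlined type tags again in the
+** luaL_checktype() calls (5 = LUA_TTABLE). */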
+static int luaB_tonumber(lua_State*L){
+int base=luaL_optint(L,2,10);
+if(base==10){
+luaL_checkany(L,1);
+if(lua_isnumber(L,1)){
+lua_pushnumber(L,lua_tonumber(L,1));
+return 1;
+}
+}
+else{
+const char*s1=luaL_checkstring(L,1);
+char*s2;
+unsigned long n;
+luaL_argcheck(L,2<=base&&base<=36,2,"base out of range");
+n=strtoul(s1,&s2,base);
+if(s1!=s2){
+while(isspace((unsigned char)(*s2)))s2++;
+if(*s2=='\0'){
+lua_pushnumber(L,(lua_Number)n);
+return 1;
+}
+}
+}
+lua_pushnil(L);
+return 1;
+}
+static int luaB_error(lua_State*L){
+int level=luaL_optint(L,2,1);
+lua_settop(L,1);
+if(lua_isstring(L,1)&&level>0){
+luaL_where(L,level);
+lua_pushvalue(L,1);
+lua_concat(L,2);
+}
+return lua_error(L);
+}
+static int luaB_setmetatable(lua_State*L){
+int t=lua_type(L,2);
+luaL_checktype(L,1,5);
+luaL_argcheck(L,t==0||t==5,2,
+"nil or table expected");
+if(luaL_getmetafield(L,1,"__metatable"))
+luaL_error(L,"cannot change a protected metatable");
+lua_settop(L,2);
+lua_setmetatable(L,1);
+return 1;
+}
+static void getfunc(lua_State*L,int opt){
+if(lua_isfunction(L,1))lua_pushvalue(L,1);
+else{
+lua_Debug ar;
+int level=opt?luaL_optint(L,1,1):luaL_checkint(L,1);
+luaL_argcheck(L,level>=0,1,"level must be non-negative");
+if(lua_getstack(L,level,&ar)==0)
+luaL_argerror(L,1,"invalid level");
+lua_getinfo(L,"f",&ar);
+if(lua_isnil(L,-1))
+luaL_error(L,"no function environment for tail call at level %d",
+level);
+}
+}
+static int luaB_setfenv(lua_State*L){
+luaL_checktype(L,2,5);
+getfunc(L,0);
+lua_pushvalue(L,2);
+if(lua_isnumber(L,1)&&lua_tonumber(L,1)==0){
+lua_pushthread(L);
+lua_insert(L,-2);
+lua_setfenv(L,-2);
+return 0;
+}
+else if(lua_iscfunction(L,-2)||lua_setfenv(L,-2)==0)
+luaL_error(L,
+LUA_QL("setfenv")" cannot change environment of given object");
+return 1;
+}
+static int luaB_rawget(lua_State*L){
+luaL_checktype(L,1,5);
+luaL_checkany(L,2);
+lua_settop(L,2);
+lua_rawget(L,1);
+return 1;
+}
+static int luaB_type(lua_State*L){
+luaL_checkany(L,1);
+lua_pushstring(L,luaL_typename(L,1));
+return 1;
+}
+static int luaB_next(lua_State*L){
+luaL_checktype(L,1,5);
+lua_settop(L,2);
+if(lua_next(L,1))
+return 2;
+else{
+lua_pushnil(L);
+return 1;
+}
+}
+static int luaB_pairs(lua_State*L){
+luaL_checktype(L,1,5);
+lua_pushvalue(L,lua_upvalueindex(1));
+lua_pushvalue(L,1);
+lua_pushnil(L);
+return 3;
+}
+static int ipairsaux(lua_State*L){
+int i=luaL_checkint(L,2);
+luaL_checktype(L,1,5);
+i++;
+lua_pushinteger(L,i);
+lua_rawgeti(L,1,i);
+return(lua_isnil(L,-1))?0:2;
+}
+static int luaB_ipairs(lua_State*L){
+luaL_checktype(L,1,5);
+lua_pushvalue(L,lua_upvalueindex(1));
+lua_pushvalue(L,1);
+lua_pushinteger(L,0);
+return 3;
+}
+static int load_aux(lua_State*L,int status){
+if(status==0)
+return 1;
+else{
+lua_pushnil(L);
+lua_insert(L,-2);
+return 2;
+}
+}
+static int luaB_loadstring(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+const char*chunkname=luaL_optstring(L,2,s);
+return load_aux(L,luaL_loadbuffer(L,s,l,chunkname));
+}
+static int luaB_loadfile(lua_State*L){
+const char*fname=luaL_optstring(L,1,NULL);
+return load_aux(L,luaL_loadfile(L,fname));
+}
+static int luaB_assert(lua_State*L){
+luaL_checkany(L,1);
+if(!lua_toboolean(L,1))
+return luaL_error(L,"%s",luaL_optstring(L,2,"assertion failed!"));
+return lua_gettop(L);
+}
+static int luaB_unpack(lua_State*L){
+int i,e,n;
+luaL_checktype(L,1,5);
+i=luaL_optint(L,2,1);
+e=luaL_opt(L,luaL_checkint,3,luaL_getn(L,1));
+if(i>e)return 0;
+n=e-i+1;
+if(n<=0||!lua_checkstack(L,n))
+return luaL_error(L,"too many results to unpack");
+lua_rawgeti(L,1,i);
+while(i++<e)
+lua_rawgeti(L,1,i);
+return n;
+}
+static int luaB_pcall(lua_State*L){
+int status;
+luaL_checkany(L,1);
+status=lua_pcall(L,lua_gettop(L)-1,(-1),0);
+lua_pushboolean(L,(status==0));
+lua_insert(L,1);
+return lua_gettop(L);
+}
+static int luaB_newproxy(lua_State*L){
+lua_settop(L,1);
+lua_newuserdata(L,0);
+if(lua_toboolean(L,1)==0)
+return 1;
+else if(lua_isboolean(L,1)){
+lua_newtable(L);
+lua_pushvalue(L,-1);
+lua_pushboolean(L,1);
+lua_rawset(L,lua_upvalueindex(1));
+}
+else{
+int validproxy=0;
+if(lua_getmetatable(L,1)){
+lua_rawget(L,lua_upvalueindex(1));
+validproxy=lua_toboolean(L,-1);
+lua_pop(L,1);
+}
+luaL_argcheck(L,validproxy,1,"boolean or proxy expected");
+lua_getmetatable(L,1);
+}
+lua_setmetatable(L,2);
+return 1;
+}
+static const luaL_Reg base_funcs[]={
+{"assert",luaB_assert},
+{"error",luaB_error},
+{"loadfile",luaB_loadfile},
+{"loadstring",luaB_loadstring},
+{"next",luaB_next},
+{"pcall",luaB_pcall},
+{"rawget",luaB_rawget},
+{"setfenv",luaB_setfenv},
+{"setmetatable",luaB_setmetatable},
+{"tonumber",luaB_tonumber},
+{"type",luaB_type},
+{"unpack",luaB_unpack},
+{NULL,NULL}
+};
+static void auxopen(lua_State*L,const char*name,
+lua_CFunction f,lua_CFunction u){
+lua_pushcfunction(L,u);
+lua_pushcclosure(L,f,1);
+lua_setfield(L,-2,name);
+}
+static void base_open(lua_State*L){
+lua_pushvalue(L,(-10002));
+lua_setglobal(L,"_G");
+luaL_register(L,"_G",base_funcs);
+lua_pushliteral(L,"Lua 5.1");
+lua_setglobal(L,"_VERSION");
+auxopen(L,"ipairs",luaB_ipairs,ipairsaux);
+auxopen(L,"pairs",luaB_pairs,luaB_next);
+lua_createtable(L,0,1);
+lua_pushvalue(L,-1);
+lua_setmetatable(L,-2);
+lua_pushliteral(L,"kv");
+lua_setfield(L,-2,"__mode");
+lua_pushcclosure(L,luaB_newproxy,1);
+lua_setglobal(L,"newproxy");
+}
+static int luaopen_base(lua_State*L){
+base_open(L);
+return 1;
+}
+#define aux_getn(L,n)(luaL_checktype(L,n,5),luaL_getn(L,n))
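+/* tinsert/tremove below implement table.insert/table.remove; aux_getn
+** checks that argument n is a table and reads its array length. */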
+static int tinsert(lua_State*L){
+int e=aux_getn(L,1)+1;
+int pos;
+switch(lua_gettop(L)){
+case 2:{
+pos=e;
+break;
+}
+case 3:{
+int i;
+pos=luaL_checkint(L,2);
+if(pos>e)e=pos;
+for(i=e;i>pos;i--){
+lua_rawgeti(L,1,i-1);
+lua_rawseti(L,1,i);
+}
+break;
+}
+default:{
+return luaL_error(L,"wrong number of arguments to "LUA_QL("insert"));
+}
+}
+luaL_setn(L,1,e);
+lua_rawseti(L,1,pos);
+return 0;
+}
+static int tremove(lua_State*L){
+int e=aux_getn(L,1);
+int pos=luaL_optint(L,2,e);
+if(!(1<=pos&&pos<=e))
+return 0;
+luaL_setn(L,1,e-1);
+lua_rawgeti(L,1,pos);
+for(;pos<e;pos++){
+lua_rawgeti(L,1,pos+1);
+lua_rawseti(L,1,pos);
+}
+lua_pushnil(L);
+lua_rawseti(L,1,e);
+return 1;
+}
+static void addfield(lua_State*L,luaL_Buffer*b,int i){
+lua_rawgeti(L,1,i);
+if(!lua_isstring(L,-1))
+luaL_error(L,"invalid value (%s) at index %d in table for "
+LUA_QL("concat"),luaL_typename(L,-1),i);
+luaL_addvalue(b);
+}
+static int tconcat(lua_State*L){
+luaL_Buffer b;
+size_t lsep;
+int i,last;
+const char*sep=luaL_optlstring(L,2,"",&lsep);
+luaL_checktype(L,1,5);
+i=luaL_optint(L,3,1);
+last=luaL_opt(L,luaL_checkint,4,luaL_getn(L,1));
+luaL_buffinit(L,&b);
+for(;i<last;i++){
+addfield(L,&b,i);
+luaL_addlstring(&b,sep,lsep);
+}
+if(i==last)
+addfield(L,&b,i);
+luaL_pushresult(&b);
+return 1;
+}
+static void set2(lua_State*L,int i,int j){
+lua_rawseti(L,1,i);
+lua_rawseti(L,1,j);
+}
+static int sort_comp(lua_State*L,int a,int b){
+if(!lua_isnil(L,2)){
+int res;
+lua_pushvalue(L,2);
+lua_pushvalue(L,a-1);
+lua_pushvalue(L,b-2);
+lua_call(L,2,1);
+res=lua_toboolean(L,-1);
+lua_pop(L,1);
+return res;
+}
+else
+return lua_lessthan(L,a,b);
+}
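+/* auxsort: recursive quicksort over t[l..u] with median-of-three pivot
+** selection; after partitioning it recurses on the smaller side and
+** tail-loops on the larger one to keep recursion depth O(log n). */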
+static void auxsort(lua_State*L,int l,int u){
+while(l<u){
+int i,j;
+lua_rawgeti(L,1,l);
+lua_rawgeti(L,1,u);
+if(sort_comp(L,-1,-2))
+set2(L,l,u);
+else
+lua_pop(L,2);
+if(u-l==1)break;
+i=(l+u)/2;
+lua_rawgeti(L,1,i);
+lua_rawgeti(L,1,l);
+if(sort_comp(L,-2,-1))
+set2(L,i,l);
+else{
+lua_pop(L,1);
+lua_rawgeti(L,1,u);
+if(sort_comp(L,-1,-2))
+set2(L,i,u);
+else
+lua_pop(L,2);
+}
+if(u-l==2)break;
+lua_rawgeti(L,1,i);
+lua_pushvalue(L,-1);
+lua_rawgeti(L,1,u-1);
+set2(L,i,u-1);
+i=l;j=u-1;
+for(;;){
+while(lua_rawgeti(L,1,++i),sort_comp(L,-1,-2)){
+if(i>u)luaL_error(L,"invalid order function for sorting");
+lua_pop(L,1);
+}
+while(lua_rawgeti(L,1,--j),sort_comp(L,-3,-1)){
+if(j<l)luaL_error(L,"invalid order function for sorting");
+lua_pop(L,1);
+}
+if(j<i){
+lua_pop(L,3);
+break;
+}
+set2(L,i,j);
+}
+lua_rawgeti(L,1,u-1);
+lua_rawgeti(L,1,i);
+set2(L,u-1,i);
+if(i-l<u-i){
+j=l;i=i-1;l=i+2;
+}
+else{
+j=i+1;i=u;u=j-2;
+}
+auxsort(L,j,i);
+}
+}
+static int sort(lua_State*L){
+int n=aux_getn(L,1);
+luaL_checkstack(L,40,"");
+if(!lua_isnoneornil(L,2))
+luaL_checktype(L,2,6);
+lua_settop(L,2);
+auxsort(L,1,n);
+return 0;
+}
+static const luaL_Reg tab_funcs[]={
+{"concat",tconcat},
+{"insert",tinsert},
+{"remove",tremove},
+{"sort",sort},
+{NULL,NULL}
+};
+static int luaopen_table(lua_State*L){
+luaL_register(L,"table",tab_funcs);
+return 1;
+}
+static const char*const fnames[]={"input","output"};
+static int pushresult(lua_State*L,int i,const char*filename){
+int en=errno;
+if(i){
+lua_pushboolean(L,1);
+return 1;
+}
+else{
+lua_pushnil(L);
+if(filename)
+lua_pushfstring(L,"%s: %s",filename,strerror(en));
+else
+lua_pushfstring(L,"%s",strerror(en));
+lua_pushinteger(L,en);
+return 3;
+}
+}
+static void fileerror(lua_State*L,int arg,const char*filename){
+lua_pushfstring(L,"%s: %s",filename,strerror(errno));
+luaL_argerror(L,arg,lua_tostring(L,-1));
+}
+#define tofilep(L)((FILE**)luaL_checkudata(L,1,"FILE*"))
+static int io_type(lua_State*L){
+void*ud;
+luaL_checkany(L,1);
+ud=lua_touserdata(L,1);
+lua_getfield(L,(-10000),"FILE*");
+if(ud==NULL||!lua_getmetatable(L,1)||!lua_rawequal(L,-2,-1))
+lua_pushnil(L);
+else if(*((FILE**)ud)==NULL)
+lua_pushliteral(L,"closed file");
+else
+lua_pushliteral(L,"file");
+return 1;
+}
+static FILE*tofile(lua_State*L){
+FILE**f=tofilep(L);
+if(*f==NULL)
+luaL_error(L,"attempt to use a closed file");
+return*f;
+}
+static FILE**newfile(lua_State*L){
+FILE**pf=(FILE**)lua_newuserdata(L,sizeof(FILE*));
+*pf=NULL;
+luaL_getmetatable(L,"FILE*");
+lua_setmetatable(L,-2);
+return pf;
+}
+static int io_noclose(lua_State*L){
+lua_pushnil(L);
+lua_pushliteral(L,"cannot close standard file");
+return 2;
+}
+static int io_pclose(lua_State*L){
+FILE**p=tofilep(L);
+int ok=lua_pclose(L,*p);
+*p=NULL;
+return pushresult(L,ok,NULL);
+}
+static int io_fclose(lua_State*L){
+FILE**p=tofilep(L);
+int ok=(fclose(*p)==0);
+*p=NULL;
+return pushresult(L,ok,NULL);
+}
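+/* aux_close: look up the __close handler stored in the file handle's
+** environment table (io_fclose, io_pclose or io_noclose) and call it. */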
+static int aux_close(lua_State*L){
+lua_getfenv(L,1);
+lua_getfield(L,-1,"__close");
+return(lua_tocfunction(L,-1))(L);
+}
+static int io_close(lua_State*L){
+if(lua_isnone(L,1))
+lua_rawgeti(L,(-10001),2);
+tofile(L);
+return aux_close(L);
+}
+static int io_gc(lua_State*L){
+FILE*f=*tofilep(L);
+if(f!=NULL)
+aux_close(L);
+return 0;
+}
+static int io_open(lua_State*L){
+const char*filename=luaL_checkstring(L,1);
+const char*mode=luaL_optstring(L,2,"r");
+FILE**pf=newfile(L);
+*pf=fopen(filename,mode);
+return(*pf==NULL)?pushresult(L,0,filename):1;
+}
+static FILE*getiofile(lua_State*L,int findex){
+FILE*f;
+lua_rawgeti(L,(-10001),findex);
+f=*(FILE**)lua_touserdata(L,-1);
+if(f==NULL)
+luaL_error(L,"standard %s file is closed",fnames[findex-1]);
+return f;
+}
+static int g_iofile(lua_State*L,int f,const char*mode){
+if(!lua_isnoneornil(L,1)){
+const char*filename=lua_tostring(L,1);
+if(filename){
+FILE**pf=newfile(L);
+*pf=fopen(filename,mode);
+if(*pf==NULL)
+fileerror(L,1,filename);
+}
+else{
+tofile(L);
+lua_pushvalue(L,1);
+}
+lua_rawseti(L,(-10001),f);
+}
+lua_rawgeti(L,(-10001),f);
+return 1;
+}
+static int io_input(lua_State*L){
+return g_iofile(L,1,"r");
+}
+static int io_output(lua_State*L){
+return g_iofile(L,2,"w");
+}
+static int io_readline(lua_State*L);
+static void aux_lines(lua_State*L,int idx,int toclose){
+lua_pushvalue(L,idx);
+lua_pushboolean(L,toclose);
+lua_pushcclosure(L,io_readline,2);
+}
+static int f_lines(lua_State*L){
+tofile(L);
+aux_lines(L,1,0);
+return 1;
+}
+static int io_lines(lua_State*L){
+if(lua_isnoneornil(L,1)){
+lua_rawgeti(L,(-10001),1);
+return f_lines(L);
+}
+else{
+const char*filename=luaL_checkstring(L,1);
+FILE**pf=newfile(L);
+*pf=fopen(filename,"r");
+if(*pf==NULL)
+fileerror(L,1,filename);
+aux_lines(L,lua_gettop(L),1);
+return 1;
+}
+}
+static int read_number(lua_State*L,FILE*f){
+lua_Number d;
+if(fscanf(f,"%lf",&d)==1){
+lua_pushnumber(L,d);
+return 1;
+}
+else{
+lua_pushnil(L);
+return 0;
+}
+}
+static int test_eof(lua_State*L,FILE*f){
+int c=getc(f);
+ungetc(c,f);
+lua_pushlstring(L,NULL,0);
+return(c!=EOF);
+}
+static int read_line(lua_State*L,FILE*f){
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+for(;;){
+size_t l;
+char*p=luaL_prepbuffer(&b);
+if(fgets(p,BUFSIZ,f)==NULL){
+luaL_pushresult(&b);
+return(lua_objlen(L,-1)>0);
+}
+l=strlen(p);
+if(l==0||p[l-1]!='\n')
+luaL_addsize(&b,l);
+else{
+luaL_addsize(&b,l-1);
+luaL_pushresult(&b);
+return 1;
+}
+}
+}
+static int read_chars(lua_State*L,FILE*f,size_t n){
+size_t rlen;
+size_t nr;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+rlen=BUFSIZ;
+do{
+char*p=luaL_prepbuffer(&b);
+if(rlen>n)rlen=n;
+nr=fread(p,sizeof(char),rlen,f);
+luaL_addsize(&b,nr);
+n-=nr;
+}while(n>0&&nr==rlen);
+luaL_pushresult(&b);
+return(n==0||lua_objlen(L,-1)>0);
+}
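+/* g_read: dispatch the io read formats: a numeric argument reads that many
+** bytes (0 just tests for EOF), "*n" reads a number, "*l" a line and "*a"
+** the whole rest of the file. */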
+static int g_read(lua_State*L,FILE*f,int first){
+int nargs=lua_gettop(L)-1;
+int success;
+int n;
+clearerr(f);
+if(nargs==0){
+success=read_line(L,f);
+n=first+1;
+}
+else{
+luaL_checkstack(L,nargs+20,"too many arguments");
+success=1;
+for(n=first;nargs--&&success;n++){
+if(lua_type(L,n)==3){
+size_t l=(size_t)lua_tointeger(L,n);
+success=(l==0)?test_eof(L,f):read_chars(L,f,l);
+}
+else{
+const char*p=lua_tostring(L,n);
+luaL_argcheck(L,p&&p[0]=='*',n,"invalid option");
+switch(p[1]){
+case'n':
+success=read_number(L,f);
+break;
+case'l':
+success=read_line(L,f);
+break;
+case'a':
+read_chars(L,f,~((size_t)0));
+success=1;
+break;
+default:
+return luaL_argerror(L,n,"invalid format");
+}
+}
+}
+}
+if(ferror(f))
+return pushresult(L,0,NULL);
+if(!success){
+lua_pop(L,1);
+lua_pushnil(L);
+}
+return n-first;
+}
+static int io_read(lua_State*L){
+return g_read(L,getiofile(L,1),1);
+}
+static int f_read(lua_State*L){
+return g_read(L,tofile(L),2);
+}
+static int io_readline(lua_State*L){
+FILE*f=*(FILE**)lua_touserdata(L,lua_upvalueindex(1));
+int success;
+if(f==NULL)
+luaL_error(L,"file is already closed");
+success=read_line(L,f);
+if(ferror(f))
+return luaL_error(L,"%s",strerror(errno));
+if(success)return 1;
+else{
+if(lua_toboolean(L,lua_upvalueindex(2))){
+lua_settop(L,0);
+lua_pushvalue(L,lua_upvalueindex(1));
+aux_close(L);
+}
+return 0;
+}
+}
+static int g_write(lua_State*L,FILE*f,int arg){
+int nargs=lua_gettop(L)-1;
+int status=1;
+for(;nargs--;arg++){
+if(lua_type(L,arg)==3){
+status=status&&
+fprintf(f,"%.14g",lua_tonumber(L,arg))>0;
+}
+else{
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+status=status&&(fwrite(s,sizeof(char),l,f)==l);
+}
+}
+return pushresult(L,status,NULL);
+}
+static int io_write(lua_State*L){
+return g_write(L,getiofile(L,2),1);
+}
+static int f_write(lua_State*L){
+return g_write(L,tofile(L),2);
+}
+static int io_flush(lua_State*L){
+return pushresult(L,fflush(getiofile(L,2))==0,NULL);
+}
+static int f_flush(lua_State*L){
+return pushresult(L,fflush(tofile(L))==0,NULL);
+}
+static const luaL_Reg iolib[]={
+{"close",io_close},
+{"flush",io_flush},
+{"input",io_input},
+{"lines",io_lines},
+{"open",io_open},
+{"output",io_output},
+{"read",io_read},
+{"type",io_type},
+{"write",io_write},
+{NULL,NULL}
+};
+static const luaL_Reg flib[]={
+{"close",io_close},
+{"flush",f_flush},
+{"lines",f_lines},
+{"read",f_read},
+{"write",f_write},
+{"__gc",io_gc},
+{NULL,NULL}
+};
+static void createmeta(lua_State*L){
+luaL_newmetatable(L,"FILE*");
+lua_pushvalue(L,-1);
+lua_setfield(L,-2,"__index");
+luaL_register(L,NULL,flib);
+}
+static void createstdfile(lua_State*L,FILE*f,int k,const char*fname){
+*newfile(L)=f;
+if(k>0){
+lua_pushvalue(L,-1);
+lua_rawseti(L,(-10001),k);
+}
+lua_pushvalue(L,-2);
+lua_setfenv(L,-2);
+lua_setfield(L,-3,fname);
+}
+static void newfenv(lua_State*L,lua_CFunction cls){
+lua_createtable(L,0,1);
+lua_pushcfunction(L,cls);
+lua_setfield(L,-2,"__close");
+}
+static int luaopen_io(lua_State*L){
+createmeta(L);
+newfenv(L,io_fclose);
+lua_replace(L,(-10001));
+luaL_register(L,"io",iolib);
+newfenv(L,io_noclose);
+createstdfile(L,stdin,1,"stdin");
+createstdfile(L,stdout,2,"stdout");
+createstdfile(L,stderr,0,"stderr");
+lua_pop(L,1);
+lua_getfield(L,-1,"popen");
+newfenv(L,io_pclose);
+lua_setfenv(L,-2);
+lua_pop(L,1);
+return 1;
+}
+static int os_pushresult(lua_State*L,int i,const char*filename){
+int en=errno;
+if(i){
+lua_pushboolean(L,1);
+return 1;
+}
+else{
+lua_pushnil(L);
+lua_pushfstring(L,"%s: %s",filename,strerror(en));
+lua_pushinteger(L,en);
+return 3;
+}
+}
+static int os_remove(lua_State*L){
+const char*filename=luaL_checkstring(L,1);
+return os_pushresult(L,remove(filename)==0,filename);
+}
+static int os_exit(lua_State*L){
+exit(luaL_optint(L,1,EXIT_SUCCESS));
+}
+static const luaL_Reg syslib[]={
+{"exit",os_exit},
+{"remove",os_remove},
+{NULL,NULL}
+};
+static int luaopen_os(lua_State*L){
+luaL_register(L,"os",syslib);
+return 1;
+}
+#define uchar(c)((unsigned char)(c))
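+/* posrelat: translate a 1-based string position to an absolute index;
+** negative positions count back from the end of the string. */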
+static ptrdiff_t posrelat(ptrdiff_t pos,size_t len){
+if(pos<0)pos+=(ptrdiff_t)len+1;
+return(pos>=0)?pos:0;
+}
+static int str_sub(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+ptrdiff_t start=posrelat(luaL_checkinteger(L,2),l);
+ptrdiff_t end=posrelat(luaL_optinteger(L,3,-1),l);
+if(start<1)start=1;
+if(end>(ptrdiff_t)l)end=(ptrdiff_t)l;
+if(start<=end)
+lua_pushlstring(L,s+start-1,end-start+1);
+else lua_pushliteral(L,"");
+return 1;
+}
+static int str_lower(lua_State*L){
+size_t l;
+size_t i;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+luaL_buffinit(L,&b);
+for(i=0;i<l;i++)
+luaL_addchar(&b,tolower(uchar(s[i])));
+luaL_pushresult(&b);
+return 1;
+}
+static int str_upper(lua_State*L){
+size_t l;
+size_t i;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+luaL_buffinit(L,&b);
+for(i=0;i<l;i++)
+luaL_addchar(&b,toupper(uchar(s[i])));
+luaL_pushresult(&b);
+return 1;
+}
+static int str_rep(lua_State*L){
+size_t l;
+luaL_Buffer b;
+const char*s=luaL_checklstring(L,1,&l);
+int n=luaL_checkint(L,2);
+luaL_buffinit(L,&b);
+while(n-->0)
+luaL_addlstring(&b,s,l);
+luaL_pushresult(&b);
+return 1;
+}
+static int str_byte(lua_State*L){
+size_t l;
+const char*s=luaL_checklstring(L,1,&l);
+ptrdiff_t posi=posrelat(luaL_optinteger(L,2,1),l);
+ptrdiff_t pose=posrelat(luaL_optinteger(L,3,posi),l);
+int n,i;
+if(posi<=0)posi=1;
+if((size_t)pose>l)pose=l;
+if(posi>pose)return 0;
+n=(int)(pose-posi+1);
+if(posi+n<=pose)
+luaL_error(L,"string slice too long");
+luaL_checkstack(L,n,"string slice too long");
+for(i=0;i<n;i++)
+lua_pushinteger(L,uchar(s[posi+i-1]));
+return n;
+}
+static int str_char(lua_State*L){
+int n=lua_gettop(L);
+int i;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+for(i=1;i<=n;i++){
+int c=luaL_checkint(L,i);
+luaL_argcheck(L,uchar(c)==c,i,"invalid value");
+luaL_addchar(&b,uchar(c));
+}
+luaL_pushresult(&b);
+return 1;
+}
+typedef struct MatchState{
+const char*src_init;
+const char*src_end;
+lua_State*L;
+int level;
+struct{
+const char*init;
+ptrdiff_t len;
+}capture[32];
+}MatchState;
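+/* Capture bookkeeping: capture[i].len==-1 marks a still-open capture and
+** len==-2 a position capture (the "()" pattern item). */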
+static int check_capture(MatchState*ms,int l){
+l-='1';
+if(l<0||l>=ms->level||ms->capture[l].len==(-1))
+return luaL_error(ms->L,"invalid capture index");
+return l;
+}
+static int capture_to_close(MatchState*ms){
+int level=ms->level;
+for(level--;level>=0;level--)
+if(ms->capture[level].len==(-1))return level;
+return luaL_error(ms->L,"invalid pattern capture");
+}
+static const char*classend(MatchState*ms,const char*p){
+switch(*p++){
+case'%':{
+if(*p=='\0')
+luaL_error(ms->L,"malformed pattern (ends with "LUA_QL("%%")")");
+return p+1;
+}
+case'[':{
+if(*p=='^')p++;
+do{
+if(*p=='\0')
+luaL_error(ms->L,"malformed pattern (missing "LUA_QL("]")")");
+if(*(p++)=='%'&&*p!='\0')
+p++;
+}while(*p!=']');
+return p+1;
+}
+default:{
+return p;
+}
+}
+}
+static int match_class(int c,int cl){
+int res;
+switch(tolower(cl)){
+case'a':res=isalpha(c);break;
+case'c':res=iscntrl(c);break;
+case'd':res=isdigit(c);break;
+case'l':res=islower(c);break;
+case'p':res=ispunct(c);break;
+case's':res=isspace(c);break;
+case'u':res=isupper(c);break;
+case'w':res=isalnum(c);break;
+case'x':res=isxdigit(c);break;
+case'z':res=(c==0);break;
+default:return(cl==c);
+}
+return(islower(cl)?res:!res);
+}
+static int matchbracketclass(int c,const char*p,const char*ec){
+int sig=1;
+if(*(p+1)=='^'){
+sig=0;
+p++;
+}
+while(++p<ec){
+if(*p=='%'){
+p++;
+if(match_class(c,uchar(*p)))
+return sig;
+}
+else if((*(p+1)=='-')&&(p+2<ec)){
+p+=2;
+if(uchar(*(p-2))<=c&&c<=uchar(*p))
+return sig;
+}
+else if(uchar(*p)==c)return sig;
+}
+return!sig;
+}
+static int singlematch(int c,const char*p,const char*ep){
+switch(*p){
+case'.':return 1;
+case'%':return match_class(c,uchar(*(p+1)));
+case'[':return matchbracketclass(c,p,ep-1);
+default:return(uchar(*p)==c);
+}
+}
+static const char*match(MatchState*ms,const char*s,const char*p);
+static const char*matchbalance(MatchState*ms,const char*s,
+const char*p){
+if(*p==0||*(p+1)==0)
+luaL_error(ms->L,"unbalanced pattern");
+if(*s!=*p)return NULL;
+else{
+int b=*p;
+int e=*(p+1);
+int cont=1;
+while(++s<ms->src_end){
+if(*s==e){
+if(--cont==0)return s+1;
+}
+else if(*s==b)cont++;
+}
+}
+return NULL;
+}
+static const char*max_expand(MatchState*ms,const char*s,
+const char*p,const char*ep){
+ptrdiff_t i=0;
+while((s+i)<ms->src_end&&singlematch(uchar(*(s+i)),p,ep))
+i++;
+while(i>=0){
+const char*res=match(ms,(s+i),ep+1);
+if(res)return res;
+i--;
+}
+return NULL;
+}
+static const char*min_expand(MatchState*ms,const char*s,
+const char*p,const char*ep){
+for(;;){
+const char*res=match(ms,s,ep+1);
+if(res!=NULL)
+return res;
+else if(s<ms->src_end&&singlematch(uchar(*s),p,ep))
+s++;
+else return NULL;
+}
+}
+static const char*start_capture(MatchState*ms,const char*s,
+const char*p,int what){
+const char*res;
+int level=ms->level;
+if(level>=32)luaL_error(ms->L,"too many captures");
+ms->capture[level].init=s;
+ms->capture[level].len=what;
+ms->level=level+1;
+if((res=match(ms,s,p))==NULL)
+ms->level--;
+return res;
+}
+static const char*end_capture(MatchState*ms,const char*s,
+const char*p){
+int l=capture_to_close(ms);
+const char*res;
+ms->capture[l].len=s-ms->capture[l].init;
+if((res=match(ms,s,p))==NULL)
+ms->capture[l].len=(-1);
+return res;
+}
+static const char*match_capture(MatchState*ms,const char*s,int l){
+size_t len;
+l=check_capture(ms,l);
+len=ms->capture[l].len;
+if((size_t)(ms->src_end-s)>=len&&
+memcmp(ms->capture[l].init,s,len)==0)
+return s+len;
+else return NULL;
+}
+static const char*match(MatchState*ms,const char*s,const char*p){
+init:
+switch(*p){
+case'(':{
+if(*(p+1)==')')
+return start_capture(ms,s,p+2,(-2));
+else
+return start_capture(ms,s,p+1,(-1));
+}
+case')':{
+return end_capture(ms,s,p+1);
+}
+case'%':{
+switch(*(p+1)){
+case'b':{
+s=matchbalance(ms,s,p+2);
+if(s==NULL)return NULL;
+p+=4;goto init;
+}
+case'f':{
+const char*ep;char previous;
+p+=2;
+if(*p!='[')
+luaL_error(ms->L,"missing "LUA_QL("[")" after "
+LUA_QL("%%f")" in pattern");
+ep=classend(ms,p);
+previous=(s==ms->src_init)?'\0':*(s-1);
+if(matchbracketclass(uchar(previous),p,ep-1)||
+!matchbracketclass(uchar(*s),p,ep-1))return NULL;
+p=ep;goto init;
+}
+default:{
+if(isdigit(uchar(*(p+1)))){
+s=match_capture(ms,s,uchar(*(p+1)));
+if(s==NULL)return NULL;
+p+=2;goto init;
+}
+goto dflt;
+}
+}
+}
+case'\0':{
+return s;
+}
+case'$':{
+if(*(p+1)=='\0')
+return(s==ms->src_end)?s:NULL;
+else goto dflt;
+}
+default:dflt:{
+const char*ep=classend(ms,p);
+int m=s<ms->src_end&&singlematch(uchar(*s),p,ep);
+switch(*ep){
+case'?':{
+const char*res;
+if(m&&((res=match(ms,s+1,ep+1))!=NULL))
+return res;
+p=ep+1;goto init;
+}
+case'*':{
+return max_expand(ms,s,p,ep);
+}
+case'+':{
+return(m?max_expand(ms,s+1,p,ep):NULL);
+}
+case'-':{
+return min_expand(ms,s,p,ep);
+}
+default:{
+if(!m)return NULL;
+s++;p=ep;goto init;
+}
+}
+}
+}
+}
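+/* lmemfind: plain substring search; memchr locates candidate first bytes
+** of s2 within s1, memcmp checks the remainder. */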
+static const char*lmemfind(const char*s1,size_t l1,
+const char*s2,size_t l2){
+if(l2==0)return s1;
+else if(l2>l1)return NULL;
+else{
+const char*init;
+l2--;
+l1=l1-l2;
+while(l1>0&&(init=(const char*)memchr(s1,*s2,l1))!=NULL){
+init++;
+if(memcmp(init,s2+1,l2)==0)
+return init-1;
+else{
+l1-=init-s1;
+s1=init;
+}
+}
+return NULL;
+}
+}
+static void push_onecapture(MatchState*ms,int i,const char*s,
+const char*e){
+if(i>=ms->level){
+if(i==0)
+lua_pushlstring(ms->L,s,e-s);
+else
+luaL_error(ms->L,"invalid capture index");
+}
+else{
+ptrdiff_t l=ms->capture[i].len;
+if(l==(-1))luaL_error(ms->L,"unfinished capture");
+if(l==(-2))
+lua_pushinteger(ms->L,ms->capture[i].init-ms->src_init+1);
+else
+lua_pushlstring(ms->L,ms->capture[i].init,l);
+}
+}
+static int push_captures(MatchState*ms,const char*s,const char*e){
+int i;
+int nlevels=(ms->level==0&&s)?1:ms->level;
+luaL_checkstack(ms->L,nlevels,"too many captures");
+for(i=0;i<nlevels;i++)
+push_onecapture(ms,i,s,e);
+return nlevels;
+}
+static int str_find_aux(lua_State*L,int find){
+size_t l1,l2;
+const char*s=luaL_checklstring(L,1,&l1);
+const char*p=luaL_checklstring(L,2,&l2);
+ptrdiff_t init=posrelat(luaL_optinteger(L,3,1),l1)-1;
+if(init<0)init=0;
+else if((size_t)(init)>l1)init=(ptrdiff_t)l1;
+if(find&&(lua_toboolean(L,4)||
+strpbrk(p,"^$*+?.([%-")==NULL)){
+const char*s2=lmemfind(s+init,l1-init,p,l2);
+if(s2){
+lua_pushinteger(L,s2-s+1);
+lua_pushinteger(L,s2-s+l2);
+return 2;
+}
+}
+else{
+MatchState ms;
+int anchor=(*p=='^')?(p++,1):0;
+const char*s1=s+init;
+ms.L=L;
+ms.src_init=s;
+ms.src_end=s+l1;
+do{
+const char*res;
+ms.level=0;
+if((res=match(&ms,s1,p))!=NULL){
+if(find){
+lua_pushinteger(L,s1-s+1);
+lua_pushinteger(L,res-s);
+return push_captures(&ms,NULL,0)+2;
+}
+else
+return push_captures(&ms,s1,res);
+}
+}while(s1++<ms.src_end&&!anchor);
+}
+lua_pushnil(L);
+return 1;
+}
+static int str_find(lua_State*L){
+return str_find_aux(L,1);
+}
+static int str_match(lua_State*L){
+return str_find_aux(L,0);
+}
+static int gmatch_aux(lua_State*L){
+MatchState ms;
+size_t ls;
+const char*s=lua_tolstring(L,lua_upvalueindex(1),&ls);
+const char*p=lua_tostring(L,lua_upvalueindex(2));
+const char*src;
+ms.L=L;
+ms.src_init=s;
+ms.src_end=s+ls;
+for(src=s+(size_t)lua_tointeger(L,lua_upvalueindex(3));
+src<=ms.src_end;
+src++){
+const char*e;
+ms.level=0;
+if((e=match(&ms,src,p))!=NULL){
+lua_Integer newstart=e-s;
+if(e==src)newstart++;
+lua_pushinteger(L,newstart);
+lua_replace(L,lua_upvalueindex(3));
+return push_captures(&ms,src,e);
+}
+}
+return 0;
+}
+static int gmatch(lua_State*L){
+luaL_checkstring(L,1);
+luaL_checkstring(L,2);
+lua_settop(L,2);
+lua_pushinteger(L,0);
+lua_pushcclosure(L,gmatch_aux,3);
+return 1;
+}
+static void add_s(MatchState*ms,luaL_Buffer*b,const char*s,
+const char*e){
+size_t l,i;
+const char*news=lua_tolstring(ms->L,3,&l);
+for(i=0;i<l;i++){
+if(news[i]!='%')
+luaL_addchar(b,news[i]);
+else{
+i++;
+if(!isdigit(uchar(news[i])))
+luaL_addchar(b,news[i]);
+else if(news[i]=='0')
+luaL_addlstring(b,s,e-s);
+else{
+push_onecapture(ms,news[i]-'1',s,e);
+luaL_addvalue(b);
+}
+}
+}
+}
+static void add_value(MatchState*ms,luaL_Buffer*b,const char*s,
+const char*e){
+lua_State*L=ms->L;
+switch(lua_type(L,3)){
+case 3:
+case 4:{
+add_s(ms,b,s,e);
+return;
+}
+case 6:{
+int n;
+lua_pushvalue(L,3);
+n=push_captures(ms,s,e);
+lua_call(L,n,1);
+break;
+}
+case 5:{
+push_onecapture(ms,0,s,e);
+lua_gettable(L,3);
+break;
+}
+}
+if(!lua_toboolean(L,-1)){
+lua_pop(L,1);
+lua_pushlstring(L,s,e-s);
+}
+else if(!lua_isstring(L,-1))
+luaL_error(L,"invalid replacement value (a %s)",luaL_typename(L,-1));
+luaL_addvalue(b);
+}
+static int str_gsub(lua_State*L){
+size_t srcl;
+const char*src=luaL_checklstring(L,1,&srcl);
+const char*p=luaL_checkstring(L,2);
+int tr=lua_type(L,3);
+int max_s=luaL_optint(L,4,srcl+1);
+int anchor=(*p=='^')?(p++,1):0;
+int n=0;
+MatchState ms;
+luaL_Buffer b;
+luaL_argcheck(L,tr==3||tr==4||
+tr==6||tr==5,3,
+"string/function/table expected");
+luaL_buffinit(L,&b);
+ms.L=L;
+ms.src_init=src;
+ms.src_end=src+srcl;
+while(n<max_s){
+const char*e;
+ms.level=0;
+e=match(&ms,src,p);
+if(e){
+n++;
+add_value(&ms,&b,src,e);
+}
+if(e&&e>src)
+src=e;
+else if(src<ms.src_end)
+luaL_addchar(&b,*src++);
+else break;
+if(anchor)break;
+}
+luaL_addlstring(&b,src,ms.src_end-src);
+luaL_pushresult(&b);
+lua_pushinteger(L,n);
+return 2;
+}
+static void addquoted(lua_State*L,luaL_Buffer*b,int arg){
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+luaL_addchar(b,'"');
+while(l--){
+switch(*s){
+case'"':case'\\':case'\n':{
+luaL_addchar(b,'\\');
+luaL_addchar(b,*s);
+break;
+}
+case'\r':{
+luaL_addlstring(b,"\\r",2);
+break;
+}
+case'\0':{
+luaL_addlstring(b,"\\000",4);
+break;
+}
+default:{
+luaL_addchar(b,*s);
+break;
+}
+}
+s++;
+}
+luaL_addchar(b,'"');
+}
+static const char*scanformat(lua_State*L,const char*strfrmt,char*form){
+const char*p=strfrmt;
+while(*p!='\0'&&strchr("-+ #0",*p)!=NULL)p++;
+if((size_t)(p-strfrmt)>=sizeof("-+ #0"))
+luaL_error(L,"invalid format (repeated flags)");
+if(isdigit(uchar(*p)))p++;
+if(isdigit(uchar(*p)))p++;
+if(*p=='.'){
+p++;
+if(isdigit(uchar(*p)))p++;
+if(isdigit(uchar(*p)))p++;
+}
+if(isdigit(uchar(*p)))
+luaL_error(L,"invalid format (width or precision too long)");
+*(form++)='%';
+strncpy(form,strfrmt,p-strfrmt+1);
+form+=p-strfrmt+1;
+*form='\0';
+return p;
+}
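+/* addintlen: inject the 'l' length modifier before the conversion char,
+** e.g. turning "%5d" into "%5ld" to match the long casts used below. */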
+static void addintlen(char*form){
+size_t l=strlen(form);
+char spec=form[l-1];
+strcpy(form+l-1,"l");
+form[l+sizeof("l")-2]=spec;
+form[l+sizeof("l")-1]='\0';
+}
+static int str_format(lua_State*L){
+int top=lua_gettop(L);
+int arg=1;
+size_t sfl;
+const char*strfrmt=luaL_checklstring(L,arg,&sfl);
+const char*strfrmt_end=strfrmt+sfl;
+luaL_Buffer b;
+luaL_buffinit(L,&b);
+while(strfrmt<strfrmt_end){
+if(*strfrmt!='%')
+luaL_addchar(&b,*strfrmt++);
+else if(*++strfrmt=='%')
+luaL_addchar(&b,*strfrmt++);
+else{
+char form[(sizeof("-+ #0")+sizeof("l")+10)];
+char buff[512];
+if(++arg>top)
+luaL_argerror(L,arg,"no value");
+strfrmt=scanformat(L,strfrmt,form);
+switch(*strfrmt++){
+case'c':{
+sprintf(buff,form,(int)luaL_checknumber(L,arg));
+break;
+}
+case'd':case'i':{
+addintlen(form);
+sprintf(buff,form,(long)luaL_checknumber(L,arg));
+break;
+}
+case'o':case'u':case'x':case'X':{
+addintlen(form);
+sprintf(buff,form,(unsigned long)luaL_checknumber(L,arg));
+break;
+}
+case'e':case'E':case'f':
+case'g':case'G':{
+sprintf(buff,form,(double)luaL_checknumber(L,arg));
+break;
+}
+case'q':{
+addquoted(L,&b,arg);
+continue;
+}
+case's':{
+size_t l;
+const char*s=luaL_checklstring(L,arg,&l);
+if(!strchr(form,'.')&&l>=100){
+lua_pushvalue(L,arg);
+luaL_addvalue(&b);
+continue;
+}
+else{
+sprintf(buff,form,s);
+break;
+}
+}
+default:{
+return luaL_error(L,"invalid option "LUA_QL("%%%c")" to "
+LUA_QL("format"),*(strfrmt-1));
+}
+}
+luaL_addlstring(&b,buff,strlen(buff));
+}
+}
+luaL_pushresult(&b);
+return 1;
+}
+static const luaL_Reg strlib[]={
+{"byte",str_byte},
+{"char",str_char},
+{"find",str_find},
+{"format",str_format},
+{"gmatch",gmatch},
+{"gsub",str_gsub},
+{"lower",str_lower},
+{"match",str_match},
+{"rep",str_rep},
+{"sub",str_sub},
+{"upper",str_upper},
+{NULL,NULL}
+};
+static void createmetatable(lua_State*L){
+lua_createtable(L,0,1);
+lua_pushliteral(L,"");
+lua_pushvalue(L,-2);
+lua_setmetatable(L,-2);
+lua_pop(L,1);
+lua_pushvalue(L,-2);
+lua_setfield(L,-2,"__index");
+lua_pop(L,1);
+}
+static int luaopen_string(lua_State*L){
+luaL_register(L,"string",strlib);
+createmetatable(L);
+return 1;
+}
+static const luaL_Reg lualibs[]={
+{"",luaopen_base},
+{"table",luaopen_table},
+{"io",luaopen_io},
+{"os",luaopen_os},
+{"string",luaopen_string},
+{NULL,NULL}
+};
+static void luaL_openlibs(lua_State*L){
+const luaL_Reg*lib=lualibs;
+for(;lib->func;lib++){
+lua_pushcfunction(L,lib->func);
+lua_pushstring(L,lib->name);
+lua_call(L,1,0);
+}
+}
+typedef unsigned int UB;
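+/* barg: convert a Lua number to a 32-bit value by adding the bias
+** 2^52+2^51 (6755399441055744.0), which leaves the rounded integer in the
+** low 32 bits of the double's mantissa. */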
+static UB barg(lua_State*L,int idx){
+union{lua_Number n;U64 b;}bn;
+bn.n=lua_tonumber(L,idx)+6755399441055744.0;
+if(bn.n==0.0&&!lua_isnumber(L,idx))luaL_typerror(L,idx,"number");
+return(UB)bn.b;
+}
+#define BRET(b)lua_pushnumber(L,(lua_Number)(int)(b));return 1;
+static int tobit(lua_State*L){
+BRET(barg(L,1))}
+static int bnot(lua_State*L){
+BRET(~barg(L,1))}
+static int band(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b&=barg(L,i);BRET(b)}
+static int bor(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b|=barg(L,i);BRET(b)}
+static int bxor(lua_State*L){
+int i;UB b=barg(L,1);for(i=lua_gettop(L);i>1;i--)b^=barg(L,i);BRET(b)}
+static int lshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b<<n)}
+static int rshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET(b>>n)}
+static int arshift(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((int)b>>n)}
+static int rol(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b<<n)|(b>>(32-n)))}
+static int ror(lua_State*L){
+UB b=barg(L,1),n=barg(L,2)&31;BRET((b>>n)|(b<<(32-n)))}
+static int bswap(lua_State*L){
+UB b=barg(L,1);b=(b>>24)|((b>>8)&0xff00)|((b&0xff00)<<8)|(b<<24);BRET(b)}
+static int tohex(lua_State*L){
+UB b=barg(L,1);
+int n=lua_isnone(L,2)?8:(int)barg(L,2);
+const char*hexdigits="0123456789abcdef";
+char buf[8];
+int i;
+if(n<0){n=-n;hexdigits="0123456789ABCDEF";}
+if(n>8)n=8;
+for(i=(int)n;--i>=0;){buf[i]=hexdigits[b&15];b>>=4;}
+lua_pushlstring(L,buf,(size_t)n);
+return 1;
+}
+static const struct luaL_Reg bitlib[]={
+{"tobit",tobit},
+{"bnot",bnot},
+{"band",band},
+{"bor",bor},
+{"bxor",bxor},
+{"lshift",lshift},
+{"rshift",rshift},
+{"arshift",arshift},
+{"rol",rol},
+{"ror",ror},
+{"bswap",bswap},
+{"tohex",tohex},
+{NULL,NULL}
+};
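+/* main: minimal standalone interpreter. Without a script argument it
+** returns sizeof(void*) as the exit status, which surrounding build
+** scripts can use to probe the host pointer size. */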
+int main(int argc,char**argv){
+lua_State*L=luaL_newstate();
+int i;
+luaL_openlibs(L);
+luaL_register(L,"bit",bitlib);
+if(argc<2)return sizeof(void*);
+lua_createtable(L,0,1);
+lua_pushstring(L,argv[1]);
+lua_rawseti(L,-2,0);
+lua_setglobal(L,"arg");
+if(luaL_loadfile(L,argv[1]))
+goto err;
+for(i=2;i<argc;i++)
+lua_pushstring(L,argv[i]);
+if(lua_pcall(L,argc-2,0,0)){
+err:
+fprintf(stderr,"Error: %s\n",lua_tostring(L,-1));
+return 1;
+}
+lua_close(L);
+return 0;
+}
diff --git a/libs/luajit-cmake/luajit/src/jit/.gitignore b/libs/luajit-cmake/luajit/src/jit/.gitignore
new file mode 100644
index 0000000..500e285
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/.gitignore
@@ -0,0 +1 @@
+vmdef.lua
diff --git a/libs/luajit-cmake/luajit/src/jit/bc.lua b/libs/luajit-cmake/luajit/src/jit/bc.lua
new file mode 100644
index 0000000..8d0844c
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/bc.lua
@@ -0,0 +1,190 @@
+----------------------------------------------------------------------------
+-- LuaJIT bytecode listing module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module lists the bytecode of a Lua function. If it's loaded by -jbc
+-- it hooks into the parser and lists all functions of a chunk as they
+-- are parsed.
+--
+-- Example usage:
+--
+-- luajit -jbc -e 'local x=0; for i=1,1e6 do x=x+i end; print(x)'
+-- luajit -jbc=- foo.lua
+-- luajit -jbc=foo.list foo.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_LISTFILE. The file is overwritten every time the module
+-- is started.
+--
+-- This module can also be used programmatically:
+--
+-- local bc = require("jit.bc")
+--
+-- local function foo() print("hello") end
+--
+-- bc.dump(foo) --> -- BYTECODE -- [...]
+-- print(bc.line(foo, 2)) --> 0002 KSTR 1 1 ; "hello"
+--
+-- local out = {
+-- -- Do something with each line:
+-- write = function(t, ...) io.write(...) end,
+-- close = function(t) end,
+-- flush = function(t) end,
+-- }
+-- bc.dump(foo, out)
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local bit = require("bit")
+local sub, gsub, format = string.sub, string.gsub, string.format
+local byte, band, shr = string.byte, bit.band, bit.rshift
+local funcinfo, funcbc, funck = jutil.funcinfo, jutil.funcbc, jutil.funck
+local funcuvname = jutil.funcuvname
+local bcnames = vmdef.bcnames
+local stdout, stderr = io.stdout, io.stderr
+
+------------------------------------------------------------------------------
+
+local function ctlsub(c)
+ if c == "\n" then return "\\n"
+ elseif c == "\r" then return "\\r"
+ elseif c == "\t" then return "\\t"
+ else return format("\\%03d", byte(c))
+ end
+end
+
+-- Return one bytecode line.
+local function bcline(func, pc, prefix)
+ local ins, m = funcbc(func, pc)
+ if not ins then return end
+ local ma, mb, mc = band(m, 7), band(m, 15*8), band(m, 15*128)
+ local a = band(shr(ins, 8), 0xff)
+ local oidx = 6*band(ins, 0xff)
+ local op = sub(bcnames, oidx+1, oidx+6)
+ local s = format("%04d %s %-6s %3s ",
+ pc, prefix or " ", op, ma == 0 and "" or a)
+ local d = shr(ins, 16)
+ if mc == 13*128 then -- BCMjump
+ return format("%s=> %04d\n", s, pc+d-0x7fff)
+ end
+ if mb ~= 0 then
+ d = band(d, 0xff)
+ elseif mc == 0 then
+ return s.."\n"
+ end
+ local kc
+ if mc == 10*128 then -- BCMstr
+ kc = funck(func, -d-1)
+ kc = format(#kc > 40 and '"%.40s"~' or '"%s"', gsub(kc, "%c", ctlsub))
+ elseif mc == 9*128 then -- BCMnum
+ kc = funck(func, d)
+ if op == "TSETM " then kc = kc - 2^52 end
+ elseif mc == 12*128 then -- BCMfunc
+ local fi = funcinfo(funck(func, -d-1))
+ if fi.ffid then
+ kc = vmdef.ffnames[fi.ffid]
+ else
+ kc = fi.loc
+ end
+ elseif mc == 5*128 then -- BCMuv
+ kc = funcuvname(func, d)
+ end
+ if ma == 5 then -- BCMuv
+ local ka = funcuvname(func, a)
+ if kc then kc = ka.." ; "..kc else kc = ka end
+ end
+ if mb ~= 0 then
+ local b = shr(ins, 24)
+ if kc then return format("%s%3d %3d ; %s\n", s, b, d, kc) end
+ return format("%s%3d %3d\n", s, b, d)
+ end
+ if kc then return format("%s%3d ; %s\n", s, d, kc) end
+ if mc == 7*128 and d > 32767 then d = d - 65536 end -- BCMlits
+ return format("%s%3d\n", s, d)
+end
+
+-- Collect branch targets of a function.
+local function bctargets(func)
+ local target = {}
+ for pc=1,1000000000 do
+ local ins, m = funcbc(func, pc)
+ if not ins then break end
+ if band(m, 15*128) == 13*128 then target[pc+shr(ins, 16)-0x7fff] = true end
+ end
+ return target
+end
+
+-- Dump bytecode instructions of a function.
+local function bcdump(func, out, all)
+ if not out then out = stdout end
+ local fi = funcinfo(func)
+ if all and fi.children then
+ for n=-1,-1000000000,-1 do
+ local k = funck(func, n)
+ if not k then break end
+ if type(k) == "proto" then bcdump(k, out, true) end
+ end
+ end
+ out:write(format("-- BYTECODE -- %s-%d\n", fi.loc, fi.lastlinedefined))
+ local target = bctargets(func)
+ for pc=1,1000000000 do
+ local s = bcline(func, pc, target[pc] and "=>")
+ if not s then break end
+ out:write(s)
+ end
+ out:write("\n")
+ out:flush()
+end
+
+------------------------------------------------------------------------------
+
+-- Active flag and output file handle.
+local active, out
+
+-- List handler.
+local function h_list(func)
+ return bcdump(func, out)
+end
+
+-- Detach list handler.
+local function bclistoff()
+ if active then
+ active = false
+ jit.attach(h_list)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach list handler.
+local function bcliston(outfile)
+ if active then bclistoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_LISTFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(h_list, "bc")
+ active = true
+end
+
+-- Public module functions.
+return {
+ line = bcline,
+ dump = bcdump,
+ targets = bctargets,
+ on = bcliston,
+ off = bclistoff,
+ start = bcliston -- For -j command line option.
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/bcsave.lua b/libs/luajit-cmake/luajit/src/jit/bcsave.lua
new file mode 100644
index 0000000..90fe9da
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/bcsave.lua
@@ -0,0 +1,705 @@
+----------------------------------------------------------------------------
+-- LuaJIT module to save/list bytecode.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module saves or lists the bytecode for an input file.
+-- It's run by the -b command line option.
+--
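+-- Example invocations (these mirror the option summary in usage() below):
+--
+--   luajit -b foo.lua foo.raw    save plain bytecode
+--   luajit -bl foo.lua           list bytecode to stdout
+--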
+------------------------------------------------------------------------------
+
+local jit = require("jit")
+assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
+local bit = require("bit")
+
+-- Symbol name prefix for LuaJIT bytecode.
+local LJBC_PREFIX = "luaJIT_BC_"
+
+local type, assert = type, assert
+local format = string.format
+local tremove, tconcat = table.remove, table.concat
+
+------------------------------------------------------------------------------
+
+local function usage()
+ io.stderr:write[[
+Save LuaJIT bytecode: luajit -b[options] input output
+ -l Only list bytecode.
+ -s Strip debug info (default).
+ -g Keep debug info.
+ -n name Set module name (default: auto-detect from input name).
+ -t type Set output file type (default: auto-detect from output name).
+ -a arch Override architecture for object files (default: native).
+ -o os Override OS for object files (default: native).
+ -F name Override filename (default: input filename).
+ -e chunk Use chunk string as input.
+ -- Stop handling options.
+ - Use stdin as input and/or stdout as output.
+
+File types: c h obj o raw (default)
+]]
+ os.exit(1)
+end
+
+local function check(ok, ...)
+ if ok then return ok, ... end
+ io.stderr:write("luajit: ", ...)
+ io.stderr:write("\n")
+ os.exit(1)
+end
+
+local function readfile(ctx, input)
+ if type(input) == "function" then return input end
+ if ctx.filename then
+ local data
+ if input == "-" then
+ data = io.stdin:read("*a")
+ else
+ local fp = assert(io.open(input, "rb"))
+ data = assert(fp:read("*a"))
+ assert(fp:close())
+ end
+ return check(load(data, ctx.filename))
+ else
+ if input == "-" then input = nil end
+ return check(loadfile(input))
+ end
+end
+
+local function savefile(name, mode)
+ if name == "-" then return io.stdout end
+ return check(io.open(name, mode))
+end
+
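+-- Put stdout (fd 1) into binary mode on Windows; 0x8000 is _O_BINARY, so
+-- _setmode(1, 0x8000) stops the CRT from translating "\n" to "\r\n".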
+local function set_stdout_binary(ffi)
+ ffi.cdef[[int _setmode(int fd, int mode);]]
+ ffi.C._setmode(1, 0x8000)
+end
+
+------------------------------------------------------------------------------
+
+local map_type = {
+ raw = "raw", c = "c", h = "h", o = "obj", obj = "obj",
+}
+
+local map_arch = {
+ x86 = { e = "le", b = 32, m = 3, p = 0x14c, },
+ x64 = { e = "le", b = 64, m = 62, p = 0x8664, },
+ arm = { e = "le", b = 32, m = 40, p = 0x1c0, },
+ arm64 = { e = "le", b = 64, m = 183, p = 0xaa64, },
+ arm64be = { e = "be", b = 64, m = 183, },
+ ppc = { e = "be", b = 32, m = 20, },
+ mips = { e = "be", b = 32, m = 8, f = 0x50001006, },
+ mipsel = { e = "le", b = 32, m = 8, f = 0x50001006, },
+ mips64 = { e = "be", b = 64, m = 8, f = 0x80000007, },
+ mips64el = { e = "le", b = 64, m = 8, f = 0x80000007, },
+ mips64r6 = { e = "be", b = 64, m = 8, f = 0xa0000407, },
+ mips64r6el = { e = "le", b = 64, m = 8, f = 0xa0000407, },
+}
+
+local map_os = {
+ linux = true, windows = true, osx = true, freebsd = true, netbsd = true,
+ openbsd = true, dragonfly = true, solaris = true,
+}
+
+local function checkarg(str, map, err)
+ str = str:lower()
+ local s = check(map[str], "unknown ", err)
+ return type(s) == "string" and s or str
+end
+
+local function detecttype(str)
+ local ext = str:lower():match("%.(%a+)$")
+ return map_type[ext] or "raw"
+end
+
+local function checkmodname(str)
+ check(str:match("^[%w_.%-]+$"), "bad module name")
+ return str:gsub("[%.%-]", "_")
+end
+
+local function detectmodname(str)
+ if type(str) == "string" then
+ local tail = str:match("[^/\\]+$")
+ if tail then str = tail end
+ local head = str:match("^(.*)%.[^.]*$")
+ if head then str = head end
+ str = str:match("^[%w_.%-]+")
+ else
+ str = nil
+ end
+ check(str, "cannot derive module name, use -n name")
+ return str:gsub("[%.%-]", "_")
+end
+
+------------------------------------------------------------------------------
+
+local function bcsave_tail(fp, output, s)
+ local ok, err = fp:write(s)
+ if ok and output ~= "-" then ok, err = fp:close() end
+ check(ok, "cannot write ", output, ": ", err)
+end
+
+local function bcsave_raw(output, s)
+ if output == "-" and jit.os == "Windows" then
+ local ok, ffi = pcall(require, "ffi")
+ check(ok, "FFI library required to write binary file to stdout")
+ set_stdout_binary(ffi)
+ end
+ local fp = savefile(output, "wb")
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_c(ctx, output, s)
+ local fp = savefile(output, "w")
+ if ctx.type == "c" then
+ fp:write(format([[
+#ifdef __cplusplus
+extern "C"
+#endif
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+const unsigned char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname))
+ else
+ fp:write(format([[
+#define %s%s_SIZE %d
+static const unsigned char %s%s[] = {
+]], LJBC_PREFIX, ctx.modname, #s, LJBC_PREFIX, ctx.modname))
+ end
+ local t, n, m = {}, 0, 0
+ for i=1,#s do
+ local b = tostring(string.byte(s, i))
+ m = m + #b + 1
+ if m > 78 then
+ fp:write(tconcat(t, ",", 1, n), ",\n")
+ n, m = 0, #b + 1
+ end
+ n = n + 1
+ t[n] = b
+ end
+ bcsave_tail(fp, output, tconcat(t, ",", 1, n).."\n};\n")
+end
+
+local function bcsave_elfobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint32_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF32header;
+typedef struct {
+ uint8_t emagic[4], eclass, eendian, eversion, eosabi, eabiversion, epad[7];
+ uint16_t type, machine;
+ uint32_t version;
+ uint64_t entry, phofs, shofs;
+ uint32_t flags;
+ uint16_t ehsize, phentsize, phnum, shentsize, shnum, shstridx;
+} ELF64header;
+typedef struct {
+ uint32_t name, type, flags, addr, ofs, size, link, info, align, entsize;
+} ELF32sectheader;
+typedef struct {
+ uint32_t name, type;
+ uint64_t flags, addr, ofs, size;
+ uint32_t link, info;
+ uint64_t align, entsize;
+} ELF64sectheader;
+typedef struct {
+ uint32_t name, value, size;
+ uint8_t info, other;
+ uint16_t sectidx;
+} ELF32symbol;
+typedef struct {
+ uint32_t name;
+ uint8_t info, other;
+ uint16_t sectidx;
+ uint64_t value, size;
+} ELF64symbol;
+typedef struct {
+ ELF32header hdr;
+ ELF32sectheader sect[6];
+ ELF32symbol sym[2];
+ uint8_t space[4096];
+} ELF32obj;
+typedef struct {
+ ELF64header hdr;
+ ELF64sectheader sect[6];
+ ELF64symbol sym[2];
+ uint8_t space[4096];
+} ELF64obj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local ai = assert(map_arch[ctx.arch])
+ local is64, isbe = ai.b == 64, ai.e == "be"
+
+ -- Handle different host/target endianness.
+ local function f32(x) return x end
+ local f16, fofs = f32, f32
+ if ffi.abi("be") ~= isbe then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ if is64 then
+ local two32 = ffi.cast("int64_t", 2^32)
+ function fofs(x) return bit.bswap(x)*two32 end
+ else
+ fofs = f32
+ end
+ end
+
+ -- Create ELF object and fill in header.
+ local o = ffi.new(is64 and "ELF64obj" or "ELF32obj")
+ local hdr = o.hdr
+ if ctx.os == "bsd" or ctx.os == "other" then -- Determine native hdr.eosabi.
+ local bf = assert(io.open("/bin/ls", "rb"))
+ local bs = bf:read(9)
+ bf:close()
+ ffi.copy(o, bs, 9)
+ check(hdr.emagic[0] == 127, "no support for writing native object files")
+ else
+ hdr.emagic = "\127ELF"
+ hdr.eosabi = ({ freebsd=9, netbsd=2, openbsd=12, solaris=6 })[ctx.os] or 0
+ end
+ hdr.eclass = is64 and 2 or 1
+ hdr.eendian = isbe and 2 or 1
+ hdr.eversion = 1
+ hdr.type = f16(1)
+ hdr.machine = f16(ai.m)
+ hdr.flags = f32(ai.f or 0)
+ hdr.version = f32(1)
+ hdr.shofs = fofs(ffi.offsetof(o, "sect"))
+ hdr.ehsize = f16(ffi.sizeof(hdr))
+ hdr.shentsize = f16(ffi.sizeof(o.sect[0]))
+ hdr.shnum = f16(6)
+ hdr.shstridx = f16(2)
+
+ -- Fill in sections and symbols.
+ local sofs, ofs = ffi.offsetof(o, "space"), 1
+ for i,name in ipairs{
+ ".symtab", ".shstrtab", ".strtab", ".rodata", ".note.GNU-stack",
+ } do
+ local sect = o.sect[i]
+ sect.align = fofs(1)
+ sect.name = f32(ofs)
+ ffi.copy(o.space+ofs, name)
+ ofs = ofs + #name+1
+ end
+ o.sect[1].type = f32(2) -- .symtab
+ o.sect[1].link = f32(3)
+ o.sect[1].info = f32(1)
+ o.sect[1].align = fofs(8)
+ o.sect[1].ofs = fofs(ffi.offsetof(o, "sym"))
+ o.sect[1].entsize = fofs(ffi.sizeof(o.sym[0]))
+ o.sect[1].size = fofs(ffi.sizeof(o.sym))
+ o.sym[1].name = f32(1)
+ o.sym[1].sectidx = f16(4)
+ o.sym[1].size = fofs(#s)
+ o.sym[1].info = 17
+ o.sect[2].type = f32(3) -- .shstrtab
+ o.sect[2].ofs = fofs(sofs)
+ o.sect[2].size = fofs(ofs)
+ o.sect[3].type = f32(3) -- .strtab
+ o.sect[3].ofs = fofs(sofs + ofs)
+ o.sect[3].size = fofs(#symname+2)
+ ffi.copy(o.space+ofs+1, symname)
+ ofs = ofs + #symname + 2
+ o.sect[4].type = f32(1) -- .rodata
+ o.sect[4].flags = fofs(2)
+ o.sect[4].ofs = fofs(sofs + ofs)
+ o.sect[4].size = fofs(#s)
+ o.sect[5].type = f32(1) -- .note.GNU-stack
+ o.sect[5].ofs = fofs(sofs + ofs + #s)
+
+ -- Write ELF object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_peobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct {
+ uint16_t arch, nsects;
+ uint32_t time, symtabofs, nsyms;
+ uint16_t opthdrsz, flags;
+} PEheader;
+typedef struct {
+ char name[8];
+ uint32_t vsize, vaddr, size, ofs, relocofs, lineofs;
+ uint16_t nreloc, nline;
+ uint32_t flags;
+} PEsection;
+typedef struct __attribute((packed)) {
+ union {
+ char name[8];
+ uint32_t nameref[2];
+ };
+ uint32_t value;
+ int16_t sect;
+ uint16_t type;
+ uint8_t scl, naux;
+} PEsym;
+typedef struct __attribute((packed)) {
+ uint32_t size;
+ uint16_t nreloc, nline;
+ uint32_t cksum;
+ uint16_t assoc;
+ uint8_t comdatsel, unused[3];
+} PEsymaux;
+typedef struct {
+ PEheader hdr;
+ PEsection sect[2];
+ // Must be an even number of symbol structs.
+ PEsym sym0;
+ PEsymaux sym0aux;
+ PEsym sym1;
+ PEsymaux sym1aux;
+ PEsym sym2;
+ PEsym sym3;
+ uint32_t strtabsize;
+ uint8_t space[4096];
+} PEobj;
+]]
+ local symname = LJBC_PREFIX..ctx.modname
+ local ai = assert(map_arch[ctx.arch])
+ local is64 = ai.b == 64
+ local symexport = " /EXPORT:"..symname..",DATA "
+
+ -- The file format is always little-endian. Swap if the host is big-endian.
+ local function f32(x) return x end
+ local f16 = f32
+ if ffi.abi("be") then
+ f32 = bit.bswap
+ function f16(x) return bit.rshift(bit.bswap(x), 16) end
+ end
+
+ -- Create PE object and fill in header.
+ local o = ffi.new("PEobj")
+ local hdr = o.hdr
+ hdr.arch = f16(assert(ai.p))
+ hdr.nsects = f16(2)
+ hdr.symtabofs = f32(ffi.offsetof(o, "sym0"))
+ hdr.nsyms = f32(6)
+
+ -- Fill in sections and symbols.
+ o.sect[0].name = ".drectve"
+ o.sect[0].size = f32(#symexport)
+ o.sect[0].flags = f32(0x00100a00)
+ o.sym0.sect = f16(1)
+ o.sym0.scl = 3
+ o.sym0.name = ".drectve"
+ o.sym0.naux = 1
+ o.sym0aux.size = f32(#symexport)
+ o.sect[1].name = ".rdata"
+ o.sect[1].size = f32(#s)
+ o.sect[1].flags = f32(0x40300040)
+ o.sym1.sect = f16(2)
+ o.sym1.scl = 3
+ o.sym1.name = ".rdata"
+ o.sym1.naux = 1
+ o.sym1aux.size = f32(#s)
+ o.sym2.sect = f16(2)
+ o.sym2.scl = 2
+ o.sym2.nameref[1] = f32(4)
+ o.sym3.sect = f16(-1)
+ o.sym3.scl = 2
+ o.sym3.value = f32(1)
+ o.sym3.name = "@feat.00" -- Mark as SafeSEH compliant.
+ ffi.copy(o.space, symname)
+ local ofs = #symname + 1
+ o.strtabsize = f32(ofs + 4)
+ o.sect[0].ofs = f32(ffi.offsetof(o, "space") + ofs)
+ ffi.copy(o.space + ofs, symexport)
+ ofs = ofs + #symexport
+ o.sect[1].ofs = f32(ffi.offsetof(o, "space") + ofs)
+
+ -- Write PE object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, ffi.sizeof(o)-4096+ofs))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_machobj(ctx, output, s, ffi)
+ ffi.cdef[[
+typedef struct
+{
+ uint32_t magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags;
+} mach_header;
+typedef struct
+{
+ mach_header; uint32_t reserved;
+} mach_header_64;
+typedef struct {
+ uint32_t cmd, cmdsize;
+ char segname[16];
+ uint32_t vmaddr, vmsize, fileoff, filesize;
+ uint32_t maxprot, initprot, nsects, flags;
+} mach_segment_command;
+typedef struct {
+ uint32_t cmd, cmdsize;
+ char segname[16];
+ uint64_t vmaddr, vmsize, fileoff, filesize;
+ uint32_t maxprot, initprot, nsects, flags;
+} mach_segment_command_64;
+typedef struct {
+ char sectname[16], segname[16];
+ uint32_t addr, size;
+ uint32_t offset, align, reloff, nreloc, flags;
+ uint32_t reserved1, reserved2;
+} mach_section;
+typedef struct {
+ char sectname[16], segname[16];
+ uint64_t addr, size;
+ uint32_t offset, align, reloff, nreloc, flags;
+ uint32_t reserved1, reserved2, reserved3;
+} mach_section_64;
+typedef struct {
+ uint32_t cmd, cmdsize, symoff, nsyms, stroff, strsize;
+} mach_symtab_command;
+typedef struct {
+ int32_t strx;
+ uint8_t type, sect;
+ int16_t desc;
+ uint32_t value;
+} mach_nlist;
+typedef struct {
+ int32_t strx;
+ uint8_t type, sect;
+ uint16_t desc;
+ uint64_t value;
+} mach_nlist_64;
+typedef struct
+{
+ int32_t magic, nfat_arch;
+} mach_fat_header;
+typedef struct
+{
+ int32_t cputype, cpusubtype, offset, size, align;
+} mach_fat_arch;
+typedef struct {
+ struct {
+ mach_header hdr;
+ mach_segment_command seg;
+ mach_section sec;
+ mach_symtab_command sym;
+ } arch[1];
+ mach_nlist sym_entry;
+ uint8_t space[4096];
+} mach_obj;
+typedef struct {
+ struct {
+ mach_header_64 hdr;
+ mach_segment_command_64 seg;
+ mach_section_64 sec;
+ mach_symtab_command sym;
+ } arch[1];
+ mach_nlist_64 sym_entry;
+ uint8_t space[4096];
+} mach_obj_64;
+typedef struct {
+ mach_fat_header fat;
+ mach_fat_arch fat_arch[2];
+ struct {
+ mach_header hdr;
+ mach_segment_command seg;
+ mach_section sec;
+ mach_symtab_command sym;
+ } arch[2];
+ mach_nlist sym_entry;
+ uint8_t space[4096];
+} mach_fat_obj;
+typedef struct {
+ mach_fat_header fat;
+ mach_fat_arch fat_arch[2];
+ struct {
+ mach_header_64 hdr;
+ mach_segment_command_64 seg;
+ mach_section_64 sec;
+ mach_symtab_command sym;
+ } arch[2];
+ mach_nlist_64 sym_entry;
+ uint8_t space[4096];
+} mach_fat_obj_64;
+]]
+ local symname = '_'..LJBC_PREFIX..ctx.modname
+ local isfat, is64, align, mobj = false, false, 4, "mach_obj"
+ if ctx.arch == "x64" then
+ is64, align, mobj = true, 8, "mach_obj_64"
+ elseif ctx.arch == "arm" then
+ isfat, mobj = true, "mach_fat_obj"
+ elseif ctx.arch == "arm64" then
+ is64, align, isfat, mobj = true, 8, true, "mach_fat_obj_64"
+ else
+ check(ctx.arch == "x86", "unsupported architecture for OSX")
+ end
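+ -- aligned: round v up to the next multiple of a (a must be a power of 2).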
+ local function aligned(v, a) return bit.band(v+a-1, -a) end
+ local be32 = bit.bswap -- Mach-O FAT is BE, supported archs are LE.
+
+ -- Create Mach-O object and fill in header.
+ local o = ffi.new(mobj)
+ local mach_size = aligned(ffi.offsetof(o, "space")+#symname+2, align)
+ local cputype = ({ x86={7}, x64={0x01000007}, arm={7,12}, arm64={0x01000007,0x0100000c} })[ctx.arch]
+ local cpusubtype = ({ x86={3}, x64={3}, arm={3,9}, arm64={3,0} })[ctx.arch]
+ if isfat then
+ o.fat.magic = be32(0xcafebabe)
+ o.fat.nfat_arch = be32(#cpusubtype)
+ end
+
+ -- Fill in sections and symbols.
+ for i=0,#cpusubtype-1 do
+ local ofs = 0
+ if isfat then
+ local a = o.fat_arch[i]
+ a.cputype = be32(cputype[i+1])
+ a.cpusubtype = be32(cpusubtype[i+1])
+ -- Subsequent slices overlap each other to share data.
+ ofs = ffi.offsetof(o, "arch") + i*ffi.sizeof(o.arch[0])
+ a.offset = be32(ofs)
+ a.size = be32(mach_size-ofs+#s)
+ end
+ local a = o.arch[i]
+ a.hdr.magic = is64 and 0xfeedfacf or 0xfeedface
+ a.hdr.cputype = cputype[i+1]
+ a.hdr.cpusubtype = cpusubtype[i+1]
+ a.hdr.filetype = 1
+ a.hdr.ncmds = 2
+ a.hdr.sizeofcmds = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)+ffi.sizeof(a.sym)
+ a.seg.cmd = is64 and 0x19 or 0x1
+ a.seg.cmdsize = ffi.sizeof(a.seg)+ffi.sizeof(a.sec)
+ a.seg.vmsize = #s
+ a.seg.fileoff = mach_size-ofs
+ a.seg.filesize = #s
+ a.seg.maxprot = 1
+ a.seg.initprot = 1
+ a.seg.nsects = 1
+ ffi.copy(a.sec.sectname, "__data")
+ ffi.copy(a.sec.segname, "__DATA")
+ a.sec.size = #s
+ a.sec.offset = mach_size-ofs
+ a.sym.cmd = 2
+ a.sym.cmdsize = ffi.sizeof(a.sym)
+ a.sym.symoff = ffi.offsetof(o, "sym_entry")-ofs
+ a.sym.nsyms = 1
+ a.sym.stroff = ffi.offsetof(o, "sym_entry")+ffi.sizeof(o.sym_entry)-ofs
+ a.sym.strsize = aligned(#symname+2, align)
+ end
+ o.sym_entry.type = 0xf
+ o.sym_entry.sect = 1
+ o.sym_entry.strx = 1
+ ffi.copy(o.space+1, symname)
+
+ -- Write Mach-O object file.
+ local fp = savefile(output, "wb")
+ fp:write(ffi.string(o, mach_size))
+ bcsave_tail(fp, output, s)
+end
+
+local function bcsave_obj(ctx, output, s)
+ local ok, ffi = pcall(require, "ffi")
+ check(ok, "FFI library required to write this file type")
+ if output == "-" and jit.os == "Windows" then
+ set_stdout_binary(ffi)
+ end
+ if ctx.os == "windows" then
+ return bcsave_peobj(ctx, output, s, ffi)
+ elseif ctx.os == "osx" then
+ return bcsave_machobj(ctx, output, s, ffi)
+ else
+ return bcsave_elfobj(ctx, output, s, ffi)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local function bclist(ctx, input, output)
+ local f = readfile(ctx, input)
+ require("jit.bc").dump(f, savefile(output, "w"), true)
+end
+
+local function bcsave(ctx, input, output)
+ local f = readfile(ctx, input)
+ local s = string.dump(f, ctx.strip)
+ local t = ctx.type
+ if not t then
+ t = detecttype(output)
+ ctx.type = t
+ end
+ if t == "raw" then
+ bcsave_raw(output, s)
+ else
+ if not ctx.modname then ctx.modname = detectmodname(input) end
+ if t == "obj" then
+ bcsave_obj(ctx, output, s)
+ else
+ bcsave_c(ctx, output, s)
+ end
+ end
+end
+
+local function docmd(...)
+ local arg = {...}
+ local n = 1
+ local list = false
+ local ctx = {
+ strip = true, arch = jit.arch, os = jit.os:lower(),
+ type = false, modname = false,
+ }
+ while n <= #arg do
+ local a = arg[n]
+ if type(a) == "string" and a:sub(1, 1) == "-" and a ~= "-" then
+ tremove(arg, n)
+ if a == "--" then break end
+ for m=2,#a do
+ local opt = a:sub(m, m)
+ if opt == "l" then
+ list = true
+ elseif opt == "s" then
+ ctx.strip = true
+ elseif opt == "g" then
+ ctx.strip = false
+ else
+ if arg[n] == nil or m ~= #a then usage() end
+ if opt == "e" then
+ if n ~= 1 then usage() end
+ arg[1] = check(loadstring(arg[1]))
+ elseif opt == "n" then
+ ctx.modname = checkmodname(tremove(arg, n))
+ elseif opt == "t" then
+ ctx.type = checkarg(tremove(arg, n), map_type, "file type")
+ elseif opt == "a" then
+ ctx.arch = checkarg(tremove(arg, n), map_arch, "architecture")
+ elseif opt == "o" then
+ ctx.os = checkarg(tremove(arg, n), map_os, "OS name")
+ elseif opt == "F" then
+ ctx.filename = "@"..tremove(arg, n)
+ else
+ usage()
+ end
+ end
+ end
+ else
+ n = n + 1
+ end
+ end
+ if list then
+ if #arg == 0 or #arg > 2 then usage() end
+ bclist(ctx, arg[1], arg[2] or "-")
+ else
+ if #arg ~= 2 then usage() end
+ bcsave(ctx, arg[1], arg[2])
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Public module functions.
+return {
+ start = docmd -- Process -b command line option.
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_arm.lua b/libs/luajit-cmake/luajit/src/jit/dis_arm.lua
new file mode 100644
index 0000000..18ab68d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_arm.lua
@@ -0,0 +1,689 @@
+----------------------------------------------------------------------------
+-- LuaJIT ARM disassembler module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles most user-mode ARMv7 instructions.
+-- NYI: Advanced SIMD and VFP instructions.
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch = string.match, string.gmatch
+local concat = table.concat
+local bit = require("bit")
+local band, bor, ror, tohex = bit.band, bit.bor, bit.ror, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Opcode maps
+------------------------------------------------------------------------------
+
+local map_loadc = {
+ shift = 8, mask = 15,
+ [10] = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "vmovFmDN", "vstmFNdr",
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vstrFdl",
+ { shift = 16, mask = 15, [13] = "vpushFdr", _ = "vstmdbFNdr", }
+ },
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "vmovFDNm",
+ { shift = 16, mask = 15, [13] = "vpopFdr", _ = "vldmFNdr", },
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vldrFdl", "vldmdbFNdr",
+ },
+ },
+ },
+ [11] = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "vmovGmDN", "vstmGNdr",
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vstrGdl",
+ { shift = 16, mask = 15, [13] = "vpushGdr", _ = "vstmdbGNdr", }
+ },
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "vmovGDNm",
+ { shift = 16, mask = 15, [13] = "vpopGdr", _ = "vldmGNdr", },
+ _ = {
+ shift = 21, mask = 1,
+ [0] = "vldrGdl", "vldmdbGNdr",
+ },
+ },
+ },
+ _ = {
+ shift = 0, mask = 0 -- NYI ldc, mcrr, mrrc.
+ },
+}
+
+local map_vfps = {
+ shift = 6, mask = 0x2c001,
+ [0] = "vmlaF.dnm", "vmlsF.dnm",
+ [0x04000] = "vnmlsF.dnm", [0x04001] = "vnmlaF.dnm",
+ [0x08000] = "vmulF.dnm", [0x08001] = "vnmulF.dnm",
+ [0x0c000] = "vaddF.dnm", [0x0c001] = "vsubF.dnm",
+ [0x20000] = "vdivF.dnm",
+ [0x24000] = "vfnmsF.dnm", [0x24001] = "vfnmaF.dnm",
+ [0x28000] = "vfmaF.dnm", [0x28001] = "vfmsF.dnm",
+ [0x2c000] = "vmovF.dY",
+ [0x2c001] = {
+ shift = 7, mask = 0x1e01,
+ [0] = "vmovF.dm", "vabsF.dm",
+ [0x0200] = "vnegF.dm", [0x0201] = "vsqrtF.dm",
+ [0x0800] = "vcmpF.dm", [0x0801] = "vcmpeF.dm",
+ [0x0a00] = "vcmpzF.d", [0x0a01] = "vcmpzeF.d",
+ [0x0e01] = "vcvtG.dF.m",
+ [0x1000] = "vcvt.f32.u32Fdm", [0x1001] = "vcvt.f32.s32Fdm",
+ [0x1800] = "vcvtr.u32F.dm", [0x1801] = "vcvt.u32F.dm",
+ [0x1a00] = "vcvtr.s32F.dm", [0x1a01] = "vcvt.s32F.dm",
+ },
+}
+
+local map_vfpd = {
+ shift = 6, mask = 0x2c001,
+ [0] = "vmlaG.dnm", "vmlsG.dnm",
+ [0x04000] = "vnmlsG.dnm", [0x04001] = "vnmlaG.dnm",
+ [0x08000] = "vmulG.dnm", [0x08001] = "vnmulG.dnm",
+ [0x0c000] = "vaddG.dnm", [0x0c001] = "vsubG.dnm",
+ [0x20000] = "vdivG.dnm",
+ [0x24000] = "vfnmsG.dnm", [0x24001] = "vfnmaG.dnm",
+ [0x28000] = "vfmaG.dnm", [0x28001] = "vfmsG.dnm",
+ [0x2c000] = "vmovG.dY",
+ [0x2c001] = {
+ shift = 7, mask = 0x1e01,
+ [0] = "vmovG.dm", "vabsG.dm",
+ [0x0200] = "vnegG.dm", [0x0201] = "vsqrtG.dm",
+ [0x0800] = "vcmpG.dm", [0x0801] = "vcmpeG.dm",
+ [0x0a00] = "vcmpzG.d", [0x0a01] = "vcmpzeG.d",
+ [0x0e01] = "vcvtF.dG.m",
+ [0x1000] = "vcvt.f64.u32GdFm", [0x1001] = "vcvt.f64.s32GdFm",
+ [0x1800] = "vcvtr.u32FdG.m", [0x1801] = "vcvt.u32FdG.m",
+ [0x1a00] = "vcvtr.s32FdG.m", [0x1a01] = "vcvt.s32FdG.m",
+ },
+}
+
+local map_datac = {
+ shift = 24, mask = 1,
+ [0] = {
+ shift = 4, mask = 1,
+ [0] = {
+ shift = 8, mask = 15,
+ [10] = map_vfps,
+ [11] = map_vfpd,
+ -- NYI cdp, mcr, mrc.
+ },
+ {
+ shift = 8, mask = 15,
+ [10] = {
+ shift = 20, mask = 15,
+ [0] = "vmovFnD", "vmovFDn",
+ [14] = "vmsrD",
+ [15] = { shift = 12, mask = 15, [15] = "vmrs", _ = "vmrsD", },
+ },
+ },
+ },
+ "svcT",
+}
+
+local map_loadcu = {
+ shift = 0, mask = 0, -- NYI unconditional CP load/store.
+}
+
+local map_datacu = {
+ shift = 0, mask = 0, -- NYI unconditional CP data.
+}
+
+local map_simddata = {
+ shift = 0, mask = 0, -- NYI SIMD data.
+}
+
+local map_simdload = {
+ shift = 0, mask = 0, -- NYI SIMD load/store, preload.
+}
+
+local map_preload = {
+ shift = 0, mask = 0, -- NYI preload.
+}
+
+local map_media = {
+ shift = 20, mask = 31,
+ [0] = false,
+ { --01
+ shift = 5, mask = 7,
+ [0] = "sadd16DNM", "sasxDNM", "ssaxDNM", "ssub16DNM",
+ "sadd8DNM", false, false, "ssub8DNM",
+ },
+ { --02
+ shift = 5, mask = 7,
+ [0] = "qadd16DNM", "qasxDNM", "qsaxDNM", "qsub16DNM",
+ "qadd8DNM", false, false, "qsub8DNM",
+ },
+ { --03
+ shift = 5, mask = 7,
+ [0] = "shadd16DNM", "shasxDNM", "shsaxDNM", "shsub16DNM",
+ "shadd8DNM", false, false, "shsub8DNM",
+ },
+ false,
+ { --05
+ shift = 5, mask = 7,
+ [0] = "uadd16DNM", "uasxDNM", "usaxDNM", "usub16DNM",
+ "uadd8DNM", false, false, "usub8DNM",
+ },
+ { --06
+ shift = 5, mask = 7,
+ [0] = "uqadd16DNM", "uqasxDNM", "uqsaxDNM", "uqsub16DNM",
+ "uqadd8DNM", false, false, "uqsub8DNM",
+ },
+ { --07
+ shift = 5, mask = 7,
+ [0] = "uhadd16DNM", "uhasxDNM", "uhsaxDNM", "uhsub16DNM",
+ "uhadd8DNM", false, false, "uhsub8DNM",
+ },
+ { --08
+ shift = 5, mask = 7,
+ [0] = "pkhbtDNMU", false, "pkhtbDNMU",
+ { shift = 16, mask = 15, [15] = "sxtb16DMU", _ = "sxtab16DNMU", },
+ "pkhbtDNMU", "selDNM", "pkhtbDNMU",
+ },
+ false,
+ { --0a
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "ssat16DxM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxtbDMU", _ = "sxtabDNMU", },
+ "ssatDxMu", false, "ssatDxMu",
+ },
+ { --0b
+ shift = 5, mask = 7,
+ [0] = "ssatDxMu", "revDM", "ssatDxMu",
+ { shift = 16, mask = 15, [15] = "sxthDMU", _ = "sxtahDNMU", },
+ "ssatDxMu", "rev16DM", "ssatDxMu",
+ },
+ { --0c
+ shift = 5, mask = 7,
+ [3] = { shift = 16, mask = 15, [15] = "uxtb16DMU", _ = "uxtab16DNMU", },
+ },
+ false,
+ { --0e
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "usat16DwM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxtbDMU", _ = "uxtabDNMU", },
+ "usatDwMu", false, "usatDwMu",
+ },
+ { --0f
+ shift = 5, mask = 7,
+ [0] = "usatDwMu", "rbitDM", "usatDwMu",
+ { shift = 16, mask = 15, [15] = "uxthDMU", _ = "uxtahDNMU", },
+ "usatDwMu", "revshDM", "usatDwMu",
+ },
+ { --10
+ shift = 12, mask = 15,
+ [15] = {
+ shift = 5, mask = 7,
+ "smuadNMS", "smuadxNMS", "smusdNMS", "smusdxNMS",
+ },
+ _ = {
+ shift = 5, mask = 7,
+ [0] = "smladNMSD", "smladxNMSD", "smlsdNMSD", "smlsdxNMSD",
+ },
+ },
+ false, false, false,
+ { --14
+ shift = 5, mask = 7,
+ [0] = "smlaldDNMS", "smlaldxDNMS", "smlsldDNMS", "smlsldxDNMS",
+ },
+ { --15
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "smmulNMS", _ = "smmlaNMSD", },
+ { shift = 12, mask = 15, [15] = "smmulrNMS", _ = "smmlarNMSD", },
+ false, false, false, false,
+ "smmlsNMSD", "smmlsrNMSD",
+ },
+ false, false,
+ { --18
+ shift = 5, mask = 7,
+ [0] = { shift = 12, mask = 15, [15] = "usad8NMS", _ = "usada8NMSD", },
+ },
+ false,
+ { --1a
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1b
+ shift = 5, mask = 3, [2] = "sbfxDMvw",
+ },
+ { --1c
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1d
+ shift = 5, mask = 3,
+ [0] = { shift = 0, mask = 15, [15] = "bfcDvX", _ = "bfiDMvX", },
+ },
+ { --1e
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+ { --1f
+ shift = 5, mask = 3, [2] = "ubfxDMvw",
+ },
+}
+
+local map_load = {
+ shift = 21, mask = 9,
+ {
+ shift = 20, mask = 5,
+ [0] = "strtDL", "ldrtDL", [4] = "strbtDL", [5] = "ldrbtDL",
+ },
+ _ = {
+ shift = 20, mask = 5,
+ [0] = "strDL", "ldrDL", [4] = "strbDL", [5] = "ldrbDL",
+ }
+}
+
+local map_load1 = {
+ shift = 4, mask = 1,
+ [0] = map_load, map_media,
+}
+
+local map_loadm = {
+ shift = 20, mask = 1,
+ [0] = {
+ shift = 23, mask = 3,
+ [0] = "stmdaNR", "stmNR",
+ { shift = 16, mask = 63, [45] = "pushR", _ = "stmdbNR", }, "stmibNR",
+ },
+ {
+ shift = 23, mask = 3,
+ [0] = "ldmdaNR", { shift = 16, mask = 63, [61] = "popR", _ = "ldmNR", },
+ "ldmdbNR", "ldmibNR",
+ },
+}
+
+local map_data = {
+ shift = 21, mask = 15,
+ [0] = "andDNPs", "eorDNPs", "subDNPs", "rsbDNPs",
+ "addDNPs", "adcDNPs", "sbcDNPs", "rscDNPs",
+ "tstNP", "teqNP", "cmpNP", "cmnNP",
+ "orrDNPs", "movDPs", "bicDNPs", "mvnDPs",
+}
+
+local map_mul = {
+ shift = 21, mask = 7,
+ [0] = "mulNMSs", "mlaNMSDs", "umaalDNMS", "mlsDNMS",
+ "umullDNMSs", "umlalDNMSs", "smullDNMSs", "smlalDNMSs",
+}
+
+local map_sync = {
+ shift = 20, mask = 15, -- NYI: brackets around N. R(D+1) for ldrexd/strexd.
+ [0] = "swpDMN", false, false, false,
+ "swpbDMN", false, false, false,
+ "strexDMN", "ldrexDN", "strexdDN", "ldrexdDN",
+ "strexbDMN", "ldrexbDN", "strexhDN", "ldrexhDN",
+}
+
+local map_mulh = {
+ shift = 21, mask = 3,
+ [0] = { shift = 5, mask = 3,
+ [0] = "smlabbNMSD", "smlatbNMSD", "smlabtNMSD", "smlattNMSD", },
+ { shift = 5, mask = 3,
+ [0] = "smlawbNMSD", "smulwbNMS", "smlawtNMSD", "smulwtNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smlalbbDNMS", "smlaltbDNMS", "smlalbtDNMS", "smlalttDNMS", },
+ { shift = 5, mask = 3,
+ [0] = "smulbbNMS", "smultbNMS", "smulbtNMS", "smulttNMS", },
+}
+
+local map_misc = {
+ shift = 4, mask = 7,
+ -- NYI: decode PSR bits of msr.
+ [0] = { shift = 21, mask = 1, [0] = "mrsD", "msrM", },
+ { shift = 21, mask = 3, "bxM", false, "clzDM", },
+ { shift = 21, mask = 3, "bxjM", },
+ { shift = 21, mask = 3, "blxM", },
+ false,
+ { shift = 21, mask = 3, [0] = "qaddDMN", "qsubDMN", "qdaddDMN", "qdsubDMN", },
+ false,
+ { shift = 21, mask = 3, "bkptK", },
+}
+
+local map_datar = {
+ shift = 4, mask = 9,
+ [9] = {
+ shift = 5, mask = 3,
+ [0] = { shift = 24, mask = 1, [0] = map_mul, map_sync, },
+ { shift = 20, mask = 1, [0] = "strhDL", "ldrhDL", },
+ { shift = 20, mask = 1, [0] = "ldrdDL", "ldrsbDL", },
+ { shift = 20, mask = 1, [0] = "strdDL", "ldrshDL", },
+ },
+ _ = {
+ shift = 20, mask = 25,
+ [16] = { shift = 7, mask = 1, [0] = map_misc, map_mulh, },
+ _ = {
+ shift = 0, mask = 0xffffffff,
+ [bor(0xe1a00000)] = "nop",
+ _ = map_data,
+ }
+ },
+}
+
+local map_datai = {
+ shift = 20, mask = 31, -- NYI: decode PSR bits of msr. Decode imm12.
+ [16] = "movwDW", [20] = "movtDW",
+ [18] = { shift = 0, mask = 0xf00ff, [0] = "nopv6", _ = "msrNW", },
+ [22] = "msrNW",
+ _ = map_data,
+}
+
+local map_branch = {
+ shift = 24, mask = 1,
+ [0] = "bB", "blB"
+}
+
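+-- Indexed with instruction bits 27..25; map_uncondins below is used instead
+-- when the condition field (bits 31..28) is 15.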
+local map_condins = {
+ [0] = map_datar, map_datai, map_load, map_load1,
+ map_loadm, map_branch, map_loadc, map_datac
+}
+
+-- NYI: setend.
+local map_uncondins = {
+ [0] = false, map_simddata, map_simdload, map_preload,
+ false, "blxB", map_loadcu, map_datacu,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
+}
+
+local map_cond = {
+ [0] = "eq", "ne", "hs", "lo", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al",
+}
+
+local map_shift = { [0] = "lsl", "lsr", "asr", "ror", }
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then
+ extra = "\t->"..sym
+ elseif band(ctx.op, 0x0e000000) ~= 0x0a000000 then
+ extra = "\t; 0x"..tohex(ctx.rel)
+ end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-5s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-5s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Format operand 2 of load/store opcodes.
+local function fmtload(ctx, op, pos)
+ local base = map_gpr[band(rshift(op, 16), 15)]
+ local x, ofs
+ local ext = (band(op, 0x04000000) == 0)
+ if not ext and band(op, 0x02000000) == 0 then
+ ofs = band(op, 4095)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ elseif ext and band(op, 0x00400000) ~= 0 then
+ ofs = band(op, 15) + band(rshift(op, 4), 0xf0)
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ ofs = "#"..ofs
+ else
+ ofs = map_gpr[band(op, 15)]
+ if ext or band(op, 0xfe0) == 0 then
+ elseif band(op, 0xfe0) == 0x60 then
+ ofs = format("%s, rrx", ofs)
+ else
+ local sh = band(rshift(op, 7), 31)
+ if sh == 0 then sh = 32 end
+ ofs = format("%s, %s #%d", ofs, map_shift[band(rshift(op, 5), 3)], sh)
+ end
+ if band(op, 0x00800000) == 0 then ofs = "-"..ofs end
+ end
+ if ofs == "#0" then
+ x = format("[%s]", base)
+ elseif band(op, 0x01000000) == 0 then
+ x = format("[%s], %s", base, ofs)
+ else
+ x = format("[%s, %s]", base, ofs)
+ end
+ if band(op, 0x01200000) == 0x01200000 then x = x.."!" end
+ return x
+end
+
+-- Format operand 2 of vector load/store opcodes.
+local function fmtvload(ctx, op, pos)
+ local base = map_gpr[band(rshift(op, 16), 15)]
+ local ofs = band(op, 255)*4
+ if band(op, 0x00800000) == 0 then ofs = -ofs end
+ if base == "pc" then ctx.rel = ctx.addr + pos + 8 + ofs end
+ if ofs == 0 then
+ return format("[%s]", base)
+ else
+ return format("[%s, #%d]", base, ofs)
+ end
+end
+
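+-- Format an s/d VFP register name from its split (base + high-bit) fields.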
+local function fmtvr(op, vr, sh0, sh1)
+ if vr == "s" then
+ return format("s%d", 2*band(rshift(op, sh0), 15)+band(rshift(op, sh1), 1))
+ else
+ return format("d%d", band(rshift(op, sh0), 15)+band(rshift(op, sh1-4), 16))
+ end
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+ local operands = {}
+ local suffix = ""
+ local last, name, pat
+ local vr
+ ctx.op = op
+ ctx.rel = nil
+
+ local cond = rshift(op, 28)
+ local opat
+ if cond == 15 then
+ opat = map_uncondins[band(rshift(op, 25), 7)]
+ else
+ if cond ~= 14 then suffix = map_cond[cond] end
+ opat = map_condins[band(rshift(op, 25), 7)]
+ end
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ name, pat = match(opat, "^([a-z0-9]*)(.*)")
+ if sub(pat, 1, 1) == "." then
+ local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)")
+ suffix = suffix..s2
+ pat = p2
+ end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "D" then
+ x = map_gpr[band(rshift(op, 12), 15)]
+ elseif p == "N" then
+ x = map_gpr[band(rshift(op, 16), 15)]
+ elseif p == "S" then
+ x = map_gpr[band(rshift(op, 8), 15)]
+ elseif p == "M" then
+ x = map_gpr[band(op, 15)]
+ elseif p == "d" then
+ x = fmtvr(op, vr, 12, 22)
+ elseif p == "n" then
+ x = fmtvr(op, vr, 16, 7)
+ elseif p == "m" then
+ x = fmtvr(op, vr, 0, 5)
+ elseif p == "P" then
+ if band(op, 0x02000000) ~= 0 then
+ x = ror(band(op, 255), 2*band(rshift(op, 8), 15))
+ else
+ x = map_gpr[band(op, 15)]
+ if band(op, 0xff0) ~= 0 then
+ operands[#operands+1] = x
+ local s = map_shift[band(rshift(op, 5), 3)]
+ local r = nil
+ if band(op, 0xf90) == 0 then
+ if s == "ror" then s = "rrx" else r = "#32" end
+ elseif band(op, 0x10) == 0 then
+ r = "#"..band(rshift(op, 7), 31)
+ else
+ r = map_gpr[band(rshift(op, 8), 15)]
+ end
+ if name == "mov" then name = s; x = r
+ elseif r then x = format("%s %s", s, r)
+ else x = s end
+ end
+ end
+ elseif p == "L" then
+ x = fmtload(ctx, op, pos)
+ elseif p == "l" then
+ x = fmtvload(ctx, op, pos)
+ elseif p == "B" then
+ local addr = ctx.addr + pos + 8 + arshift(lshift(op, 8), 6)
+ if cond == 15 then addr = addr + band(rshift(op, 23), 2) end
+ ctx.rel = addr
+ x = "0x"..tohex(addr)
+ elseif p == "F" then
+ vr = "s"
+ elseif p == "G" then
+ vr = "d"
+ elseif p == "." then
+ suffix = suffix..(vr == "s" and ".f32" or ".f64")
+ elseif p == "R" then
+ if band(op, 0x00200000) ~= 0 and #operands == 1 then
+ operands[1] = operands[1].."!"
+ end
+ local t = {}
+ for i=0,15 do
+ if band(rshift(op, i), 1) == 1 then t[#t+1] = map_gpr[i] end
+ end
+ x = "{"..concat(t, ", ").."}"
+ elseif p == "r" then
+ if band(op, 0x00200000) ~= 0 and #operands == 2 then
+ operands[1] = operands[1].."!"
+ end
+ local s = tonumber(sub(last, 2))
+ local n = band(op, 255)
+ if vr == "d" then n = rshift(n, 1) end
+ operands[#operands] = format("{%s-%s%d}", last, vr, s+n-1)
+ elseif p == "W" then
+ x = band(op, 0x0fff) + band(rshift(op, 4), 0xf000)
+ elseif p == "T" then
+ x = "#0x"..tohex(band(op, 0x00ffffff), 6)
+ elseif p == "U" then
+ x = band(rshift(op, 7), 31)
+ if x == 0 then x = nil end
+ elseif p == "u" then
+ x = band(rshift(op, 7), 31)
+ if band(op, 0x40) == 0 then
+ if x == 0 then x = nil else x = "lsl #"..x end
+ else
+ if x == 0 then x = "asr #32" else x = "asr #"..x end
+ end
+ elseif p == "v" then
+ x = band(rshift(op, 7), 31)
+ elseif p == "w" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "x" then
+ x = band(rshift(op, 16), 31) + 1
+ elseif p == "X" then
+ x = band(rshift(op, 16), 31) - last + 1
+ elseif p == "Y" then
+ x = band(rshift(op, 12), 0xf0) + band(op, 0x0f)
+ elseif p == "K" then
+ x = "#0x"..tohex(band(rshift(op, 4), 0x0000fff0) + band(op, 15), 4)
+ elseif p == "s" then
+ if band(op, 0x00100000) ~= 0 then suffix = "s"..suffix end
+ else
+ assert(false)
+ end
+ if x then
+ last = x
+ if type(x) == "number" then x = "#"..x end
+ operands[#operands+1] = x
+ end
+ end
+
+ return putop(ctx, name..suffix, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ctx.pos = ofs
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass(code, addr, out)
+ create(code, addr, out):disass()
+end
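+
+-- For example, the "nop" encoding 0xe1a00000 (in little-endian byte order)
+-- disassembles to a single "nop" line:
+--   require("jit.dis_arm").disass("\0\0\160\225")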
+
+-- Return register name for RID.
+local function regname(r)
+ if r < 16 then return map_gpr[r] end
+ return "d"..(r-16)
+end
+
+-- Public module functions.
+return {
+ create = create,
+ disass = disass,
+ regname = regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_arm64.lua b/libs/luajit-cmake/luajit/src/jit/dis_arm64.lua
new file mode 100644
index 0000000..531584a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_arm64.lua
@@ -0,0 +1,1216 @@
+----------------------------------------------------------------------------
+-- LuaJIT ARM64 disassembler module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+--
+-- Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
+-- Sponsored by Cisco Systems, Inc.
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles most user-mode AArch64 instructions.
+-- NYI: Advanced SIMD and VFP instructions.
+------------------------------------------------------------------------------
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, bxor, tohex = bit.band, bit.bor, bit.bxor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+local ror = bit.ror
+
+------------------------------------------------------------------------------
+-- Opcode maps
+------------------------------------------------------------------------------
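+--
+-- Same shift/mask tree walk as in dis_arm, but leaf names may be
+-- "|"-separated alias chains (e.g. "adds|cmn"); disass_ins() below switches
+-- to an alias when a zero-register or special-case operand allows it.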
+
+local map_adr = { -- PC-relative addressing.
+ shift = 31, mask = 1,
+ [0] = "adrDBx", "adrpDBx"
+}
+
+local map_addsubi = { -- Add/subtract immediate.
+ shift = 29, mask = 3,
+ [0] = "add|movDNIg", "adds|cmnD0NIg", "subDNIg", "subs|cmpD0NIg",
+}
+
+local map_logi = { -- Logical immediate.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 22, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "andDNig", "orr|movDN0ig", "eorDNig", "ands|tstD0Nig"
+ },
+ false -- unallocated
+ },
+ {
+ shift = 29, mask = 3,
+ [0] = "andDNig", "orr|movDN0ig", "eorDNig", "ands|tstD0Nig"
+ }
+}
+
+local map_movwi = { -- Move wide immediate.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 22, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "movnDWRg", false, "movz|movDYRg", "movkDWRg"
+ }, false -- unallocated
+ },
+ {
+ shift = 29, mask = 3,
+ [0] = "movnDWRg", false, "movz|movDYRg", "movkDWRg"
+ },
+}
+
+local map_bitf = { -- Bitfield.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 22, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "sbfm|sbfiz|sbfx|asr|sxtw|sxth|sxtbDN12w",
+ "bfm|bfi|bfxilDN13w",
+ "ubfm|ubfiz|ubfx|lsr|lsl|uxth|uxtbDN12w"
+ }
+ },
+ {
+ shift = 22, mask = 1,
+ {
+ shift = 29, mask = 3,
+ [0] = "sbfm|sbfiz|sbfx|asr|sxtw|sxth|sxtbDN12x",
+ "bfm|bfi|bfxilDN13x",
+ "ubfm|ubfiz|ubfx|lsr|lsl|uxth|uxtbDN12x"
+ }
+ }
+}
+
+local map_datai = { -- Data processing - immediate.
+ shift = 23, mask = 7,
+ [0] = map_adr, map_adr, map_addsubi, false,
+ map_logi, map_movwi, map_bitf,
+ {
+ shift = 15, mask = 0x1c0c1,
+ [0] = "extr|rorDNM4w", [0x10080] = "extr|rorDNM4x",
+ [0x10081] = "extr|rorDNM4x"
+ }
+}
+
+local map_logsr = { -- Logical, shifted register.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 15, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = {
+ shift = 21, mask = 7,
+ [0] = "andDNMSg", "bicDNMSg", "andDNMSg", "bicDNMSg",
+ "andDNMSg", "bicDNMSg", "andDNMg", "bicDNMg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] ="orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0MSg", "orn|mvnDN0MSg",
+ "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0Mg", "orn|mvnDN0Mg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "eorDNMSg", "eonDNMSg", "eorDNMSg", "eonDNMSg",
+ "eorDNMSg", "eonDNMSg", "eorDNMg", "eonDNMg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMSg", "bicsDNMSg",
+ "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMg", "bicsDNMg"
+ }
+ },
+ false -- unallocated
+ },
+ {
+ shift = 29, mask = 3,
+ [0] = {
+ shift = 21, mask = 7,
+ [0] = "andDNMSg", "bicDNMSg", "andDNMSg", "bicDNMSg",
+ "andDNMSg", "bicDNMSg", "andDNMg", "bicDNMg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0MSg", "orn|mvnDN0MSg",
+ "orr|movDN0MSg", "orn|mvnDN0MSg", "orr|movDN0Mg", "orn|mvnDN0Mg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "eorDNMSg", "eonDNMSg", "eorDNMSg", "eonDNMSg",
+ "eorDNMSg", "eonDNMSg", "eorDNMg", "eonDNMg"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMSg", "bicsDNMSg",
+ "ands|tstD0NMSg", "bicsDNMSg", "ands|tstD0NMg", "bicsDNMg"
+ }
+ }
+}
+
+local map_assh = {
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 15, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "addDNMSg", "addDNMSg", "addDNMSg", "addDNMg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "adds|cmnD0NMSg", "adds|cmnD0NMSg",
+ "adds|cmnD0NMSg", "adds|cmnD0NMg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0Mg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0MzSg",
+ "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0Mzg"
+ },
+ },
+ false -- unallocated
+ },
+ {
+ shift = 29, mask = 3,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "addDNMSg", "addDNMSg", "addDNMSg", "addDNMg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "adds|cmnD0NMSg", "adds|cmnD0NMSg", "adds|cmnD0NMSg",
+ "adds|cmnD0NMg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0MSg", "sub|negDN0Mg"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0MzSg",
+ "subs|cmp|negsD0N0MzSg", "subs|cmp|negsD0N0Mzg"
+ }
+ }
+}
+
+local map_addsubsh = { -- Add/subtract, shifted register.
+ shift = 22, mask = 3,
+ [0] = map_assh, map_assh, map_assh
+}
+
+local map_addsubex = { -- Add/subtract, extended register.
+ shift = 22, mask = 3,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "addDNMXg", "adds|cmnD0NMXg", "subDNMXg", "subs|cmpD0NMzXg",
+ }
+}
+
+local map_addsubc = { -- Add/subtract, with carry.
+ shift = 10, mask = 63,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "adcDNMg", "adcsDNMg", "sbc|ngcDN0Mg", "sbcs|ngcsDN0Mg",
+ }
+}
+
+local map_ccomp = {
+ shift = 4, mask = 1,
+ [0] = {
+ shift = 10, mask = 3,
+ [0] = { -- Conditional compare register.
+ shift = 29, mask = 3,
+ "ccmnNMVCg", false, "ccmpNMVCg",
+ },
+ [2] = { -- Conditional compare immediate.
+ shift = 29, mask = 3,
+ "ccmnN5VCg", false, "ccmpN5VCg",
+ }
+ }
+}
+
+local map_csel = { -- Conditional select.
+ shift = 11, mask = 1,
+ [0] = {
+ shift = 10, mask = 1,
+ [0] = {
+ shift = 29, mask = 3,
+ [0] = "cselDNMzCg", false, "csinv|cinv|csetmDNMcg", false,
+ },
+ {
+ shift = 29, mask = 3,
+ [0] = "csinc|cinc|csetDNMcg", false, "csneg|cnegDNMcg", false,
+ }
+ }
+}
+
+local map_data1s = { -- Data processing, 1 source.
+ shift = 29, mask = 1,
+ [0] = {
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 10, mask = 0x7ff,
+ [0] = "rbitDNg", "rev16DNg", "revDNw", false, "clzDNg", "clsDNg"
+ },
+ {
+ shift = 10, mask = 0x7ff,
+ [0] = "rbitDNg", "rev16DNg", "rev32DNx", "revDNx", "clzDNg", "clsDNg"
+ }
+ }
+}
+
+local map_data2s = { -- Data processing, 2 sources.
+ shift = 29, mask = 1,
+ [0] = {
+ shift = 10, mask = 63,
+ false, "udivDNMg", "sdivDNMg", false, false, false, false, "lslDNMg",
+ "lsrDNMg", "asrDNMg", "rorDNMg"
+ }
+}
+
+local map_data3s = { -- Data processing, 3 sources.
+ shift = 29, mask = 7,
+ [0] = {
+ shift = 21, mask = 7,
+ [0] = {
+ shift = 15, mask = 1,
+ [0] = "madd|mulDNMA0g", "msub|mnegDNMA0g"
+ }
+ }, false, false, false,
+ {
+ shift = 15, mask = 1,
+ [0] = {
+ shift = 21, mask = 7,
+ [0] = "madd|mulDNMA0g", "smaddl|smullDxNMwA0x", "smulhDNMx", false,
+ false, "umaddl|umullDxNMwA0x", "umulhDNMx"
+ },
+ {
+ shift = 21, mask = 7,
+ [0] = "msub|mnegDNMA0g", "smsubl|smneglDxNMwA0x", false, false,
+ false, "umsubl|umneglDxNMwA0x"
+ }
+ }
+}
+
+local map_datar = { -- Data processing, register.
+ shift = 28, mask = 1,
+ [0] = {
+ shift = 24, mask = 1,
+ [0] = map_logsr,
+ {
+ shift = 21, mask = 1,
+ [0] = map_addsubsh, map_addsubex
+ }
+ },
+ {
+ shift = 21, mask = 15,
+ [0] = map_addsubc, false, map_ccomp, false, map_csel, false,
+ {
+ shift = 30, mask = 1,
+ [0] = map_data2s, map_data1s
+ },
+ false, map_data3s, map_data3s, map_data3s, map_data3s, map_data3s,
+ map_data3s, map_data3s, map_data3s
+ }
+}
+
+local map_lrl = { -- Load register, literal.
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 30, mask = 3,
+ [0] = "ldrDwB", "ldrDxB", "ldrswDxB"
+ },
+ {
+ shift = 30, mask = 3,
+ [0] = "ldrDsB", "ldrDdB"
+ }
+}
+
+local map_lsriind = { -- Load/store register, immediate pre/post-indexed.
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strbDwzL", "ldrbDwzL", "ldrsbDxzL", "ldrsbDwzL"
+ }
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strhDwzL", "ldrhDwzL", "ldrshDxzL", "ldrshDwzL"
+ }
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strDwzL", "ldrDwzL", "ldrswDxzL"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDszL", "ldrDszL"
+ }
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strDxzL", "ldrDxzL"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDdzL", "ldrDdzL"
+ }
+ }
+}
+
+local map_lsriro = {
+ shift = 21, mask = 1,
+ [0] = { -- Load/store register immediate.
+ shift = 10, mask = 3,
+ [0] = { -- Unscaled immediate.
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "sturbDwK", "ldurbDwK"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "sturhDwK", "ldurhDwK"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "sturDwK", "ldurDwK"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "sturDxK", "ldurDxK"
+ }
+ }
+ }, map_lsriind, false, map_lsriind
+ },
+ { -- Load/store register, register offset.
+ shift = 10, mask = 3,
+ [2] = {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strbDwO", "ldrbDwO", "ldrsbDxO", "ldrsbDwO"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strhDwO", "ldrhDwO", "ldrshDxO", "ldrshDwO"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDwO", "ldrDwO", "ldrswDxO"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDxO", "ldrDxO"
+ }
+ },
+ {
+ shift = 30, mask = 3,
+ [2] = {
+ shift = 22, mask = 3,
+ [0] = "strDsO", "ldrDsO"
+ },
+ [3] = {
+ shift = 22, mask = 3,
+ [0] = "strDdO", "ldrDdO"
+ }
+ }
+ }
+ }
+}
+
+local map_lsp = { -- Load/store register pair, offset.
+ shift = 22, mask = 1,
+ [0] = {
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 26, mask = 1,
+ [0] = "stpDzAzwP", "stpDzAzsP",
+ },
+ {
+ shift = 26, mask = 1,
+ "stpDzAzdP"
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = "stpDzAzxP"
+ }
+ },
+ {
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 26, mask = 1,
+ [0] = "ldpDzAzwP", "ldpDzAzsP",
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = "ldpswDAxP", "ldpDzAzdP"
+ },
+ {
+ shift = 26, mask = 1,
+ [0] = "ldpDzAzxP"
+ }
+ }
+}
+
+local map_ls = { -- Loads and stores.
+ shift = 24, mask = 0x31,
+ [0x10] = map_lrl, [0x30] = map_lsriro,
+ [0x20] = {
+ shift = 23, mask = 3,
+ map_lsp, map_lsp, map_lsp
+ },
+ [0x21] = {
+ shift = 23, mask = 3,
+ map_lsp, map_lsp, map_lsp
+ },
+ [0x31] = {
+ shift = 26, mask = 1,
+ [0] = {
+ shift = 30, mask = 3,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = "strbDwzU", "ldrbDwzU"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strhDwzU", "ldrhDwzU"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDwzU", "ldrDwzU"
+ },
+ {
+ shift = 22, mask = 3,
+ [0] = "strDxzU", "ldrDxzU"
+ }
+ },
+ {
+ shift = 30, mask = 3,
+ [2] = {
+ shift = 22, mask = 3,
+ [0] = "strDszU", "ldrDszU"
+ },
+ [3] = {
+ shift = 22, mask = 3,
+ [0] = "strDdzU", "ldrDdzU"
+ }
+ }
+ },
+}
+
+local map_datafp = { -- Data processing, SIMD and FP.
+ shift = 28, mask = 7,
+ { -- 001
+ shift = 24, mask = 1,
+ [0] = {
+ shift = 21, mask = 1,
+ {
+ shift = 10, mask = 3,
+ [0] = {
+ shift = 12, mask = 1,
+ [0] = {
+ shift = 13, mask = 1,
+ [0] = {
+ shift = 14, mask = 1,
+ [0] = {
+ shift = 15, mask = 1,
+ [0] = { -- FP/int conversion.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 16, mask = 0xff,
+ [0x20] = "fcvtnsDwNs", [0x21] = "fcvtnuDwNs",
+ [0x22] = "scvtfDsNw", [0x23] = "ucvtfDsNw",
+ [0x24] = "fcvtasDwNs", [0x25] = "fcvtauDwNs",
+ [0x26] = "fmovDwNs", [0x27] = "fmovDsNw",
+ [0x28] = "fcvtpsDwNs", [0x29] = "fcvtpuDwNs",
+ [0x30] = "fcvtmsDwNs", [0x31] = "fcvtmuDwNs",
+ [0x38] = "fcvtzsDwNs", [0x39] = "fcvtzuDwNs",
+ [0x60] = "fcvtnsDwNd", [0x61] = "fcvtnuDwNd",
+ [0x62] = "scvtfDdNw", [0x63] = "ucvtfDdNw",
+ [0x64] = "fcvtasDwNd", [0x65] = "fcvtauDwNd",
+ [0x68] = "fcvtpsDwNd", [0x69] = "fcvtpuDwNd",
+ [0x70] = "fcvtmsDwNd", [0x71] = "fcvtmuDwNd",
+ [0x78] = "fcvtzsDwNd", [0x79] = "fcvtzuDwNd"
+ },
+ {
+ shift = 16, mask = 0xff,
+ [0x20] = "fcvtnsDxNs", [0x21] = "fcvtnuDxNs",
+ [0x22] = "scvtfDsNx", [0x23] = "ucvtfDsNx",
+ [0x24] = "fcvtasDxNs", [0x25] = "fcvtauDxNs",
+ [0x28] = "fcvtpsDxNs", [0x29] = "fcvtpuDxNs",
+ [0x30] = "fcvtmsDxNs", [0x31] = "fcvtmuDxNs",
+ [0x38] = "fcvtzsDxNs", [0x39] = "fcvtzuDxNs",
+ [0x60] = "fcvtnsDxNd", [0x61] = "fcvtnuDxNd",
+ [0x62] = "scvtfDdNx", [0x63] = "ucvtfDdNx",
+ [0x64] = "fcvtasDxNd", [0x65] = "fcvtauDxNd",
+ [0x66] = "fmovDxNd", [0x67] = "fmovDdNx",
+ [0x68] = "fcvtpsDxNd", [0x69] = "fcvtpuDxNd",
+ [0x70] = "fcvtmsDxNd", [0x71] = "fcvtmuDxNd",
+ [0x78] = "fcvtzsDxNd", [0x79] = "fcvtzuDxNd"
+ }
+ }
+ },
+ { -- FP data-processing, 1 source.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 22, mask = 3,
+ [0] = {
+ shift = 15, mask = 63,
+ [0] = "fmovDNf", "fabsDNf", "fnegDNf",
+ "fsqrtDNf", false, "fcvtDdNs", false, false,
+ "frintnDNf", "frintpDNf", "frintmDNf", "frintzDNf",
+ "frintaDNf", false, "frintxDNf", "frintiDNf",
+ },
+ {
+ shift = 15, mask = 63,
+ [0] = "fmovDNf", "fabsDNf", "fnegDNf",
+ "fsqrtDNf", "fcvtDsNd", false, false, false,
+ "frintnDNf", "frintpDNf", "frintmDNf", "frintzDNf",
+ "frintaDNf", false, "frintxDNf", "frintiDNf",
+ }
+ }
+ }
+ },
+ { -- FP compare.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 14, mask = 3,
+ [0] = {
+ shift = 23, mask = 1,
+ [0] = {
+ shift = 0, mask = 31,
+ [0] = "fcmpNMf", [8] = "fcmpNZf",
+ [16] = "fcmpeNMf", [24] = "fcmpeNZf",
+ }
+ }
+ }
+ }
+ },
+ { -- FP immediate.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 5, mask = 31,
+ [0] = {
+ shift = 23, mask = 1,
+ [0] = "fmovDFf"
+ }
+ }
+ }
+ },
+ { -- FP conditional compare.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 23, mask = 1,
+ [0] = {
+ shift = 4, mask = 1,
+ [0] = "fccmpNMVCf", "fccmpeNMVCf"
+ }
+ }
+ },
+ { -- FP data-processing, 2 sources.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 23, mask = 1,
+ [0] = {
+ shift = 12, mask = 15,
+ [0] = "fmulDNMf", "fdivDNMf", "faddDNMf", "fsubDNMf",
+ "fmaxDNMf", "fminDNMf", "fmaxnmDNMf", "fminnmDNMf",
+ "fnmulDNMf"
+ }
+ }
+ },
+ { -- FP conditional select.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 23, mask = 1,
+ [0] = "fcselDNMCf"
+ }
+ }
+ }
+ },
+ { -- FP data-processing, 3 sources.
+ shift = 31, mask = 1,
+ [0] = {
+ shift = 15, mask = 1,
+ [0] = {
+ shift = 21, mask = 5,
+ [0] = "fmaddDNMAf", "fnmaddDNMAf"
+ },
+ {
+ shift = 21, mask = 5,
+ [0] = "fmsubDNMAf", "fnmsubDNMAf"
+ }
+ }
+ }
+ }
+}
+
+local map_br = { -- Branches, exception generating and system instructions.
+ shift = 29, mask = 7,
+ [0] = "bB",
+ { -- Compare & branch, immediate.
+ shift = 24, mask = 3,
+ [0] = "cbzDBg", "cbnzDBg", "tbzDTBw", "tbnzDTBw"
+ },
+ { -- Conditional branch, immediate.
+ shift = 24, mask = 3,
+ [0] = {
+ shift = 4, mask = 1,
+ [0] = {
+ shift = 0, mask = 15,
+ [0] = "beqB", "bneB", "bhsB", "bloB", "bmiB", "bplB", "bvsB", "bvcB",
+ "bhiB", "blsB", "bgeB", "bltB", "bgtB", "bleB", "balB"
+ }
+ }
+ }, false, "blB",
+ { -- Compare & branch, immediate.
+ shift = 24, mask = 3,
+ [0] = "cbzDBg", "cbnzDBg", "tbzDTBx", "tbnzDTBx"
+ },
+ {
+ shift = 24, mask = 3,
+ [0] = { -- Exception generation.
+ shift = 0, mask = 0xe0001f,
+ [0x200000] = "brkW"
+ },
+ { -- System instructions.
+ shift = 0, mask = 0x3fffff,
+ [0x03201f] = "nop"
+ },
+ { -- Unconditional branch, register.
+ shift = 0, mask = 0xfffc1f,
+ [0x1f0000] = "brNx", [0x3f0000] = "blrNx",
+ [0x5f0000] = "retNx"
+ },
+ }
+}
+
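+-- Top-level dispatch table, indexed with instruction bits 28..25.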
+local map_init = {
+ shift = 25, mask = 15,
+ [0] = false, false, false, false, map_ls, map_datar, map_ls, map_datafp,
+ map_datai, map_datai, map_br, map_br, map_ls, map_datar, map_ls, map_datafp
+}
+
+------------------------------------------------------------------------------
+
+local map_regs = { x = {}, w = {}, d = {}, s = {} }
+
+for i=0,30 do
+ map_regs.x[i] = "x"..i
+ map_regs.w[i] = "w"..i
+ map_regs.d[i] = "d"..i
+ map_regs.s[i] = "s"..i
+end
+map_regs.x[31] = "sp"
+map_regs.w[31] = "wsp"
+map_regs.d[31] = "d31"
+map_regs.s[31] = "s31"
+
+local map_cond = {
+ [0] = "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al",
+}
+
+local map_shift = { [0] = "lsl", "lsr", "asr", }
+
+local map_extend = {
+ [0] = "uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx",
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then
+ extra = "\t->"..sym
+ end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-5s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-5s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+local function match_reg(p, pat, regnum)
+ return map_regs[match(pat, p.."%w-([xwds])")][regnum]
+end
+
+local function fmt_hex32(x)
+ if x < 0 then
+ return tohex(x)
+ else
+ return format("%x", x)
+ end
+end
+
+local imm13_rep = { 0x55555555, 0x11111111, 0x01010101, 0x00010001, 0x00000001 }
+
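+-- Decode an AArch64 "logical immediate" (the N:immr:imms bitmask encoding)
+-- into a hex string; imm13_rep replicates the 2/4/8/16/32-bit element
+-- pattern across a 32-bit half.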
+local function decode_imm13(op)
+ local imms = band(rshift(op, 10), 63)
+ local immr = band(rshift(op, 16), 63)
+ if band(op, 0x00400000) == 0 then
+ local len = 5
+ if imms >= 56 then
+ if imms >= 60 then len = 1 else len = 2 end
+ elseif imms >= 48 then len = 3 elseif imms >= 32 then len = 4 end
+ local l = lshift(1, len)-1
+ local s = band(imms, l)
+ local r = band(immr, l)
+ local imm = ror(rshift(-1, 31-s), r)
+ if len ~= 5 then imm = band(imm, lshift(1, l)-1) + rshift(imm, 31-l) end
+ imm = imm * imm13_rep[len]
+ local ix = fmt_hex32(imm)
+ if rshift(op, 31) ~= 0 then
+ return ix..tohex(imm)
+ else
+ return ix
+ end
+ else
+ local lo, hi = -1, 0
+ if imms < 32 then lo = rshift(-1, 31-imms) else hi = rshift(-1, 63-imms) end
+ if immr ~= 0 then
+ lo, hi = ror(lo, immr), ror(hi, immr)
+ local x = immr == 32 and 0 or band(bxor(lo, hi), lshift(-1, 32-immr))
+ lo, hi = bxor(lo, x), bxor(hi, x)
+ if immr >= 32 then lo, hi = hi, lo end
+ end
+ if hi ~= 0 then
+ return fmt_hex32(hi)..tohex(lo)
+ else
+ return fmt_hex32(lo)
+ end
+ end
+end
+
+local function parse_immpc(op, name)
+ if name == "b" or name == "bl" then
+ return arshift(lshift(op, 6), 4)
+ elseif name == "adr" or name == "adrp" then
+ local immlo = band(rshift(op, 29), 3)
+ local immhi = lshift(arshift(lshift(op, 8), 13), 2)
+ return bor(immhi, immlo)
+ elseif name == "tbz" or name == "tbnz" then
+ return lshift(arshift(lshift(op, 13), 18), 2)
+ else
+ return lshift(arshift(lshift(op, 8), 13), 2)
+ end
+end
+
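+-- Decode the 8-bit FP literal (sign, 3-bit biased exponent, 4-bit fraction
+-- with an implicit leading one) used by fmov with an immediate operand.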
+local function parse_fpimm8(op)
+ local sign = band(op, 0x100000) == 0 and 1 or -1
+ local exp = bxor(rshift(arshift(lshift(op, 12), 5), 24), 0x80) - 131
+ local frac = 16+band(rshift(op, 13), 15)
+ return sign * frac * 2^exp
+end
+
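+-- Preferred-disassembly heuristic: report whether a sbfm/ubfm should be
+-- shown as sbfx/ubfx rather than as a shift or extend alias.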
+local function prefer_bfx(sf, uns, imms, immr)
+ if imms < immr or imms == 31 or imms == 63 then
+ return false
+ end
+ if immr == 0 then
+ if sf == 0 and (imms == 7 or imms == 15) then
+ return false
+ end
+ if sf ~= 0 and uns == 0 and (imms == 7 or imms == 15 or imms == 31) then
+ return false
+ end
+ end
+ return true
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+ local operands = {}
+ local suffix = ""
+ local last, name, pat
+ local map_reg
+ ctx.op = op
+ ctx.rel = nil
+ last = nil
+ local opat
+ opat = map_init[band(rshift(op, 25), 15)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ name, pat = match(opat, "^([a-z0-9]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
+ if altname then pat = pat2 end
+ if sub(pat, 1, 1) == "." then
+ local s2, p2 = match(pat, "^([a-z0-9.]*)(.*)")
+ suffix = suffix..s2
+ pat = p2
+ end
+
+ local rt = match(pat, "[gf]")
+ if rt then
+ if rt == "g" then
+ map_reg = band(op, 0x80000000) ~= 0 and map_regs.x or map_regs.w
+ else
+ map_reg = band(op, 0x400000) ~= 0 and map_regs.d or map_regs.s
+ end
+ end
+
+ local second0, immr
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "D" then
+ local regnum = band(op, 31)
+ x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
+ elseif p == "N" then
+ local regnum = band(rshift(op, 5), 31)
+ x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
+ elseif p == "M" then
+ local regnum = band(rshift(op, 16), 31)
+ x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
+ elseif p == "A" then
+ local regnum = band(rshift(op, 10), 31)
+ x = rt and map_reg[regnum] or match_reg(p, pat, regnum)
+ elseif p == "B" then
+ local addr = ctx.addr + pos + parse_immpc(op, name)
+ ctx.rel = addr
+ x = "0x"..tohex(addr)
+ elseif p == "T" then
+ x = bor(band(rshift(op, 26), 32), band(rshift(op, 19), 31))
+ elseif p == "V" then
+ x = band(op, 15)
+ elseif p == "C" then
+ x = map_cond[band(rshift(op, 12), 15)]
+ elseif p == "c" then
+ local rn = band(rshift(op, 5), 31)
+ local rm = band(rshift(op, 16), 31)
+ local cond = band(rshift(op, 12), 15)
+ local invc = bxor(cond, 1)
+ x = map_cond[cond]
+ if altname and cond ~= 14 and cond ~= 15 then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if rn == rm then
+ local n = #operands
+ operands[n] = nil
+ x = map_cond[invc]
+ if rn ~= 31 then
+ if a1 then name = a1 else name = altname end
+ else
+ operands[n-1] = nil
+ name = a2
+ end
+ end
+ end
+ elseif p == "W" then
+ x = band(rshift(op, 5), 0xffff)
+ elseif p == "Y" then
+ x = band(rshift(op, 5), 0xffff)
+ local hw = band(rshift(op, 21), 3)
+ if altname and (hw == 0 or x ~= 0) then
+ name = altname
+ end
+ elseif p == "L" then
+ local rn = map_regs.x[band(rshift(op, 5), 31)]
+ local imm9 = arshift(lshift(op, 11), 23)
+ if band(op, 0x800) ~= 0 then
+ x = "["..rn..", #"..imm9.."]!"
+ else
+ x = "["..rn.."], #"..imm9
+ end
+ elseif p == "U" then
+ local rn = map_regs.x[band(rshift(op, 5), 31)]
+ local sz = band(rshift(op, 30), 3)
+ local imm12 = lshift(arshift(lshift(op, 10), 20), sz)
+ if imm12 ~= 0 then
+ x = "["..rn..", #"..imm12.."]"
+ else
+ x = "["..rn.."]"
+ end
+ elseif p == "K" then
+ local rn = map_regs.x[band(rshift(op, 5), 31)]
+ local imm9 = arshift(lshift(op, 11), 23)
+ if imm9 ~= 0 then
+ x = "["..rn..", #"..imm9.."]"
+ else
+ x = "["..rn.."]"
+ end
+ elseif p == "O" then
+ local rn, rm = map_regs.x[band(rshift(op, 5), 31)]
+ local m = band(rshift(op, 13), 1)
+ if m == 0 then
+ rm = map_regs.w[band(rshift(op, 16), 31)]
+ else
+ rm = map_regs.x[band(rshift(op, 16), 31)]
+ end
+ x = "["..rn..", "..rm
+ local opt = band(rshift(op, 13), 7)
+ local s = band(rshift(op, 12), 1)
+ local sz = band(rshift(op, 30), 3)
+ -- extension to be applied
+ if opt == 3 then
+ if s == 0 then x = x.."]"
+ else x = x..", lsl #"..sz.."]" end
+ elseif opt == 2 or opt == 6 or opt == 7 then
+ if s == 0 then x = x..", "..map_extend[opt].."]"
+ else x = x..", "..map_extend[opt].." #"..sz.."]" end
+ else
+ x = x.."]"
+ end
+ elseif p == "P" then
+ local opcv, sh = rshift(op, 26), 2
+ if opcv >= 0x2a then sh = 4 elseif opcv >= 0x1b then sh = 3 end
+ local imm7 = lshift(arshift(lshift(op, 10), 25), sh)
+ local rn = map_regs.x[band(rshift(op, 5), 31)]
+ local ind = band(rshift(op, 23), 3)
+ if ind == 1 then
+ x = "["..rn.."], #"..imm7
+ elseif ind == 2 then
+ if imm7 == 0 then
+ x = "["..rn.."]"
+ else
+ x = "["..rn..", #"..imm7.."]"
+ end
+ elseif ind == 3 then
+ x = "["..rn..", #"..imm7.."]!"
+ end
+ elseif p == "I" then
+ local shf = band(rshift(op, 22), 3)
+ local imm12 = band(rshift(op, 10), 0x0fff)
+ local rn, rd = band(rshift(op, 5), 31), band(op, 31)
+ if altname == "mov" and shf == 0 and imm12 == 0 and (rn == 31 or rd == 31) then
+ name = altname
+ x = nil
+ elseif shf == 0 then
+ x = imm12
+ elseif shf == 1 then
+ x = imm12..", lsl #12"
+ end
+ elseif p == "i" then
+ x = "#0x"..decode_imm13(op)
+ elseif p == "1" then
+ immr = band(rshift(op, 16), 63)
+ x = immr
+ elseif p == "2" then
+ x = band(rshift(op, 10), 63)
+ if altname then
+ local a1, a2, a3, a4, a5, a6 =
+ match(altname, "([^|]*)|([^|]*)|([^|]*)|([^|]*)|([^|]*)|(.*)")
+ local sf = band(rshift(op, 26), 32)
+ local uns = band(rshift(op, 30), 1)
+ if prefer_bfx(sf, uns, x, immr) then
+ name = a2
+ x = x - immr + 1
+ elseif immr == 0 and x == 7 then
+ local n = #operands
+ operands[n] = nil
+ if sf ~= 0 then
+ operands[n-1] = gsub(operands[n-1], "x", "w")
+ end
+ last = operands[n-1]
+ name = a6
+ x = nil
+ elseif immr == 0 and x == 15 then
+ local n = #operands
+ operands[n] = nil
+ if sf ~= 0 then
+ operands[n-1] = gsub(operands[n-1], "x", "w")
+ end
+ last = operands[n-1]
+ name = a5
+ x = nil
+ elseif x == 31 or x == 63 then
+ if x == 31 and immr == 0 and name == "sbfm" then
+ name = a4
+ local n = #operands
+ operands[n] = nil
+ if sf ~= 0 then
+ operands[n-1] = gsub(operands[n-1], "x", "w")
+ end
+ last = operands[n-1]
+ else
+ name = a3
+ end
+ x = nil
+ elseif band(x, 31) ~= 31 and immr == x+1 and name == "ubfm" then
+ name = a4
+ last = "#"..(sf+32 - immr)
+ operands[#operands] = last
+ x = nil
+ elseif x < immr then
+ name = a1
+ last = "#"..(sf+32 - immr)
+ operands[#operands] = last
+ x = x + 1
+ end
+ end
+ elseif p == "3" then
+ x = band(rshift(op, 10), 63)
+ if altname then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if x < immr then
+ name = a1
+ local sf = band(rshift(op, 26), 32)
+ last = "#"..(sf+32 - immr)
+ operands[#operands] = last
+ x = x + 1
+ else
+ name = a2
+ x = x - immr + 1
+ end
+ end
+ elseif p == "4" then
+ x = band(rshift(op, 10), 63)
+ local rn = band(rshift(op, 5), 31)
+ local rm = band(rshift(op, 16), 31)
+ if altname and rn == rm then
+ local n = #operands
+ operands[n] = nil
+ last = operands[n-1]
+ name = altname
+ end
+ elseif p == "5" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "S" then
+ x = band(rshift(op, 10), 63)
+ if x == 0 then x = nil
+ else x = map_shift[band(rshift(op, 22), 3)].." #"..x end
+ elseif p == "X" then
+ local opt = band(rshift(op, 13), 7)
+ -- Width specifier <R>.
+ if opt ~= 3 and opt ~= 7 then
+ last = map_regs.w[band(rshift(op, 16), 31)]
+ operands[#operands] = last
+ end
+ x = band(rshift(op, 10), 7)
+ -- Extension.
+ if opt == 2 + band(rshift(op, 31), 1) and
+ band(rshift(op, second0 and 5 or 0), 31) == 31 then
+ if x == 0 then x = nil
+ else x = "lsl #"..x end
+ else
+ if x == 0 then x = map_extend[band(rshift(op, 13), 7)]
+ else x = map_extend[band(rshift(op, 13), 7)].." #"..x end
+ end
+ elseif p == "R" then
+ x = band(rshift(op,21), 3)
+ if x == 0 then x = nil
+ else x = "lsl #"..x*16 end
+ elseif p == "z" then
+ local n = #operands
+ if operands[n] == "sp" then operands[n] = "xzr"
+ elseif operands[n] == "wsp" then operands[n] = "wzr"
+ end
+ elseif p == "Z" then
+ x = 0
+ elseif p == "F" then
+ x = parse_fpimm8(op)
+ elseif p == "g" or p == "f" or p == "x" or p == "w" or
+ p == "d" or p == "s" then
+ -- These are handled in D/N/M/A.
+ elseif p == "0" then
+ if last == "sp" or last == "wsp" then
+ local n = #operands
+ operands[n] = nil
+ last = operands[n-1]
+ if altname then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if not a1 then
+ name = altname
+ elseif second0 then
+ name, altname = a2, a1
+ else
+ name, altname = a1, a2
+ end
+ end
+ end
+ second0 = true
+ else
+ assert(false)
+ end
+ if x then
+ last = x
+ if type(x) == "number" then x = "#"..x end
+ operands[#operands+1] = x
+ end
+ end
+
+ return putop(ctx, name..suffix, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ctx.pos = ofs
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass(code, addr, out)
+ create(code, addr, out):disass()
+end
+
+-- Return register name for RID.
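+-- (e.g. regname(0) == "x0", regname(31) == "sp", regname(32) == "d0")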
+local function regname(r)
+ if r < 32 then return map_regs.x[r] end
+ return map_regs.d[r-32]
+end
+
+-- Public module functions.
+return {
+ create = create,
+ disass = disass,
+ regname = regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_arm64be.lua b/libs/luajit-cmake/luajit/src/jit/dis_arm64be.lua
new file mode 100644
index 0000000..7337f5b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_arm64be.lua
@@ -0,0 +1,12 @@
+----------------------------------------------------------------------------
+-- LuaJIT ARM64BE disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- ARM64 instructions are always little-endian. So just forward to the
+-- common ARM64 disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+return require((string.match(..., ".*%.") or "").."dis_arm64")
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mips.lua b/libs/luajit-cmake/luajit/src/jit/dis_mips.lua
new file mode 100644
index 0000000..05dc30f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mips.lua
@@ -0,0 +1,694 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS disassembler module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all standard MIPS32R1/R2 instructions.
+-- Default mode is big-endian, but see: dis_mipsel.lua
+------------------------------------------------------------------------------
+
+local type = type
+local byte, format = string.byte, string.format
+local match, gmatch = string.match, string.gmatch
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Extended opcode maps common to all MIPS releases
+------------------------------------------------------------------------------
+
+local map_srl = { shift = 21, mask = 1, [0] = "srlDTA", "rotrDTA", }
+local map_srlv = { shift = 6, mask = 1, [0] = "srlvDTS", "rotrvDTS", }
+
+local map_cop0 = {
+ shift = 25, mask = 1,
+ [0] = {
+ shift = 21, mask = 15,
+ [0] = "mfc0TDW", [4] = "mtc0TDW",
+ [10] = "rdpgprDT",
+ [11] = { shift = 5, mask = 1, [0] = "diT0", "eiT0", },
+ [14] = "wrpgprDT",
+ }, {
+ shift = 0, mask = 63,
+ [1] = "tlbr", [2] = "tlbwi", [6] = "tlbwr", [8] = "tlbp",
+ [24] = "eret", [31] = "deret",
+ [32] = "wait",
+ },
+}
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps for MIPS R1-R5
+------------------------------------------------------------------------------
+
+local map_movci = { shift = 16, mask = 1, [0] = "movfDSC", "movtDSC", }
+
+local map_special = {
+ shift = 0, mask = 63,
+ [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
+ map_movci, map_srl, "sraDTA",
+ "sllvDTS", false, map_srlv, "sravDTS",
+ "jrS", "jalrD1S", "movzDST", "movnDST",
+ "syscallY", "breakY", false, "sync",
+ "mfhiD", "mthiS", "mfloD", "mtloS",
+ "dsllvDST", false, "dsrlvDST", "dsravDST",
+ "multST", "multuST", "divST", "divuST",
+ "dmultST", "dmultuST", "ddivST", "ddivuST",
+ "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
+ "andDST", "or|moveDST0", "xorDST", "nor|notDST0",
+ false, false, "sltDST", "sltuDST",
+ "daddDST", "dadduDST", "dsubDST", "dsubuDST",
+ "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
+ "teqSTZ", false, "tneSTZ", false,
+ "dsllDTA", false, "dsrlDTA", "dsraDTA",
+ "dsll32DTA", false, "dsrl32DTA", "dsra32DTA",
+}
+
+local map_special2 = {
+ shift = 0, mask = 63,
+ [0] = "maddST", "madduST", "mulDST", false,
+ "msubST", "msubuST",
+ [32] = "clzDS", [33] = "cloDS",
+ [63] = "sdbbpY",
+}
+
+local map_bshfl = {
+ shift = 6, mask = 31,
+ [2] = "wsbhDT",
+ [16] = "sebDT",
+ [24] = "sehDT",
+}
+
+local map_dbshfl = {
+ shift = 6, mask = 31,
+ [2] = "dsbhDT",
+ [5] = "dshdDT",
+}
+
+local map_special3 = {
+ shift = 0, mask = 63,
+ [0] = "extTSAK", [1] = "dextmTSAP", [3] = "dextTSAK",
+ [4] = "insTSAL", [6] = "dinsuTSEQ", [7] = "dinsTSAL",
+ [32] = map_bshfl, [36] = map_dbshfl, [59] = "rdhwrTD",
+}
+
+local map_regimm = {
+ shift = 16, mask = 31,
+ [0] = "bltzSB", "bgezSB", "bltzlSB", "bgezlSB",
+ false, false, false, false,
+ "tgeiSI", "tgeiuSI", "tltiSI", "tltiuSI",
+ "teqiSI", false, "tneiSI", false,
+ "bltzalSB", "bgezalSB", "bltzallSB", "bgezallSB",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, "synciSO",
+}
+
+local map_cop1s = {
+ shift = 0, mask = 63,
+ [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
+ "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
+ "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
+ "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.sFGC", "movt.sFGC" },
+ "movz.sFGT", "movn.sFGT",
+ false, "recip.sFG", "rsqrt.sFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ false, "cvt.d.sFG", false, false,
+ "cvt.w.sFG", "cvt.l.sFG", "cvt.ps.sFGH", false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.sVGH", "c.un.sVGH", "c.eq.sVGH", "c.ueq.sVGH",
+ "c.olt.sVGH", "c.ult.sVGH", "c.ole.sVGH", "c.ule.sVGH",
+ "c.sf.sVGH", "c.ngle.sVGH", "c.seq.sVGH", "c.ngl.sVGH",
+ "c.lt.sVGH", "c.nge.sVGH", "c.le.sVGH", "c.ngt.sVGH",
+}
+
+local map_cop1d = {
+ shift = 0, mask = 63,
+ [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
+ "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
+ "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
+ "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
+ false,
+ { shift = 16, mask = 1, [0] = "movf.dFGC", "movt.dFGC" },
+ "movz.dFGT", "movn.dFGT",
+ false, "recip.dFG", "rsqrt.dFG", false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.dFG", false, false, false,
+ "cvt.w.dFG", "cvt.l.dFG", false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "c.f.dVGH", "c.un.dVGH", "c.eq.dVGH", "c.ueq.dVGH",
+ "c.olt.dVGH", "c.ult.dVGH", "c.ole.dVGH", "c.ule.dVGH",
+ "c.df.dVGH", "c.ngle.dVGH", "c.deq.dVGH", "c.ngl.dVGH",
+ "c.lt.dVGH", "c.nge.dVGH", "c.le.dVGH", "c.ngt.dVGH",
+}
+
+local map_cop1ps = {
+ shift = 0, mask = 63,
+ [0] = "add.psFGH", "sub.psFGH", "mul.psFGH", false,
+ false, "abs.psFG", "mov.psFG", "neg.psFG",
+ false, false, false, false,
+ false, false, false, false,
+ false,
+ { shift = 16, mask = 1, [0] = "movf.psFGC", "movt.psFGC" },
+ "movz.psFGT", "movn.psFGT",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ "cvt.s.puFG", false, false, false,
+ false, false, false, false,
+ "cvt.s.plFG", false, false, false,
+ "pll.psFGH", "plu.psFGH", "pul.psFGH", "puu.psFGH",
+ "c.f.psVGH", "c.un.psVGH", "c.eq.psVGH", "c.ueq.psVGH",
+ "c.olt.psVGH", "c.ult.psVGH", "c.ole.psVGH", "c.ule.psVGH",
+ "c.psf.psVGH", "c.ngle.psVGH", "c.pseq.psVGH", "c.ngl.psVGH",
+ "c.lt.psVGH", "c.nge.psVGH", "c.le.psVGH", "c.ngt.psVGH",
+}
+
+local map_cop1w = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.wFG", [33] = "cvt.d.wFG",
+}
+
+local map_cop1l = {
+ shift = 0, mask = 63,
+ [32] = "cvt.s.lFG", [33] = "cvt.d.lFG",
+}
+
+local map_cop1bc = {
+ shift = 16, mask = 3,
+ [0] = "bc1fCB", "bc1tCB", "bc1flCB", "bc1tlCB",
+}
+
+local map_cop1 = {
+ shift = 21, mask = 31,
+ [0] = "mfc1TG", "dmfc1TG", "cfc1TG", "mfhc1TG",
+ "mtc1TG", "dmtc1TG", "ctc1TG", "mthc1TG",
+ map_cop1bc, false, false, false,
+ false, false, false, false,
+ map_cop1s, map_cop1d, false, false,
+ map_cop1w, map_cop1l, map_cop1ps,
+}
+
+local map_cop1x = {
+ shift = 0, mask = 63,
+ [0] = "lwxc1FSX", "ldxc1FSX", false, false,
+ false, "luxc1FSX", false, false,
+ "swxc1FSX", "sdxc1FSX", false, false,
+ false, "suxc1FSX", false, "prefxMSX",
+ false, false, false, false,
+ false, false, false, false,
+ false, false, false, false,
+ false, false, "alnv.psFGHS", false,
+ "madd.sFRGH", "madd.dFRGH", false, false,
+ false, false, "madd.psFRGH", false,
+ "msub.sFRGH", "msub.dFRGH", false, false,
+ false, false, "msub.psFRGH", false,
+ "nmadd.sFRGH", "nmadd.dFRGH", false, false,
+ false, false, "nmadd.psFRGH", false,
+ "nmsub.sFRGH", "nmsub.dFRGH", false, false,
+ false, false, "nmsub.psFRGH", false,
+}
+
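+-- One entry per 6-bit primary opcode (instruction bits 31..26).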
+local map_pri = {
+ [0] = map_special, map_regimm, "jJ", "jalJ",
+ "beq|beqz|bST00B", "bne|bnezST0B", "blezSB", "bgtzSB",
+ "addiTSI", "addiu|liTS0I", "sltiTSI", "sltiuTSI",
+ "andiTSU", "ori|liTS0U", "xoriTSU", "luiTU",
+ map_cop0, map_cop1, false, map_cop1x,
+ "beql|beqzlST0B", "bnel|bnezlST0B", "blezlSB", "bgtzlSB",
+ "daddiTSI", "daddiuTSI", false, false,
+ map_special2, "jalxJ", false, map_special3,
+ "lbTSO", "lhTSO", "lwlTSO", "lwTSO",
+ "lbuTSO", "lhuTSO", "lwrTSO", false,
+ "sbTSO", "shTSO", "swlTSO", "swTSO",
+ false, false, "swrTSO", "cacheNSO",
+ "llTSO", "lwc1HSO", "lwc2TSO", "prefNSO",
+ false, "ldc1HSO", "ldc2TSO", "ldTSO",
+ "scTSO", "swc1HSO", "swc2TSO", false,
+ false, "sdc1HSO", "sdc2TSO", "sdTSO",
+}
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps for MIPS R6
+------------------------------------------------------------------------------
+
+local map_mul_r6 = { shift = 6, mask = 3, [2] = "mulDST", [3] = "muhDST" }
+local map_mulu_r6 = { shift = 6, mask = 3, [2] = "muluDST", [3] = "muhuDST" }
+local map_div_r6 = { shift = 6, mask = 3, [2] = "divDST", [3] = "modDST" }
+local map_divu_r6 = { shift = 6, mask = 3, [2] = "divuDST", [3] = "moduDST" }
+local map_dmul_r6 = { shift = 6, mask = 3, [2] = "dmulDST", [3] = "dmuhDST" }
+local map_dmulu_r6 = { shift = 6, mask = 3, [2] = "dmuluDST", [3] = "dmuhuDST" }
+local map_ddiv_r6 = { shift = 6, mask = 3, [2] = "ddivDST", [3] = "dmodDST" }
+local map_ddivu_r6 = { shift = 6, mask = 3, [2] = "ddivuDST", [3] = "dmoduDST" }
+
+local map_special_r6 = {
+ shift = 0, mask = 63,
+ [0] = { shift = 0, mask = -1, [0] = "nop", _ = "sllDTA" },
+ false, map_srl, "sraDTA",
+ "sllvDTS", false, map_srlv, "sravDTS",
+ "jrS", "jalrD1S", false, false,
+ "syscallY", "breakY", false, "sync",
+ "clzDS", "cloDS", "dclzDS", "dcloDS",
+ "dsllvDST", "dlsaDSTA", "dsrlvDST", "dsravDST",
+ map_mul_r6, map_mulu_r6, map_div_r6, map_divu_r6,
+ map_dmul_r6, map_dmulu_r6, map_ddiv_r6, map_ddivu_r6,
+ "addDST", "addu|moveDST0", "subDST", "subu|neguDS0T",
+ "andDST", "or|moveDST0", "xorDST", "nor|notDST0",
+ false, false, "sltDST", "sltuDST",
+ "daddDST", "dadduDST", "dsubDST", "dsubuDST",
+ "tgeSTZ", "tgeuSTZ", "tltSTZ", "tltuSTZ",
+ "teqSTZ", "seleqzDST", "tneSTZ", "selnezDST",
+ "dsllDTA", false, "dsrlDTA", "dsraDTA",
+ "dsll32DTA", false, "dsrl32DTA", "dsra32DTA",
+}
+
+local map_bshfl_r6 = {
+ shift = 9, mask = 3,
+ [1] = "alignDSTa",
+ _ = {
+ shift = 6, mask = 31,
+ [0] = "bitswapDT",
+ [2] = "wsbhDT",
+ [16] = "sebDT",
+ [24] = "sehDT",
+ }
+}
+
+local map_dbshfl_r6 = {
+ shift = 9, mask = 3,
+ [1] = "dalignDSTa",
+ _ = {
+ shift = 6, mask = 31,
+ [0] = "dbitswapDT",
+ [2] = "dsbhDT",
+ [5] = "dshdDT",
+ }
+}
+
+local map_special3_r6 = {
+ shift = 0, mask = 63,
+ [0] = "extTSAK", [1] = "dextmTSAP", [3] = "dextTSAK",
+ [4] = "insTSAL", [6] = "dinsuTSEQ", [7] = "dinsTSAL",
+ [32] = map_bshfl_r6, [36] = map_dbshfl_r6, [59] = "rdhwrTD",
+}
+
+local map_regimm_r6 = {
+ shift = 16, mask = 31,
+ [0] = "bltzSB", [1] = "bgezSB",
+ [6] = "dahiSI", [30] = "datiSI",
+ [23] = "sigrieI", [31] = "synciSO",
+}
+
+local map_pcrel_r6 = {
+ shift = 19, mask = 3,
+ [0] = "addiupcS2", "lwpcS2", "lwupcS2", {
+ shift = 18, mask = 1,
+ [0] = "ldpcS3", { shift = 16, mask = 3, [2] = "auipcSI", [3] = "aluipcSI" }
+ }
+}
+
+local map_cop1s_r6 = {
+ shift = 0, mask = 63,
+ [0] = "add.sFGH", "sub.sFGH", "mul.sFGH", "div.sFGH",
+ "sqrt.sFG", "abs.sFG", "mov.sFG", "neg.sFG",
+ "round.l.sFG", "trunc.l.sFG", "ceil.l.sFG", "floor.l.sFG",
+ "round.w.sFG", "trunc.w.sFG", "ceil.w.sFG", "floor.w.sFG",
+ "sel.sFGH", false, false, false,
+ "seleqz.sFGH", "recip.sFG", "rsqrt.sFG", "selnez.sFGH",
+ "maddf.sFGH", "msubf.sFGH", "rint.sFG", "class.sFG",
+ "min.sFGH", "mina.sFGH", "max.sFGH", "maxa.sFGH",
+ false, "cvt.d.sFG", false, false,
+ "cvt.w.sFG", "cvt.l.sFG",
+}
+
+local map_cop1d_r6 = {
+ shift = 0, mask = 63,
+ [0] = "add.dFGH", "sub.dFGH", "mul.dFGH", "div.dFGH",
+ "sqrt.dFG", "abs.dFG", "mov.dFG", "neg.dFG",
+ "round.l.dFG", "trunc.l.dFG", "ceil.l.dFG", "floor.l.dFG",
+ "round.w.dFG", "trunc.w.dFG", "ceil.w.dFG", "floor.w.dFG",
+ "sel.dFGH", false, false, false,
+ "seleqz.dFGH", "recip.dFG", "rsqrt.dFG", "selnez.dFGH",
+ "maddf.dFGH", "msubf.dFGH", "rint.dFG", "class.dFG",
+ "min.dFGH", "mina.dFGH", "max.dFGH", "maxa.dFGH",
+ "cvt.s.dFG", false, false, false,
+ "cvt.w.dFG", "cvt.l.dFG",
+}
+
+local map_cop1w_r6 = {
+ shift = 0, mask = 63,
+ [0] = "cmp.af.sFGH", "cmp.un.sFGH", "cmp.eq.sFGH", "cmp.ueq.sFGH",
+ "cmp.lt.sFGH", "cmp.ult.sFGH", "cmp.le.sFGH", "cmp.ule.sFGH",
+ "cmp.saf.sFGH", "cmp.sun.sFGH", "cmp.seq.sFGH", "cmp.sueq.sFGH",
+ "cmp.slt.sFGH", "cmp.sult.sFGH", "cmp.sle.sFGH", "cmp.sule.sFGH",
+ false, "cmp.or.sFGH", "cmp.une.sFGH", "cmp.ne.sFGH",
+ false, false, false, false,
+ false, "cmp.sor.sFGH", "cmp.sune.sFGH", "cmp.sne.sFGH",
+ false, false, false, false,
+ "cvt.s.wFG", "cvt.d.wFG",
+}
+
+local map_cop1l_r6 = {
+ shift = 0, mask = 63,
+ [0] = "cmp.af.dFGH", "cmp.un.dFGH", "cmp.eq.dFGH", "cmp.ueq.dFGH",
+ "cmp.lt.dFGH", "cmp.ult.dFGH", "cmp.le.dFGH", "cmp.ule.dFGH",
+ "cmp.saf.dFGH", "cmp.sun.dFGH", "cmp.seq.dFGH", "cmp.sueq.dFGH",
+ "cmp.slt.dFGH", "cmp.sult.dFGH", "cmp.sle.dFGH", "cmp.sule.dFGH",
+ false, "cmp.or.dFGH", "cmp.une.dFGH", "cmp.ne.dFGH",
+ false, false, false, false,
+ false, "cmp.sor.dFGH", "cmp.sune.dFGH", "cmp.sne.dFGH",
+ false, false, false, false,
+ "cvt.s.lFG", "cvt.d.lFG",
+}
+
+local map_cop1_r6 = {
+ shift = 21, mask = 31,
+ [0] = "mfc1TG", "dmfc1TG", "cfc1TG", "mfhc1TG",
+ "mtc1TG", "dmtc1TG", "ctc1TG", "mthc1TG",
+ false, "bc1eqzHB", false, false,
+ false, "bc1nezHB", false, false,
+ map_cop1s_r6, map_cop1d_r6, false, false,
+ map_cop1w_r6, map_cop1l_r6,
+}
+
+local function maprs_popTS(rs, rt)
+ if rt == 0 then return 0 elseif rs == 0 then return 1
+ elseif rs == rt then return 2 else return 3 end
+end
+
+local map_pop06_r6 = {
+ maprs = maprs_popTS, [0] = "blezSB", "blezalcTB", "bgezalcTB", "bgeucSTB"
+}
+local map_pop07_r6 = {
+ maprs = maprs_popTS, [0] = "bgtzSB", "bgtzalcTB", "bltzalcTB", "bltucSTB"
+}
+local map_pop26_r6 = {
+ maprs = maprs_popTS, "blezcTB", "bgezcTB", "bgecSTB"
+}
+local map_pop27_r6 = {
+ maprs = maprs_popTS, "bgtzcTB", "bltzcTB", "bltcSTB"
+}
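+-- Added note (not in upstream): the maprs hook above replaces the usual
+-- shift/mask dispatch in disass_ins. A minimal sketch, assuming a fetched
+-- word `op` with a nonzero rs field and rt == 0:
+--
+--   local rs = band(rshift(op, 21), 31)
+--   local rt = band(rshift(op, 16), 31)
+--   map_pop06_r6[maprs_popTS(rs, rt)]  --> index 0, i.e. "blezSB"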
+
+local function maprs_popS(rs, rt)
+ if rs == 0 then return 0 else return 1 end
+end
+
+local map_pop66_r6 = {
+ maprs = maprs_popS, [0] = "jicTI", "beqzcSb"
+}
+local map_pop76_r6 = {
+ maprs = maprs_popS, [0] = "jialcTI", "bnezcSb"
+}
+
+local function maprs_popST(rs, rt)
+ if rs >= rt then return 0 elseif rs == 0 then return 1 else return 2 end
+end
+
+local map_pop10_r6 = {
+ maprs = maprs_popST, [0] = "bovcSTB", "beqzalcTB", "beqcSTB"
+}
+local map_pop30_r6 = {
+ maprs = maprs_popST, [0] = "bnvcSTB", "bnezalcTB", "bnecSTB"
+}
+
+local map_pri_r6 = {
+ [0] = map_special_r6, map_regimm_r6, "jJ", "jalJ",
+ "beq|beqz|bST00B", "bne|bnezST0B", map_pop06_r6, map_pop07_r6,
+ map_pop10_r6, "addiu|liTS0I", "sltiTSI", "sltiuTSI",
+ "andiTSU", "ori|liTS0U", "xoriTSU", "aui|luiTS0U",
+ map_cop0, map_cop1_r6, false, false,
+ false, false, map_pop26_r6, map_pop27_r6,
+ map_pop30_r6, "daddiuTSI", false, false,
+ false, "dauiTSI", false, map_special3_r6,
+ "lbTSO", "lhTSO", false, "lwTSO",
+ "lbuTSO", "lhuTSO", false, false,
+ "sbTSO", "shTSO", false, "swTSO",
+ false, false, false, false,
+ false, "lwc1HSO", "bc#", false,
+ false, "ldc1HSO", map_pop66_r6, "ldTSO",
+ false, "swc1HSO", "balc#", map_pcrel_r6,
+ false, "sdc1HSO", map_pop76_r6, "sdTSO",
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "sp", "r30", "ra",
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
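+-- Added illustration: with the default hexdump width, a decoded `jr ra`
+-- at offset 0 would print roughly as
+--   00000000 03e00008 jr      ra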
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+local function get_be(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+end
+
+local function get_le(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ return bor(lshift(b3, 24), lshift(b2, 16), lshift(b1, 8), b0)
+end
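+-- Added worked example: for the byte sequence 08 00 e0 03, get_le returns
+-- bor(lshift(0x03,24), lshift(0xe0,16), lshift(0x00,8), 0x08) = 0x03e00008,
+-- whereas get_be would return 0x0800e003 for the same four bytes.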
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local op = ctx:get()
+ local operands = {}
+ local last = nil
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = ctx.map_pri[rshift(op, 26)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ if opat.maprs then
+ opat = opat[opat.maprs(band(rshift(op,21),31), band(rshift(op,16),31))]
+ else
+ opat = opat[band(rshift(op, opat.shift), opat.mask)] or opat._
+ end
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.|]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "S" then
+ x = map_gpr[band(rshift(op, 21), 31)]
+ elseif p == "T" then
+ x = map_gpr[band(rshift(op, 16), 31)]
+ elseif p == "D" then
+ x = map_gpr[band(rshift(op, 11), 31)]
+ elseif p == "F" then
+ x = "f"..band(rshift(op, 6), 31)
+ elseif p == "G" then
+ x = "f"..band(rshift(op, 11), 31)
+ elseif p == "H" then
+ x = "f"..band(rshift(op, 16), 31)
+ elseif p == "R" then
+ x = "f"..band(rshift(op, 21), 31)
+ elseif p == "A" then
+ x = band(rshift(op, 6), 31)
+ elseif p == "a" then
+ x = band(rshift(op, 6), 7)
+ elseif p == "E" then
+ x = band(rshift(op, 6), 31) + 32
+ elseif p == "M" then
+ x = band(rshift(op, 11), 31)
+ elseif p == "N" then
+ x = band(rshift(op, 16), 31)
+ elseif p == "C" then
+ x = band(rshift(op, 18), 7)
+ if x == 0 then x = nil end
+ elseif p == "K" then
+ x = band(rshift(op, 11), 31) + 1
+ elseif p == "P" then
+ x = band(rshift(op, 11), 31) + 33
+ elseif p == "L" then
+ x = band(rshift(op, 11), 31) - last + 1
+ elseif p == "Q" then
+ x = band(rshift(op, 11), 31) - last + 33
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "2" then
+ x = arshift(lshift(op, 13), 11)
+ elseif p == "3" then
+ x = arshift(lshift(op, 14), 11)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "O" then
+ local disp = arshift(lshift(op, 16), 16)
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "X" then
+ local index = map_gpr[band(rshift(op, 16), 31)]
+ operands[#operands] = format("%s(%s)", index, last)
+ elseif p == "B" then
+ x = ctx.addr + ctx.pos + arshift(lshift(op, 16), 14) + 4
+ ctx.rel = x
+ x = format("0x%08x", x)
+ elseif p == "b" then
+ x = ctx.addr + ctx.pos + arshift(lshift(op, 11), 9) + 4
+ ctx.rel = x
+ x = format("0x%08x", x)
+ elseif p == "#" then
+ x = ctx.addr + ctx.pos + arshift(lshift(op, 6), 4) + 4
+ ctx.rel = x
+ x = format("0x%08x", x)
+ elseif p == "J" then
+ local a = ctx.addr + ctx.pos
+ x = a - band(a, 0x0fffffff) + band(op, 0x03ffffff)*4
+ ctx.rel = x
+ x = format("0x%08x", x)
+ elseif p == "V" then
+ x = band(rshift(op, 8), 7)
+ if x == 0 then x = nil end
+ elseif p == "W" then
+ x = band(op, 7)
+ if x == 0 then x = nil end
+ elseif p == "Y" then
+ x = band(rshift(op, 6), 0x000fffff)
+ if x == 0 then x = nil end
+ elseif p == "Z" then
+ x = band(rshift(op, 6), 1023)
+ if x == 0 then x = nil end
+ elseif p == "0" then
+ if last == "r0" or last == 0 then
+ local n = #operands
+ operands[n] = nil
+ last = operands[n-1]
+ if altname then
+ local a1, a2 = match(altname, "([^|]*)|(.*)")
+ if a1 then name, altname = a1, a2
+ else name = altname end
+ end
+ end
+ elseif p == "1" then
+ if last == "ra" then
+ operands[#operands] = nil
+ end
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
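+-- Added worked example: for op = 0x03e00008 the primary index
+-- rshift(op, 26) is 0 (SPECIAL) and the funct field band(op, 63) is 8,
+-- which selects "jrS" (see map_special_r6 above); pattern "S" then reads
+-- map_gpr[band(rshift(op, 21), 31)] = map_gpr[31] = "ra", so the emitted
+-- line is "jr ra".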
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ ctx.get = get_be
+ ctx.map_pri = map_pri
+ return ctx
+end
+
+local function create_el(code, addr, out)
+ local ctx = create(code, addr, out)
+ ctx.get = get_le
+ return ctx
+end
+
+local function create_r6(code, addr, out)
+ local ctx = create(code, addr, out)
+ ctx.map_pri = map_pri_r6
+ return ctx
+end
+
+local function create_r6_el(code, addr, out)
+ local ctx = create(code, addr, out)
+ ctx.get = get_le
+ ctx.map_pri = map_pri_r6
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass(code, addr, out)
+ create(code, addr, out):disass()
+end
+
+local function disass_el(code, addr, out)
+ create_el(code, addr, out):disass()
+end
+
+local function disass_r6(code, addr, out)
+ create_r6(code, addr, out):disass()
+end
+
+local function disass_r6_el(code, addr, out)
+ create_r6_el(code, addr, out):disass()
+end
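+-- Added usage sketch (assumes this file is installed as jit.dis_mips):
+--   local dis = require("jit.dis_mips")
+--   dis.disass("\3\224\0\8", 0)        -- one big-endian word: jr ra
+--   local ctx = dis.create("\3\224\0\8", 0, io.write)
+--   ctx:disass(0, 4)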
+
+-- Return register name for RID.
+local function regname(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+return {
+ create = create,
+ create_el = create_el,
+ create_r6 = create_r6,
+ create_r6_el = create_r6_el,
+ disass = disass,
+ disass_el = disass_el,
+ disass_r6 = disass_r6,
+ disass_r6_el = disass_r6_el,
+ regname = regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mips64.lua b/libs/luajit-cmake/luajit/src/jit/dis_mips64.lua
new file mode 100644
index 0000000..1236e52
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mips64.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS64 disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the big-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
+return {
+ create = dis_mips.create,
+ disass = dis_mips.disass,
+ regname = dis_mips.regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mips64el.lua b/libs/luajit-cmake/luajit/src/jit/dis_mips64el.lua
new file mode 100644
index 0000000..7c478d2
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mips64el.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS64EL disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the little-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
+return {
+ create = dis_mips.create_el,
+ disass = dis_mips.disass_el,
+ regname = dis_mips.regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mips64r6.lua b/libs/luajit-cmake/luajit/src/jit/dis_mips64r6.lua
new file mode 100644
index 0000000..c5789ce
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mips64r6.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS64R6 disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the r6 big-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
+return {
+ create = dis_mips.create_r6,
+ disass = dis_mips.disass_r6,
+ regname = dis_mips.regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mips64r6el.lua b/libs/luajit-cmake/luajit/src/jit/dis_mips64r6el.lua
new file mode 100644
index 0000000..f67f624
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mips64r6el.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPS64R6EL disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the r6 little-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
+return {
+ create = dis_mips.create_r6_el,
+ disass = dis_mips.disass_r6_el,
+ regname = dis_mips.regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_mipsel.lua b/libs/luajit-cmake/luajit/src/jit/dis_mipsel.lua
new file mode 100644
index 0000000..a4fa6c6
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_mipsel.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT MIPSEL disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the little-endian functions from the
+-- MIPS disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_mips = require((string.match(..., ".*%.") or "").."dis_mips")
+return {
+ create = dis_mips.create_el,
+ disass = dis_mips.disass_el,
+ regname = dis_mips.regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_ppc.lua b/libs/luajit-cmake/luajit/src/jit/dis_ppc.lua
new file mode 100644
index 0000000..8f65f25
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_ppc.lua
@@ -0,0 +1,591 @@
+----------------------------------------------------------------------------
+-- LuaJIT PPC disassembler module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- It disassembles all common, non-privileged 32/64 bit PowerPC instructions
+-- plus the e500 SPE instructions and some Cell/Xenon extensions.
+--
+-- NYI: VMX, VMX128
+------------------------------------------------------------------------------
+
+local type = type
+local byte, format = string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local concat = table.concat
+local bit = require("bit")
+local band, bor, tohex = bit.band, bit.bor, bit.tohex
+local lshift, rshift, arshift = bit.lshift, bit.rshift, bit.arshift
+
+------------------------------------------------------------------------------
+-- Primary and extended opcode maps
+------------------------------------------------------------------------------
+
+local map_crops = {
+ shift = 1, mask = 1023,
+ [0] = "mcrfXX",
+ [33] = "crnor|crnotCCC=", [129] = "crandcCCC",
+ [193] = "crxor|crclrCCC%", [225] = "crnandCCC",
+ [257] = "crandCCC", [289] = "creqv|crsetCCC%",
+ [417] = "crorcCCC", [449] = "cror|crmoveCCC=",
+ [16] = "b_lrKB", [528] = "b_ctrKB",
+ [150] = "isync",
+}
+
+local map_rlwinm = setmetatable({
+ shift = 0, mask = -1,
+},
+{ __index = function(t, x)
+ local rot = band(rshift(x, 11), 31)
+ local mb = band(rshift(x, 6), 31)
+ local me = band(rshift(x, 1), 31)
+ if mb == 0 and me == 31-rot then
+ return "slwiRR~A."
+ elseif me == 31 and mb == 32-rot then
+ return "srwiRR~-A."
+ else
+ return "rlwinmRR~AAA."
+ end
+ end
+})
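+-- Added worked example: for an rlwinm word with SH = 8, MB = 0, ME = 23
+-- the __index handler sees mb == 0 and me == 31-rot, so it returns the
+-- simplified mnemonic pattern "slwiRR~A." instead of "rlwinmRR~AAA.".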
+
+local map_rld = {
+ shift = 2, mask = 7,
+ [0] = "rldiclRR~HM.", "rldicrRR~HM.", "rldicRR~HM.", "rldimiRR~HM.",
+ {
+ shift = 1, mask = 1,
+ [0] = "rldclRR~RM.", "rldcrRR~RM.",
+ },
+}
+
+local map_ext = setmetatable({
+ shift = 1, mask = 1023,
+
+ [0] = "cmp_YLRR", [32] = "cmpl_YLRR",
+ [4] = "twARR", [68] = "tdARR",
+
+ [8] = "subfcRRR.", [40] = "subfRRR.",
+ [104] = "negRR.", [136] = "subfeRRR.",
+ [200] = "subfzeRR.", [232] = "subfmeRR.",
+ [520] = "subfcoRRR.", [552] = "subfoRRR.",
+ [616] = "negoRR.", [648] = "subfeoRRR.",
+ [712] = "subfzeoRR.", [744] = "subfmeoRR.",
+
+ [9] = "mulhduRRR.", [73] = "mulhdRRR.", [233] = "mulldRRR.",
+ [457] = "divduRRR.", [489] = "divdRRR.",
+ [745] = "mulldoRRR.",
+ [969] = "divduoRRR.", [1001] = "divdoRRR.",
+
+ [10] = "addcRRR.", [138] = "addeRRR.",
+ [202] = "addzeRR.", [234] = "addmeRR.", [266] = "addRRR.",
+ [522] = "addcoRRR.", [650] = "addeoRRR.",
+ [714] = "addzeoRR.", [746] = "addmeoRR.", [778] = "addoRRR.",
+
+ [11] = "mulhwuRRR.", [75] = "mulhwRRR.", [235] = "mullwRRR.",
+ [459] = "divwuRRR.", [491] = "divwRRR.",
+ [747] = "mullwoRRR.",
+ [971] = "divwouRRR.", [1003] = "divwoRRR.",
+
+ [15] = "iselltRRR", [47] = "iselgtRRR", [79] = "iseleqRRR",
+
+ [144] = { shift = 20, mask = 1, [0] = "mtcrfRZ~", "mtocrfRZ~", },
+ [19] = { shift = 20, mask = 1, [0] = "mfcrR", "mfocrfRZ", },
+ [371] = { shift = 11, mask = 1023, [392] = "mftbR", [424] = "mftbuR", },
+ [339] = {
+ shift = 11, mask = 1023,
+ [32] = "mferR", [256] = "mflrR", [288] = "mfctrR", [16] = "mfspefscrR",
+ },
+ [467] = {
+ shift = 11, mask = 1023,
+ [32] = "mtxerR", [256] = "mtlrR", [288] = "mtctrR", [16] = "mtspefscrR",
+ },
+
+ [20] = "lwarxRR0R", [84] = "ldarxRR0R",
+
+ [21] = "ldxRR0R", [53] = "lduxRRR",
+ [149] = "stdxRR0R", [181] = "stduxRRR",
+ [341] = "lwaxRR0R", [373] = "lwauxRRR",
+
+ [23] = "lwzxRR0R", [55] = "lwzuxRRR",
+ [87] = "lbzxRR0R", [119] = "lbzuxRRR",
+ [151] = "stwxRR0R", [183] = "stwuxRRR",
+ [215] = "stbxRR0R", [247] = "stbuxRRR",
+ [279] = "lhzxRR0R", [311] = "lhzuxRRR",
+ [343] = "lhaxRR0R", [375] = "lhauxRRR",
+ [407] = "sthxRR0R", [439] = "sthuxRRR",
+
+ [54] = "dcbst-R0R", [86] = "dcbf-R0R",
+ [150] = "stwcxRR0R.", [214] = "stdcxRR0R.",
+ [246] = "dcbtst-R0R", [278] = "dcbt-R0R",
+ [310] = "eciwxRR0R", [438] = "ecowxRR0R",
+ [470] = "dcbi-RR",
+
+ [598] = {
+ shift = 21, mask = 3,
+ [0] = "sync", "lwsync", "ptesync",
+ },
+ [758] = "dcba-RR",
+ [854] = "eieio", [982] = "icbi-R0R", [1014] = "dcbz-R0R",
+
+ [26] = "cntlzwRR~", [58] = "cntlzdRR~",
+ [122] = "popcntbRR~",
+ [154] = "prtywRR~", [186] = "prtydRR~",
+
+ [28] = "andRR~R.", [60] = "andcRR~R.", [124] = "nor|notRR~R=.",
+ [284] = "eqvRR~R.", [316] = "xorRR~R.",
+ [412] = "orcRR~R.", [444] = "or|mrRR~R=.", [476] = "nandRR~R.",
+ [508] = "cmpbRR~R",
+
+ [512] = "mcrxrX",
+
+ [532] = "ldbrxRR0R", [660] = "stdbrxRR0R",
+
+ [533] = "lswxRR0R", [597] = "lswiRR0A",
+ [661] = "stswxRR0R", [725] = "stswiRR0A",
+
+ [534] = "lwbrxRR0R", [662] = "stwbrxRR0R",
+ [790] = "lhbrxRR0R", [918] = "sthbrxRR0R",
+
+ [535] = "lfsxFR0R", [567] = "lfsuxFRR",
+ [599] = "lfdxFR0R", [631] = "lfduxFRR",
+ [663] = "stfsxFR0R", [695] = "stfsuxFRR",
+ [727] = "stfdxFR0R", [759] = "stfduxFR0R",
+ [855] = "lfiwaxFR0R",
+ [983] = "stfiwxFR0R",
+
+ [24] = "slwRR~R.",
+
+ [27] = "sldRR~R.", [536] = "srwRR~R.",
+ [792] = "srawRR~R.", [824] = "srawiRR~A.",
+
+ [794] = "sradRR~R.", [826] = "sradiRR~H.", [827] = "sradiRR~H.",
+ [922] = "extshRR~.", [954] = "extsbRR~.", [986] = "extswRR~.",
+
+ [539] = "srdRR~R.",
+},
+{ __index = function(t, x)
+ if band(x, 31) == 15 then return "iselRRRC" end
+ end
+})
+
+local map_ld = {
+ shift = 0, mask = 3,
+ [0] = "ldRRE", "lduRRE", "lwaRRE",
+}
+
+local map_std = {
+ shift = 0, mask = 3,
+ [0] = "stdRRE", "stduRRE",
+}
+
+local map_fps = {
+ shift = 5, mask = 1,
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivsFFF.", false,
+ "fsubsFFF.", "faddsFFF.", "fsqrtsF-F.", false,
+ "fresF-F.", "fmulsFF-F.", "frsqrtesF-F.", false,
+ "fmsubsFFFF~.", "fmaddsFFFF~.", "fnmsubsFFFF~.", "fnmaddsFFFF~.",
+ }
+}
+
+local map_fpd = {
+ shift = 5, mask = 1,
+ [0] = {
+ shift = 1, mask = 1023,
+ [0] = "fcmpuXFF", [32] = "fcmpoXFF", [64] = "mcrfsXX",
+ [38] = "mtfsb1A.", [70] = "mtfsb0A.", [134] = "mtfsfiA>>-A>",
+ [8] = "fcpsgnFFF.", [40] = "fnegF-F.", [72] = "fmrF-F.",
+ [136] = "fnabsF-F.", [264] = "fabsF-F.",
+ [12] = "frspF-F.",
+ [14] = "fctiwF-F.", [15] = "fctiwzF-F.",
+ [583] = "mffsF.", [711] = "mtfsfZF.",
+ [392] = "frinF-F.", [424] = "frizF-F.",
+ [456] = "fripF-F.", [488] = "frimF-F.",
+ [814] = "fctidF-F.", [815] = "fctidzF-F.", [846] = "fcfidF-F.",
+ },
+ {
+ shift = 1, mask = 15,
+ [0] = false, false, "fdivFFF.", false,
+ "fsubFFF.", "faddFFF.", "fsqrtF-F.", "fselFFFF~.",
+ "freF-F.", "fmulFF-F.", "frsqrteF-F.", false,
+ "fmsubFFFF~.", "fmaddFFFF~.", "fnmsubFFFF~.", "fnmaddFFFF~.",
+ }
+}
+
+local map_spe = {
+ shift = 0, mask = 2047,
+
+ [512] = "evaddwRRR", [514] = "evaddiwRAR~",
+ [516] = "evsubwRRR~", [518] = "evsubiwRAR~",
+ [520] = "evabsRR", [521] = "evnegRR",
+ [522] = "evextsbRR", [523] = "evextshRR", [524] = "evrndwRR",
+ [525] = "evcntlzwRR", [526] = "evcntlswRR",
+
+ [527] = "brincRRR",
+
+ [529] = "evandRRR", [530] = "evandcRRR", [534] = "evxorRRR",
+ [535] = "evor|evmrRRR=", [536] = "evnor|evnotRRR=",
+ [537] = "eveqvRRR", [539] = "evorcRRR", [542] = "evnandRRR",
+
+ [544] = "evsrwuRRR", [545] = "evsrwsRRR",
+ [546] = "evsrwiuRRA", [547] = "evsrwisRRA",
+ [548] = "evslwRRR", [550] = "evslwiRRA",
+ [552] = "evrlwRRR", [553] = "evsplatiRS",
+ [554] = "evrlwiRRA", [555] = "evsplatfiRS",
+ [556] = "evmergehiRRR", [557] = "evmergeloRRR",
+ [558] = "evmergehiloRRR", [559] = "evmergelohiRRR",
+
+ [560] = "evcmpgtuYRR", [561] = "evcmpgtsYRR",
+ [562] = "evcmpltuYRR", [563] = "evcmpltsYRR",
+ [564] = "evcmpeqYRR",
+
+ [632] = "evselRRR", [633] = "evselRRRW",
+ [634] = "evselRRRW", [635] = "evselRRRW",
+ [636] = "evselRRRW", [637] = "evselRRRW",
+ [638] = "evselRRRW", [639] = "evselRRRW",
+
+ [640] = "evfsaddRRR", [641] = "evfssubRRR",
+ [644] = "evfsabsRR", [645] = "evfsnabsRR", [646] = "evfsnegRR",
+ [648] = "evfsmulRRR", [649] = "evfsdivRRR",
+ [652] = "evfscmpgtYRR", [653] = "evfscmpltYRR", [654] = "evfscmpeqYRR",
+ [656] = "evfscfuiR-R", [657] = "evfscfsiR-R",
+ [658] = "evfscfufR-R", [659] = "evfscfsfR-R",
+ [660] = "evfsctuiR-R", [661] = "evfsctsiR-R",
+ [662] = "evfsctufR-R", [663] = "evfsctsfR-R",
+ [664] = "evfsctuizR-R", [666] = "evfsctsizR-R",
+ [668] = "evfststgtYRR", [669] = "evfststltYRR", [670] = "evfststeqYRR",
+
+ [704] = "efsaddRRR", [705] = "efssubRRR",
+ [708] = "efsabsRR", [709] = "efsnabsRR", [710] = "efsnegRR",
+ [712] = "efsmulRRR", [713] = "efsdivRRR",
+ [716] = "efscmpgtYRR", [717] = "efscmpltYRR", [718] = "efscmpeqYRR",
+ [719] = "efscfdR-R",
+ [720] = "efscfuiR-R", [721] = "efscfsiR-R",
+ [722] = "efscfufR-R", [723] = "efscfsfR-R",
+ [724] = "efsctuiR-R", [725] = "efsctsiR-R",
+ [726] = "efsctufR-R", [727] = "efsctsfR-R",
+ [728] = "efsctuizR-R", [730] = "efsctsizR-R",
+ [732] = "efststgtYRR", [733] = "efststltYRR", [734] = "efststeqYRR",
+
+ [736] = "efdaddRRR", [737] = "efdsubRRR",
+ [738] = "efdcfuidR-R", [739] = "efdcfsidR-R",
+ [740] = "efdabsRR", [741] = "efdnabsRR", [742] = "efdnegRR",
+ [744] = "efdmulRRR", [745] = "efddivRRR",
+ [746] = "efdctuidzR-R", [747] = "efdctsidzR-R",
+ [748] = "efdcmpgtYRR", [749] = "efdcmpltYRR", [750] = "efdcmpeqYRR",
+ [751] = "efdcfsR-R",
+ [752] = "efdcfuiR-R", [753] = "efdcfsiR-R",
+ [754] = "efdcfufR-R", [755] = "efdcfsfR-R",
+ [756] = "efdctuiR-R", [757] = "efdctsiR-R",
+ [758] = "efdctufR-R", [759] = "efdctsfR-R",
+ [760] = "efdctuizR-R", [762] = "efdctsizR-R",
+ [764] = "efdtstgtYRR", [765] = "efdtstltYRR", [766] = "efdtsteqYRR",
+
+ [768] = "evlddxRR0R", [769] = "evlddRR8",
+ [770] = "evldwxRR0R", [771] = "evldwRR8",
+ [772] = "evldhxRR0R", [773] = "evldhRR8",
+ [776] = "evlhhesplatxRR0R", [777] = "evlhhesplatRR2",
+ [780] = "evlhhousplatxRR0R", [781] = "evlhhousplatRR2",
+ [782] = "evlhhossplatxRR0R", [783] = "evlhhossplatRR2",
+ [784] = "evlwhexRR0R", [785] = "evlwheRR4",
+ [788] = "evlwhouxRR0R", [789] = "evlwhouRR4",
+ [790] = "evlwhosxRR0R", [791] = "evlwhosRR4",
+ [792] = "evlwwsplatxRR0R", [793] = "evlwwsplatRR4",
+ [796] = "evlwhsplatxRR0R", [797] = "evlwhsplatRR4",
+
+ [800] = "evstddxRR0R", [801] = "evstddRR8",
+ [802] = "evstdwxRR0R", [803] = "evstdwRR8",
+ [804] = "evstdhxRR0R", [805] = "evstdhRR8",
+ [816] = "evstwhexRR0R", [817] = "evstwheRR4",
+ [820] = "evstwhoxRR0R", [821] = "evstwhoRR4",
+ [824] = "evstwwexRR0R", [825] = "evstwweRR4",
+ [828] = "evstwwoxRR0R", [829] = "evstwwoRR4",
+
+ [1027] = "evmhessfRRR", [1031] = "evmhossfRRR", [1032] = "evmheumiRRR",
+ [1033] = "evmhesmiRRR", [1035] = "evmhesmfRRR", [1036] = "evmhoumiRRR",
+ [1037] = "evmhosmiRRR", [1039] = "evmhosmfRRR", [1059] = "evmhessfaRRR",
+ [1063] = "evmhossfaRRR", [1064] = "evmheumiaRRR", [1065] = "evmhesmiaRRR",
+ [1067] = "evmhesmfaRRR", [1068] = "evmhoumiaRRR", [1069] = "evmhosmiaRRR",
+ [1071] = "evmhosmfaRRR", [1095] = "evmwhssfRRR", [1096] = "evmwlumiRRR",
+ [1100] = "evmwhumiRRR", [1101] = "evmwhsmiRRR", [1103] = "evmwhsmfRRR",
+ [1107] = "evmwssfRRR", [1112] = "evmwumiRRR", [1113] = "evmwsmiRRR",
+ [1115] = "evmwsmfRRR", [1127] = "evmwhssfaRRR", [1128] = "evmwlumiaRRR",
+ [1132] = "evmwhumiaRRR", [1133] = "evmwhsmiaRRR", [1135] = "evmwhsmfaRRR",
+ [1139] = "evmwssfaRRR", [1144] = "evmwumiaRRR", [1145] = "evmwsmiaRRR",
+ [1147] = "evmwsmfaRRR",
+
+ [1216] = "evaddusiaawRR", [1217] = "evaddssiaawRR",
+ [1218] = "evsubfusiaawRR", [1219] = "evsubfssiaawRR",
+ [1220] = "evmraRR",
+ [1222] = "evdivwsRRR", [1223] = "evdivwuRRR",
+ [1224] = "evaddumiaawRR", [1225] = "evaddsmiaawRR",
+ [1226] = "evsubfumiaawRR", [1227] = "evsubfsmiaawRR",
+
+ [1280] = "evmheusiaawRRR", [1281] = "evmhessiaawRRR",
+ [1283] = "evmhessfaawRRR", [1284] = "evmhousiaawRRR",
+ [1285] = "evmhossiaawRRR", [1287] = "evmhossfaawRRR",
+ [1288] = "evmheumiaawRRR", [1289] = "evmhesmiaawRRR",
+ [1291] = "evmhesmfaawRRR", [1292] = "evmhoumiaawRRR",
+ [1293] = "evmhosmiaawRRR", [1295] = "evmhosmfaawRRR",
+ [1320] = "evmhegumiaaRRR", [1321] = "evmhegsmiaaRRR",
+ [1323] = "evmhegsmfaaRRR", [1324] = "evmhogumiaaRRR",
+ [1325] = "evmhogsmiaaRRR", [1327] = "evmhogsmfaaRRR",
+ [1344] = "evmwlusiaawRRR", [1345] = "evmwlssiaawRRR",
+ [1352] = "evmwlumiaawRRR", [1353] = "evmwlsmiaawRRR",
+ [1363] = "evmwssfaaRRR", [1368] = "evmwumiaaRRR",
+ [1369] = "evmwsmiaaRRR", [1371] = "evmwsmfaaRRR",
+ [1408] = "evmheusianwRRR", [1409] = "evmhessianwRRR",
+ [1411] = "evmhessfanwRRR", [1412] = "evmhousianwRRR",
+ [1413] = "evmhossianwRRR", [1415] = "evmhossfanwRRR",
+ [1416] = "evmheumianwRRR", [1417] = "evmhesmianwRRR",
+ [1419] = "evmhesmfanwRRR", [1420] = "evmhoumianwRRR",
+ [1421] = "evmhosmianwRRR", [1423] = "evmhosmfanwRRR",
+ [1448] = "evmhegumianRRR", [1449] = "evmhegsmianRRR",
+ [1451] = "evmhegsmfanRRR", [1452] = "evmhogumianRRR",
+ [1453] = "evmhogsmianRRR", [1455] = "evmhogsmfanRRR",
+ [1472] = "evmwlusianwRRR", [1473] = "evmwlssianwRRR",
+ [1480] = "evmwlumianwRRR", [1481] = "evmwlsmianwRRR",
+ [1491] = "evmwssfanRRR", [1496] = "evmwumianRRR",
+ [1497] = "evmwsmianRRR", [1499] = "evmwsmfanRRR",
+}
+
+local map_pri = {
+ [0] = false, false, "tdiARI", "twiARI",
+ map_spe, false, false, "mulliRRI",
+ "subficRRI", false, "cmpl_iYLRU", "cmp_iYLRI",
+ "addicRRI", "addic.RRI", "addi|liRR0I", "addis|lisRR0I",
+ "b_KBJ", "sc", "bKJ", map_crops,
+ "rlwimiRR~AAA.", map_rlwinm, false, "rlwnmRR~RAA.",
+ "oriNRR~U", "orisRR~U", "xoriRR~U", "xorisRR~U",
+ "andi.RR~U", "andis.RR~U", map_rld, map_ext,
+ "lwzRRD", "lwzuRRD", "lbzRRD", "lbzuRRD",
+ "stwRRD", "stwuRRD", "stbRRD", "stbuRRD",
+ "lhzRRD", "lhzuRRD", "lhaRRD", "lhauRRD",
+ "sthRRD", "sthuRRD", "lmwRRD", "stmwRRD",
+ "lfsFRD", "lfsuFRD", "lfdFRD", "lfduFRD",
+ "stfsFRD", "stfsuFRD", "stfdFRD", "stfduFRD",
+ false, false, map_ld, map_fps,
+ false, false, map_std, map_fpd,
+}
+
+------------------------------------------------------------------------------
+
+local map_gpr = {
+ [0] = "r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+}
+
+local map_cond = { [0] = "lt", "gt", "eq", "so", "ge", "le", "ne", "ns", }
+
+-- Format a condition bit.
+local function condfmt(cond)
+ if cond <= 3 then
+ return map_cond[band(cond, 3)]
+ else
+ return format("4*cr%d+%s", rshift(cond, 2), map_cond[band(cond, 3)])
+ end
+end
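+-- Added examples: condfmt(2) returns "eq" (a bit in cr0), while
+-- condfmt(6) returns "4*cr1+eq" for the corresponding bit in cr1.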
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local pos = ctx.pos
+ local extra = ""
+ if ctx.rel then
+ local sym = ctx.symtab[ctx.rel]
+ if sym then extra = "\t->"..sym end
+ end
+ if ctx.hexdump > 0 then
+ ctx.out(format("%08x %s %-7s %s%s\n",
+ ctx.addr+pos, tohex(ctx.op), text, concat(operands, ", "), extra))
+ else
+ ctx.out(format("%08x %-7s %s%s\n",
+ ctx.addr+pos, text, concat(operands, ", "), extra))
+ end
+ ctx.pos = pos + 4
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ return putop(ctx, ".long", { "0x"..tohex(ctx.op) })
+end
+
+-- Disassemble a single instruction.
+local function disass_ins(ctx)
+ local pos = ctx.pos
+ local b0, b1, b2, b3 = byte(ctx.code, pos+1, pos+4)
+ local op = bor(lshift(b0, 24), lshift(b1, 16), lshift(b2, 8), b3)
+ local operands = {}
+ local last = nil
+ local rs = 21
+ ctx.op = op
+ ctx.rel = nil
+
+ local opat = map_pri[rshift(b0, 2)]
+ while type(opat) ~= "string" do
+ if not opat then return unknown(ctx) end
+ opat = opat[band(rshift(op, opat.shift), opat.mask)]
+ end
+ local name, pat = match(opat, "^([a-z0-9_.]*)(.*)")
+ local altname, pat2 = match(pat, "|([a-z0-9_.]*)(.*)")
+ if altname then pat = pat2 end
+
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "R" then
+ x = map_gpr[band(rshift(op, rs), 31)]
+ rs = rs - 5
+ elseif p == "F" then
+ x = "f"..band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "A" then
+ x = band(rshift(op, rs), 31)
+ rs = rs - 5
+ elseif p == "S" then
+ x = arshift(lshift(op, 27-rs), 27)
+ rs = rs - 5
+ elseif p == "I" then
+ x = arshift(lshift(op, 16), 16)
+ elseif p == "U" then
+ x = band(op, 0xffff)
+ elseif p == "D" or p == "E" then
+ local disp = arshift(lshift(op, 16), 16)
+ if p == "E" then disp = band(disp, -4) end
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p >= "2" and p <= "8" then
+ local disp = band(rshift(op, rs), 31) * p
+ if last == "r0" then last = "0" end
+ operands[#operands] = format("%d(%s)", disp, last)
+ elseif p == "H" then
+ x = band(rshift(op, rs), 31) + lshift(band(op, 2), 4)
+ rs = rs - 5
+ elseif p == "M" then
+ x = band(rshift(op, rs), 31) + band(op, 0x20)
+ elseif p == "C" then
+ x = condfmt(band(rshift(op, rs), 31))
+ rs = rs - 5
+ elseif p == "B" then
+ local bo = rshift(op, 21)
+ local cond = band(rshift(op, 16), 31)
+ local cn = ""
+ rs = rs - 10
+ if band(bo, 4) == 0 then
+ cn = band(bo, 2) == 0 and "dnz" or "dz"
+ if band(bo, 0x10) == 0 then
+ cn = cn..(band(bo, 8) == 0 and "f" or "t")
+ end
+ if band(bo, 0x10) == 0 then x = condfmt(cond) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ elseif band(bo, 0x10) == 0 then
+ cn = map_cond[band(cond, 3) + (band(bo, 8) == 0 and 4 or 0)]
+ if cond > 3 then x = "cr"..rshift(cond, 2) end
+ name = name..(band(bo, 1) == band(rshift(op, 15), 1) and "-" or "+")
+ end
+ name = gsub(name, "_", cn)
+ elseif p == "J" then
+ x = arshift(lshift(op, 27-rs), 29-rs)*4
+ if band(op, 2) == 0 then x = ctx.addr + pos + x end
+ ctx.rel = x
+ x = "0x"..tohex(x)
+ elseif p == "K" then
+ if band(op, 1) ~= 0 then name = name.."l" end
+ if band(op, 2) ~= 0 then name = name.."a" end
+ elseif p == "X" or p == "Y" then
+ x = band(rshift(op, rs+2), 7)
+ if x == 0 and p == "Y" then x = nil else x = "cr"..x end
+ rs = rs - 5
+ elseif p == "W" then
+ x = "cr"..band(op, 7)
+ elseif p == "Z" then
+ x = band(rshift(op, rs-4), 255)
+ rs = rs - 10
+ elseif p == ">" then
+ operands[#operands] = rshift(operands[#operands], 1)
+ elseif p == "0" then
+ if last == "r0" then
+ operands[#operands] = nil
+ if altname then name = altname end
+ end
+ elseif p == "L" then
+ name = gsub(name, "_", band(op, 0x00200000) ~= 0 and "d" or "w")
+ elseif p == "." then
+ if band(op, 1) == 1 then name = name.."." end
+ elseif p == "N" then
+ if op == 0x60000000 then name = "nop"; break end
+ elseif p == "~" then
+ local n = #operands
+ operands[n-1], operands[n] = operands[n], operands[n-1]
+ elseif p == "=" then
+ local n = #operands
+ if last == operands[n-1] then
+ operands[n] = nil
+ name = altname
+ end
+ elseif p == "%" then
+ local n = #operands
+ if last == operands[n-1] and last == operands[n-2] then
+ operands[n] = nil
+ operands[n-1] = nil
+ name = altname
+ end
+ elseif p == "-" then
+ rs = rs - 5
+ else
+ assert(false)
+ end
+ if x then operands[#operands+1] = x; last = x end
+ end
+
+ return putop(ctx, name, operands)
+end
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ stop = stop - stop % 4
+ ctx.pos = ofs - ofs % 4
+ ctx.rel = nil
+ while ctx.pos < stop do disass_ins(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = addr or 0
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 8
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass(code, addr, out)
+ create(code, addr, out):disass()
+end
+
+-- Return register name for RID.
+local function regname(r)
+ if r < 32 then return map_gpr[r] end
+ return "f"..(r-32)
+end
+
+-- Public module functions.
+return {
+ create = create,
+ disass = disass,
+ regname = regname
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_x64.lua b/libs/luajit-cmake/luajit/src/jit/dis_x64.lua
new file mode 100644
index 0000000..d076c6a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_x64.lua
@@ -0,0 +1,17 @@
+----------------------------------------------------------------------------
+-- LuaJIT x64 disassembler wrapper module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This module just exports the 64 bit functions from the combined
+-- x86/x64 disassembler module. All the interesting stuff is there.
+------------------------------------------------------------------------------
+
+local dis_x86 = require((string.match(..., ".*%.") or "").."dis_x86")
+return {
+ create = dis_x86.create64,
+ disass = dis_x86.disass64,
+ regname = dis_x86.regname64
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dis_x86.lua b/libs/luajit-cmake/luajit/src/jit/dis_x86.lua
new file mode 100644
index 0000000..84492ff
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dis_x86.lua
@@ -0,0 +1,953 @@
+----------------------------------------------------------------------------
+-- LuaJIT x86/x64 disassembler module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+-- This is a helper module used by the LuaJIT machine code dumper module.
+--
+-- Sending small code snippets to an external disassembler and mixing the
+-- output with our own stuff was too fragile. So I had to bite the bullet
+-- and write yet another x86 disassembler. Oh well ...
+--
+-- The output format is very similar to what ndisasm generates. But it has
+-- been developed independently by looking at the opcode tables from the
+-- Intel and AMD manuals. The supported instruction set is quite extensive
+-- and reflects what a current generation Intel or AMD CPU implements in
+-- 32 bit and 64 bit mode. Yes, this includes MMX, SSE, SSE2, SSE3, SSSE3,
+-- SSE4.1, SSE4.2, SSE4a, AVX, AVX2 and even privileged and hypervisor
+-- (VMX/SVM) instructions.
+--
+-- Notes:
+-- * The (useless) a16 prefix, 3DNow and pre-586 opcodes are unsupported.
+-- * No attempt at optimization has been made -- it's fast enough for my needs.
+------------------------------------------------------------------------------
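+-- Added usage sketch (assumes the module is installed as jit.dis_x86):
+--   local dis = require("jit.dis_x86")
+--   dis.disass("\144\195", 0)          -- bytes 90 C3, i.e. nop; ret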
+
+local type = type
+local sub, byte, format = string.sub, string.byte, string.format
+local match, gmatch, gsub = string.match, string.gmatch, string.gsub
+local lower, rep = string.lower, string.rep
+local bit = require("bit")
+local tohex = bit.tohex
+
+-- Map for 1st opcode byte in 32 bit mode. Ugly? Well ... read on.
+local map_opc1_32 = {
+--0x
+[0]="addBmr","addVmr","addBrm","addVrm","addBai","addVai","push es","pop es",
+"orBmr","orVmr","orBrm","orVrm","orBai","orVai","push cs","opc2*",
+--1x
+"adcBmr","adcVmr","adcBrm","adcVrm","adcBai","adcVai","push ss","pop ss",
+"sbbBmr","sbbVmr","sbbBrm","sbbVrm","sbbBai","sbbVai","push ds","pop ds",
+--2x
+"andBmr","andVmr","andBrm","andVrm","andBai","andVai","es:seg","daa",
+"subBmr","subVmr","subBrm","subVrm","subBai","subVai","cs:seg","das",
+--3x
+"xorBmr","xorVmr","xorBrm","xorVrm","xorBai","xorVai","ss:seg","aaa",
+"cmpBmr","cmpVmr","cmpBrm","cmpVrm","cmpBai","cmpVai","ds:seg","aas",
+--4x
+"incVR","incVR","incVR","incVR","incVR","incVR","incVR","incVR",
+"decVR","decVR","decVR","decVR","decVR","decVR","decVR","decVR",
+--5x
+"pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR","pushUR",
+"popUR","popUR","popUR","popUR","popUR","popUR","popUR","popUR",
+--6x
+"sz*pushaw,pusha","sz*popaw,popa","boundVrm","arplWmr",
+"fs:seg","gs:seg","o16:","a16",
+"pushUi","imulVrmi","pushBs","imulVrms",
+"insb","insVS","outsb","outsVS",
+--7x
+"joBj","jnoBj","jbBj","jnbBj","jzBj","jnzBj","jbeBj","jaBj",
+"jsBj","jnsBj","jpeBj","jpoBj","jlBj","jgeBj","jleBj","jgBj",
+--8x
+"arith!Bmi","arith!Vmi","arith!Bmi","arith!Vms",
+"testBmr","testVmr","xchgBrm","xchgVrm",
+"movBmr","movVmr","movBrm","movVrm",
+"movVmg","leaVrm","movWgm","popUm",
+--9x
+"nop*xchgVaR|pause|xchgWaR|repne nop","xchgVaR","xchgVaR","xchgVaR",
+"xchgVaR","xchgVaR","xchgVaR","xchgVaR",
+"sz*cbw,cwde,cdqe","sz*cwd,cdq,cqo","call farViw","wait",
+"sz*pushfw,pushf","sz*popfw,popf","sahf","lahf",
+--Ax
+"movBao","movVao","movBoa","movVoa",
+"movsb","movsVS","cmpsb","cmpsVS",
+"testBai","testVai","stosb","stosVS",
+"lodsb","lodsVS","scasb","scasVS",
+--Bx
+"movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi","movBRi",
+"movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI","movVRI",
+--Cx
+"shift!Bmu","shift!Vmu","retBw","ret","vex*3$lesVrm","vex*2$ldsVrm","movBmi","movVmi",
+"enterBwu","leave","retfBw","retf","int3","intBu","into","iretVS",
+--Dx
+"shift!Bm1","shift!Vm1","shift!Bmc","shift!Vmc","aamBu","aadBu","salc","xlatb",
+"fp*0","fp*1","fp*2","fp*3","fp*4","fp*5","fp*6","fp*7",
+--Ex
+"loopneBj","loopeBj","loopBj","sz*jcxzBj,jecxzBj,jrcxzBj",
+"inBau","inVau","outBua","outVua",
+"callVj","jmpVj","jmp farViw","jmpBj","inBad","inVad","outBda","outVda",
+--Fx
+"lock:","int1","repne:rep","rep:","hlt","cmc","testb!Bm","testv!Vm",
+"clc","stc","cli","sti","cld","std","incb!Bm","incd!Vm",
+}
+assert(#map_opc1_32 == 255)
+
+-- Map for 1st opcode byte in 64 bit mode (overrides only).
+local map_opc1_64 = setmetatable({
+ [0x06]=false, [0x07]=false, [0x0e]=false,
+ [0x16]=false, [0x17]=false, [0x1e]=false, [0x1f]=false,
+ [0x27]=false, [0x2f]=false, [0x37]=false, [0x3f]=false,
+ [0x60]=false, [0x61]=false, [0x62]=false, [0x63]="movsxdVrDmt", [0x67]="a32:",
+ [0x40]="rex*", [0x41]="rex*b", [0x42]="rex*x", [0x43]="rex*xb",
+ [0x44]="rex*r", [0x45]="rex*rb", [0x46]="rex*rx", [0x47]="rex*rxb",
+ [0x48]="rex*w", [0x49]="rex*wb", [0x4a]="rex*wx", [0x4b]="rex*wxb",
+ [0x4c]="rex*wr", [0x4d]="rex*wrb", [0x4e]="rex*wrx", [0x4f]="rex*wrxb",
+ [0x82]=false, [0x9a]=false, [0xc4]="vex*3", [0xc5]="vex*2", [0xce]=false,
+ [0xd4]=false, [0xd5]=false, [0xd6]=false, [0xea]=false,
+}, { __index = map_opc1_32 })
+
+-- Map for 2nd opcode byte (0F xx). True CISC hell. Hey, I told you.
+-- Prefix dependent MMX/SSE opcodes: (none)|rep|o16|repne, -|F3|66|F2
+local map_opc2 = {
+--0x
+[0]="sldt!Dmp","sgdt!Ump","larVrm","lslVrm",nil,"syscall","clts","sysret",
+"invd","wbinvd",nil,"ud1",nil,"$prefetch!Bm","femms","3dnowMrmu",
+--1x
+"movupsXrm|movssXrvm|movupdXrm|movsdXrvm",
+"movupsXmr|movssXmvr|movupdXmr|movsdXmvr",
+"movhlpsXrm$movlpsXrm|movsldupXrm|movlpdXrm|movddupXrm",
+"movlpsXmr||movlpdXmr",
+"unpcklpsXrvm||unpcklpdXrvm",
+"unpckhpsXrvm||unpckhpdXrvm",
+"movlhpsXrm$movhpsXrm|movshdupXrm|movhpdXrm",
+"movhpsXmr||movhpdXmr",
+"$prefetcht!Bm","hintnopVm","hintnopVm","hintnopVm",
+"hintnopVm","hintnopVm","hintnopVm","hintnopVm",
+--2x
+"movUmx$","movUmy$","movUxm$","movUym$","movUmz$",nil,"movUzm$",nil,
+"movapsXrm||movapdXrm",
+"movapsXmr||movapdXmr",
+"cvtpi2psXrMm|cvtsi2ssXrvVmt|cvtpi2pdXrMm|cvtsi2sdXrvVmt",
+"movntpsXmr|movntssXmr|movntpdXmr|movntsdXmr",
+"cvttps2piMrXm|cvttss2siVrXm|cvttpd2piMrXm|cvttsd2siVrXm",
+"cvtps2piMrXm|cvtss2siVrXm|cvtpd2piMrXm|cvtsd2siVrXm",
+"ucomissXrm||ucomisdXrm",
+"comissXrm||comisdXrm",
+--3x
+"wrmsr","rdtsc","rdmsr","rdpmc","sysenter","sysexit",nil,"getsec",
+"opc3*38",nil,"opc3*3a",nil,nil,nil,nil,nil,
+--4x
+"cmovoVrm","cmovnoVrm","cmovbVrm","cmovnbVrm",
+"cmovzVrm","cmovnzVrm","cmovbeVrm","cmovaVrm",
+"cmovsVrm","cmovnsVrm","cmovpeVrm","cmovpoVrm",
+"cmovlVrm","cmovgeVrm","cmovleVrm","cmovgVrm",
+--5x
+"movmskpsVrXm$||movmskpdVrXm$","sqrtpsXrm|sqrtssXrm|sqrtpdXrm|sqrtsdXrm",
+"rsqrtpsXrm|rsqrtssXrvm","rcppsXrm|rcpssXrvm",
+"andpsXrvm||andpdXrvm","andnpsXrvm||andnpdXrvm",
+"orpsXrvm||orpdXrvm","xorpsXrvm||xorpdXrvm",
+"addpsXrvm|addssXrvm|addpdXrvm|addsdXrvm","mulpsXrvm|mulssXrvm|mulpdXrvm|mulsdXrvm",
+"cvtps2pdXrm|cvtss2sdXrvm|cvtpd2psXrm|cvtsd2ssXrvm",
+"cvtdq2psXrm|cvttps2dqXrm|cvtps2dqXrm",
+"subpsXrvm|subssXrvm|subpdXrvm|subsdXrvm","minpsXrvm|minssXrvm|minpdXrvm|minsdXrvm",
+"divpsXrvm|divssXrvm|divpdXrvm|divsdXrvm","maxpsXrvm|maxssXrvm|maxpdXrvm|maxsdXrvm",
+--6x
+"punpcklbwPrvm","punpcklwdPrvm","punpckldqPrvm","packsswbPrvm",
+"pcmpgtbPrvm","pcmpgtwPrvm","pcmpgtdPrvm","packuswbPrvm",
+"punpckhbwPrvm","punpckhwdPrvm","punpckhdqPrvm","packssdwPrvm",
+"||punpcklqdqXrvm","||punpckhqdqXrvm",
+"movPrVSm","movqMrm|movdquXrm|movdqaXrm",
+--7x
+"pshufwMrmu|pshufhwXrmu|pshufdXrmu|pshuflwXrmu","pshiftw!Pvmu",
+"pshiftd!Pvmu","pshiftq!Mvmu||pshiftdq!Xvmu",
+"pcmpeqbPrvm","pcmpeqwPrvm","pcmpeqdPrvm","emms*|",
+"vmreadUmr||extrqXmuu$|insertqXrmuu$","vmwriteUrm||extrqXrm$|insertqXrm$",
+nil,nil,
+"||haddpdXrvm|haddpsXrvm","||hsubpdXrvm|hsubpsXrvm",
+"movVSmMr|movqXrm|movVSmXr","movqMmr|movdquXmr|movdqaXmr",
+--8x
+"joVj","jnoVj","jbVj","jnbVj","jzVj","jnzVj","jbeVj","jaVj",
+"jsVj","jnsVj","jpeVj","jpoVj","jlVj","jgeVj","jleVj","jgVj",
+--9x
+"setoBm","setnoBm","setbBm","setnbBm","setzBm","setnzBm","setbeBm","setaBm",
+"setsBm","setnsBm","setpeBm","setpoBm","setlBm","setgeBm","setleBm","setgBm",
+--Ax
+"push fs","pop fs","cpuid","btVmr","shldVmru","shldVmrc",nil,nil,
+"push gs","pop gs","rsm","btsVmr","shrdVmru","shrdVmrc","fxsave!Dmp","imulVrm",
+--Bx
+"cmpxchgBmr","cmpxchgVmr","$lssVrm","btrVmr",
+"$lfsVrm","$lgsVrm","movzxVrBmt","movzxVrWmt",
+"|popcntVrm","ud2Dp","bt!Vmu","btcVmr",
+"bsfVrm","bsrVrm|lzcntVrm|bsrWrm","movsxVrBmt","movsxVrWmt",
+--Cx
+"xaddBmr","xaddVmr",
+"cmppsXrvmu|cmpssXrvmu|cmppdXrvmu|cmpsdXrvmu","$movntiVmr|",
+"pinsrwPrvWmu","pextrwDrPmu",
+"shufpsXrvmu||shufpdXrvmu","$cmpxchg!Qmp",
+"bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR","bswapVR",
+--Dx
+"||addsubpdXrvm|addsubpsXrvm","psrlwPrvm","psrldPrvm","psrlqPrvm",
+"paddqPrvm","pmullwPrvm",
+"|movq2dqXrMm|movqXmr|movdq2qMrXm$","pmovmskbVrMm||pmovmskbVrXm",
+"psubusbPrvm","psubuswPrvm","pminubPrvm","pandPrvm",
+"paddusbPrvm","padduswPrvm","pmaxubPrvm","pandnPrvm",
+--Ex
+"pavgbPrvm","psrawPrvm","psradPrvm","pavgwPrvm",
+"pmulhuwPrvm","pmulhwPrvm",
+"|cvtdq2pdXrm|cvttpd2dqXrm|cvtpd2dqXrm","$movntqMmr||$movntdqXmr",
+"psubsbPrvm","psubswPrvm","pminswPrvm","porPrvm",
+"paddsbPrvm","paddswPrvm","pmaxswPrvm","pxorPrvm",
+--Fx
+"|||lddquXrm","psllwPrvm","pslldPrvm","psllqPrvm",
+"pmuludqPrvm","pmaddwdPrvm","psadbwPrvm","maskmovqMrm||maskmovdquXrm$",
+"psubbPrvm","psubwPrvm","psubdPrvm","psubqPrvm",
+"paddbPrvm","paddwPrvm","padddPrvm","ud",
+}
+assert(map_opc2[255] == "ud")
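+-- Added example: "|"-separated entries follow the prefix order given
+-- above, so opcode 0F 10 decodes as movups with no prefix, movss under
+-- F3, movupd under 66 and movsd under F2.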
+
+-- Map for three-byte opcodes. Can't wait for their next invention.
+local map_opc3 = {
+["38"] = { -- [66] 0f 38 xx
+--0x
+[0]="pshufbPrvm","phaddwPrvm","phadddPrvm","phaddswPrvm",
+"pmaddubswPrvm","phsubwPrvm","phsubdPrvm","phsubswPrvm",
+"psignbPrvm","psignwPrvm","psigndPrvm","pmulhrswPrvm",
+"||permilpsXrvm","||permilpdXrvm",nil,nil,
+--1x
+"||pblendvbXrma",nil,nil,nil,
+"||blendvpsXrma","||blendvpdXrma","||permpsXrvm","||ptestXrm",
+"||broadcastssXrm","||broadcastsdXrm","||broadcastf128XrlXm",nil,
+"pabsbPrm","pabswPrm","pabsdPrm",nil,
+--2x
+"||pmovsxbwXrm","||pmovsxbdXrm","||pmovsxbqXrm","||pmovsxwdXrm",
+"||pmovsxwqXrm","||pmovsxdqXrm",nil,nil,
+"||pmuldqXrvm","||pcmpeqqXrvm","||$movntdqaXrm","||packusdwXrvm",
+"||maskmovpsXrvm","||maskmovpdXrvm","||maskmovpsXmvr","||maskmovpdXmvr",
+--3x
+"||pmovzxbwXrm","||pmovzxbdXrm","||pmovzxbqXrm","||pmovzxwdXrm",
+"||pmovzxwqXrm","||pmovzxdqXrm","||permdXrvm","||pcmpgtqXrvm",
+"||pminsbXrvm","||pminsdXrvm","||pminuwXrvm","||pminudXrvm",
+"||pmaxsbXrvm","||pmaxsdXrvm","||pmaxuwXrvm","||pmaxudXrvm",
+--4x
+"||pmulddXrvm","||phminposuwXrm",nil,nil,
+nil,"||psrlvVSXrvm","||psravdXrvm","||psllvVSXrvm",
+--5x
+[0x58] = "||pbroadcastdXrlXm",[0x59] = "||pbroadcastqXrlXm",
+[0x5a] = "||broadcasti128XrlXm",
+--7x
+[0x78] = "||pbroadcastbXrlXm",[0x79] = "||pbroadcastwXrlXm",
+--8x
+[0x8c] = "||pmaskmovXrvVSm",
+[0x8e] = "||pmaskmovVSmXvr",
+--9x
+[0x96] = "||fmaddsub132pHXrvm",[0x97] = "||fmsubadd132pHXrvm",
+[0x98] = "||fmadd132pHXrvm",[0x99] = "||fmadd132sHXrvm",
+[0x9a] = "||fmsub132pHXrvm",[0x9b] = "||fmsub132sHXrvm",
+[0x9c] = "||fnmadd132pHXrvm",[0x9d] = "||fnmadd132sHXrvm",
+[0x9e] = "||fnmsub132pHXrvm",[0x9f] = "||fnmsub132sHXrvm",
+--Ax
+[0xa6] = "||fmaddsub213pHXrvm",[0xa7] = "||fmsubadd213pHXrvm",
+[0xa8] = "||fmadd213pHXrvm",[0xa9] = "||fmadd213sHXrvm",
+[0xaa] = "||fmsub213pHXrvm",[0xab] = "||fmsub213sHXrvm",
+[0xac] = "||fnmadd213pHXrvm",[0xad] = "||fnmadd213sHXrvm",
+[0xae] = "||fnmsub213pHXrvm",[0xaf] = "||fnmsub213sHXrvm",
+--Bx
+[0xb6] = "||fmaddsub231pHXrvm",[0xb7] = "||fmsubadd231pHXrvm",
+[0xb8] = "||fmadd231pHXrvm",[0xb9] = "||fmadd231sHXrvm",
+[0xba] = "||fmsub231pHXrvm",[0xbb] = "||fmsub231sHXrvm",
+[0xbc] = "||fnmadd231pHXrvm",[0xbd] = "||fnmadd231sHXrvm",
+[0xbe] = "||fnmsub231pHXrvm",[0xbf] = "||fnmsub231sHXrvm",
+--Dx
+[0xdc] = "||aesencXrvm", [0xdd] = "||aesenclastXrvm",
+[0xde] = "||aesdecXrvm", [0xdf] = "||aesdeclastXrvm",
+--Fx
+[0xf0] = "|||crc32TrBmt",[0xf1] = "|||crc32TrVmt",
+[0xf7] = "| sarxVrmv| shlxVrmv| shrxVrmv",
+},
+
+["3a"] = { -- [66] 0f 3a xx
+--0x
+[0x00]="||permqXrmu","||permpdXrmu","||pblenddXrvmu",nil,
+"||permilpsXrmu","||permilpdXrmu","||perm2f128Xrvmu",nil,
+"||roundpsXrmu","||roundpdXrmu","||roundssXrvmu","||roundsdXrvmu",
+"||blendpsXrvmu","||blendpdXrvmu","||pblendwXrvmu","palignrPrvmu",
+--1x
+nil,nil,nil,nil,
+"||pextrbVmXru","||pextrwVmXru","||pextrVmSXru","||extractpsVmXru",
+"||insertf128XrvlXmu","||extractf128XlXmYru",nil,nil,
+nil,nil,nil,nil,
+--2x
+"||pinsrbXrvVmu","||insertpsXrvmu","||pinsrXrvVmuS",nil,
+--3x
+[0x38] = "||inserti128Xrvmu",[0x39] = "||extracti128XlXmYru",
+--4x
+[0x40] = "||dppsXrvmu",
+[0x41] = "||dppdXrvmu",
+[0x42] = "||mpsadbwXrvmu",
+[0x44] = "||pclmulqdqXrvmu",
+[0x46] = "||perm2i128Xrvmu",
+[0x4a] = "||blendvpsXrvmb",[0x4b] = "||blendvpdXrvmb",
+[0x4c] = "||pblendvbXrvmb",
+--6x
+[0x60] = "||pcmpestrmXrmu",[0x61] = "||pcmpestriXrmu",
+[0x62] = "||pcmpistrmXrmu",[0x63] = "||pcmpistriXrmu",
+[0xdf] = "||aeskeygenassistXrmu",
+--Fx
+[0xf0] = "||| rorxVrmu",
+},
+}
+
+-- Map for VMX/SVM opcodes 0F 01 C0-FF (sgdt group with register operands).
+local map_opcvm = {
+[0xc1]="vmcall",[0xc2]="vmlaunch",[0xc3]="vmresume",[0xc4]="vmxoff",
+[0xc8]="monitor",[0xc9]="mwait",
+[0xd8]="vmrun",[0xd9]="vmmcall",[0xda]="vmload",[0xdb]="vmsave",
+[0xdc]="stgi",[0xdd]="clgi",[0xde]="skinit",[0xdf]="invlpga",
+[0xf8]="swapgs",[0xf9]="rdtscp",
+}
+
+-- Map for FP opcodes. And you thought stack machines are simple?
+local map_opcfp = {
+-- D8-DF 00-BF: opcodes with a memory operand.
+-- D8
+[0]="faddFm","fmulFm","fcomFm","fcompFm","fsubFm","fsubrFm","fdivFm","fdivrFm",
+"fldFm",nil,"fstFm","fstpFm","fldenvVm","fldcwWm","fnstenvVm","fnstcwWm",
+-- DA
+"fiaddDm","fimulDm","ficomDm","ficompDm",
+"fisubDm","fisubrDm","fidivDm","fidivrDm",
+-- DB
+"fildDm","fisttpDm","fistDm","fistpDm",nil,"fld twordFmp",nil,"fstp twordFmp",
+-- DC
+"faddGm","fmulGm","fcomGm","fcompGm","fsubGm","fsubrGm","fdivGm","fdivrGm",
+-- DD
+"fldGm","fisttpQm","fstGm","fstpGm","frstorDmp",nil,"fnsaveDmp","fnstswWm",
+-- DE
+"fiaddWm","fimulWm","ficomWm","ficompWm",
+"fisubWm","fisubrWm","fidivWm","fidivrWm",
+-- DF
+"fildWm","fisttpWm","fistWm","fistpWm",
+"fbld twordFmp","fildQm","fbstp twordFmp","fistpQm",
+-- xx C0-FF: opcodes with a pseudo-register operand.
+-- D8
+"faddFf","fmulFf","fcomFf","fcompFf","fsubFf","fsubrFf","fdivFf","fdivrFf",
+-- D9
+"fldFf","fxchFf",{"fnop"},nil,
+{"fchs","fabs",nil,nil,"ftst","fxam"},
+{"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz"},
+{"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp"},
+{"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos"},
+-- DA
+"fcmovbFf","fcmoveFf","fcmovbeFf","fcmovuFf",nil,{nil,"fucompp"},nil,nil,
+-- DB
+"fcmovnbFf","fcmovneFf","fcmovnbeFf","fcmovnuFf",
+{nil,nil,"fnclex","fninit"},"fucomiFf","fcomiFf",nil,
+-- DC
+"fadd toFf","fmul toFf",nil,nil,
+"fsub toFf","fsubr toFf","fdivr toFf","fdiv toFf",
+-- DD
+"ffreeFf",nil,"fstFf","fstpFf","fucomFf","fucompFf",nil,nil,
+-- DE
+"faddpFf","fmulpFf",nil,{nil,"fcompp"},
+"fsubrpFf","fsubpFf","fdivrpFf","fdivpFf",
+-- DF
+nil,nil,nil,nil,{"fnstsw ax"},"fucomipFf","fcomipFf",nil,
+}
+assert(map_opcfp[126] == "fcomipFf")
+
+-- Map for opcode groups. The subkey is sp from the ModRM byte.
+local map_opcgroup = {
+ arith = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" },
+ shift = { "rol", "ror", "rcl", "rcr", "shl", "shr", "sal", "sar" },
+ testb = { "testBmi", "testBmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ testv = { "testVmi", "testVmi", "not", "neg", "mul", "imul", "div", "idiv" },
+ incb = { "inc", "dec" },
+ incd = { "inc", "dec", "callUmp", "$call farDmp",
+ "jmpUmp", "$jmp farDmp", "pushUm" },
+ sldt = { "sldt", "str", "lldt", "ltr", "verr", "verw" },
+ sgdt = { "vm*$sgdt", "vm*$sidt", "$lgdt", "vm*$lidt",
+ "smsw", nil, "lmsw", "vm*$invlpg" },
+ bt = { nil, nil, nil, nil, "bt", "bts", "btr", "btc" },
+ cmpxchg = { nil, "sz*,cmpxchg8bQmp,cmpxchg16bXmp", nil, nil,
+ nil, nil, "vmptrld|vmxon|vmclear", "vmptrst" },
+ pshiftw = { nil, nil, "psrlw", nil, "psraw", nil, "psllw" },
+ pshiftd = { nil, nil, "psrld", nil, "psrad", nil, "pslld" },
+ pshiftq = { nil, nil, "psrlq", nil, nil, nil, "psllq" },
+ pshiftdq = { nil, nil, "psrlq", "psrldq", nil, nil, "psllq", "pslldq" },
+ fxsave = { "$fxsave", "$fxrstor", "$ldmxcsr", "$stmxcsr",
+ nil, "lfenceDp$", "mfenceDp$", "sfenceDp$clflush" },
+ prefetch = { "prefetch", "prefetchw" },
+ prefetcht = { "prefetchnta", "prefetcht0", "prefetcht1", "prefetcht2" },
+}
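+-- Added example: the group name comes from the "!" pattern and sp is
+-- bits 5-3 of the ModRM byte, so "arith!Bmi" with 80 /7 resolves to the
+-- last arith entry, cmp (a byte compare with an 8-bit immediate).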
+
+------------------------------------------------------------------------------
+
+-- Maps for register names.
+local map_regs = {
+ B = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ B64 = { "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" },
+ W = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w" },
+ D = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d" },
+ Q = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" },
+ M = { "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" }, -- No x64 ext!
+ X = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" },
+ Y = { "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15" },
+}
+local map_segregs = { "es", "cs", "ss", "ds", "fs", "gs", "segr6", "segr7" }
+
+-- Maps for size names.
+local map_sz2n = {
+ B = 1, W = 2, D = 4, Q = 8, M = 8, X = 16, Y = 32,
+}
+local map_sz2prefix = {
+ B = "byte", W = "word", D = "dword",
+ Q = "qword",
+ M = "qword", X = "xword", Y = "yword",
+ F = "dword", G = "qword", -- No need for sizes/register names for these two.
+}
+
+------------------------------------------------------------------------------
+
+-- Output a nicely formatted line with an opcode and operands.
+local function putop(ctx, text, operands)
+ local code, pos, hex = ctx.code, ctx.pos, ""
+ local hmax = ctx.hexdump
+ if hmax > 0 then
+ for i=ctx.start,pos-1 do
+ hex = hex..format("%02X", byte(code, i, i))
+ end
+ if #hex > hmax then hex = sub(hex, 1, hmax)..". "
+ else hex = hex..rep(" ", hmax-#hex+2) end
+ end
+ if operands then text = text.." "..operands end
+ if ctx.o16 then text = "o16 "..text; ctx.o16 = false end
+ if ctx.a32 then text = "a32 "..text; ctx.a32 = false end
+ if ctx.rep then text = ctx.rep.." "..text; ctx.rep = false end
+ if ctx.rex then
+ local t = (ctx.rexw and "w" or "")..(ctx.rexr and "r" or "")..
+ (ctx.rexx and "x" or "")..(ctx.rexb and "b" or "")..
+ (ctx.vexl and "l" or "")
+ if ctx.vexv and ctx.vexv ~= 0 then t = t.."v"..ctx.vexv end
+ if t ~= "" then text = ctx.rex.."."..t.." "..gsub(text, "^ ", "")
+ elseif ctx.rex == "vex" then text = gsub("v"..text, "^v ", "") end
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false; ctx.vexl = false; ctx.vexv = false
+ end
+ if ctx.seg then
+ local text2, n = gsub(text, "%[", "["..ctx.seg..":")
+ if n == 0 then text = ctx.seg.." "..text else text = text2 end
+ ctx.seg = false
+ end
+ if ctx.lock then text = "lock "..text; ctx.lock = false end
+ local imm = ctx.imm
+ if imm then
+ local sym = ctx.symtab[imm]
+ if sym then text = text.."\t->"..sym end
+ end
+ ctx.out(format("%08x %s%s\n", ctx.addr+ctx.start, hex, text))
+ ctx.mrm = false
+ ctx.vexv = false
+ ctx.start = pos
+ ctx.imm = nil
+end
+
+-- Clear all prefix flags.
+local function clearprefixes(ctx)
+ ctx.o16 = false; ctx.seg = false; ctx.lock = false; ctx.rep = false
+ ctx.rexw = false; ctx.rexr = false; ctx.rexx = false; ctx.rexb = false
+ ctx.rex = false; ctx.a32 = false; ctx.vexl = false
+end
+
+-- Fallback for incomplete opcodes at the end.
+local function incomplete(ctx)
+ ctx.pos = ctx.stop+1
+ clearprefixes(ctx)
+ return putop(ctx, "(incomplete)")
+end
+
+-- Fallback for unknown opcodes.
+local function unknown(ctx)
+ clearprefixes(ctx)
+ return putop(ctx, "(unknown)")
+end
+
+-- Return an immediate of the specified size.
+local function getimm(ctx, pos, n)
+ if pos+n-1 > ctx.stop then return incomplete(ctx) end
+ local code = ctx.code
+ if n == 1 then
+ local b1 = byte(code, pos, pos)
+ return b1
+ elseif n == 2 then
+ local b1, b2 = byte(code, pos, pos+1)
+ return b1+b2*256
+ else
+ local b1, b2, b3, b4 = byte(code, pos, pos+3)
+ local imm = b1+b2*256+b3*65536+b4*16777216
+ ctx.imm = imm
+ return imm
+ end
+end
+
+-- Process pattern string and generate the operands.
+local function putpat(ctx, name, pat)
+ local operands, regs, sz, mode, sp, rm, sc, rx, sdisp
+ local code, pos, stop, vexl = ctx.code, ctx.pos, ctx.stop, ctx.vexl
+
+ -- Chars used: 1DFGHIMPQRSTUVWXYabcdfgijlmoprstuvwxyz
+ for p in gmatch(pat, ".") do
+ local x = nil
+ if p == "V" or p == "U" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false
+ elseif ctx.o16 then sz = "W"; ctx.o16 = false
+ elseif p == "U" and ctx.x64 then sz = "Q"
+ else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "T" then
+ if ctx.rexw then sz = "Q"; ctx.rexw = false else sz = "D" end
+ regs = map_regs[sz]
+ elseif p == "B" then
+ sz = "B"
+ regs = ctx.rex and map_regs.B64 or map_regs.B
+ elseif match(p, "[WDQMXYFG]") then
+ sz = p
+ if sz == "X" and vexl then sz = "Y"; ctx.vexl = false end
+ regs = map_regs[sz]
+ elseif p == "P" then
+ sz = ctx.o16 and "X" or "M"; ctx.o16 = false
+ if sz == "X" and vexl then sz = "Y"; ctx.vexl = false end
+ regs = map_regs[sz]
+ elseif p == "H" then
+ name = name..(ctx.rexw and "d" or "s")
+ ctx.rexw = false
+ elseif p == "S" then
+ name = name..lower(sz)
+ elseif p == "s" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = imm <= 127 and format("+0x%02x", imm)
+ or format("-0x%02x", 256-imm)
+ pos = pos+1
+ elseif p == "u" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = format("0x%02x", imm)
+ pos = pos+1
+ elseif p == "b" then
+ local imm = getimm(ctx, pos, 1); if not imm then return end
+ x = regs[imm/16+1]
+ pos = pos+1
+ elseif p == "w" then
+ local imm = getimm(ctx, pos, 2); if not imm then return end
+ x = format("0x%x", imm)
+ pos = pos+2
+ elseif p == "o" then -- [offset]
+ if ctx.x64 then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("[0x%08x%08x]", imm2, imm1)
+ pos = pos+8
+ else
+ local imm = getimm(ctx, pos, 4); if not imm then return end
+ x = format("[0x%08x]", imm)
+ pos = pos+4
+ end
+ elseif p == "i" or p == "I" then
+ local n = map_sz2n[sz]
+ if n == 8 and ctx.x64 and p == "I" then
+ local imm1 = getimm(ctx, pos, 4); if not imm1 then return end
+ local imm2 = getimm(ctx, pos+4, 4); if not imm2 then return end
+ x = format("0x%08x%08x", imm2, imm1)
+ else
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "Q" and (imm < 0 or imm > 0x7fffffff) then
+ imm = (0xffffffff+1)-imm
+ x = format(imm > 65535 and "-0x%08x" or "-0x%x", imm)
+ else
+ x = format(imm > 65535 and "0x%08x" or "0x%x", imm)
+ end
+ end
+ pos = pos+n
+ elseif p == "j" then
+ local n = map_sz2n[sz]
+ if n == 8 then n = 4 end
+ local imm = getimm(ctx, pos, n); if not imm then return end
+ if sz == "B" and imm > 127 then imm = imm-256
+ elseif imm > 2147483647 then imm = imm-4294967296 end
+ pos = pos+n
+ imm = imm + pos + ctx.addr
+ if imm > 4294967295 and not ctx.x64 then imm = imm-4294967296 end
+ ctx.imm = imm
+ if sz == "W" then
+ x = format("word 0x%04x", imm%65536)
+ elseif ctx.x64 then
+ local lo = imm % 0x1000000
+ x = format("0x%02x%06x", (imm-lo) / 0x1000000, lo)
+ else
+ x = "0x"..tohex(imm)
+ end
+ elseif p == "R" then
+ local r = byte(code, pos-1, pos-1)%8
+ if ctx.rexb then r = r + 8; ctx.rexb = false end
+ x = regs[r+1]
+ elseif p == "a" then x = regs[1]
+ elseif p == "c" then x = "cl"
+ elseif p == "d" then x = "dx"
+ elseif p == "1" then x = "1"
+ else
+ if not mode then
+ mode = ctx.mrm
+ if not mode then
+ if pos > stop then return incomplete(ctx) end
+ mode = byte(code, pos, pos)
+ pos = pos+1
+ end
+ rm = mode%8; mode = (mode-rm)/8
+ sp = mode%8; mode = (mode-sp)/8
+ sdisp = ""
+ if mode < 3 then
+ if rm == 4 then
+ if pos > stop then return incomplete(ctx) end
+ sc = byte(code, pos, pos)
+ pos = pos+1
+ rm = sc%8; sc = (sc-rm)/8
+ rx = sc%8; sc = (sc-rx)/8
+ if ctx.rexx then rx = rx + 8; ctx.rexx = false end
+ if rx == 4 then rx = nil end
+ end
+ if mode > 0 or rm == 5 then
+ local dsz = mode
+ if dsz ~= 1 then dsz = 4 end
+ local disp = getimm(ctx, pos, dsz); if not disp then return end
+ if mode == 0 then rm = nil end
+ if rm or rx or (not sc and ctx.x64 and not ctx.a32) then
+ if dsz == 1 and disp > 127 then
+ sdisp = format("-0x%x", 256-disp)
+ elseif disp >= 0 and disp <= 0x7fffffff then
+ sdisp = format("+0x%x", disp)
+ else
+ sdisp = format("-0x%x", (0xffffffff+1)-disp)
+ end
+ else
+ sdisp = format(ctx.x64 and not ctx.a32 and
+ not (disp >= 0 and disp <= 0x7fffffff)
+ and "0xffffffff%08x" or "0x%08x", disp)
+ end
+ pos = pos+dsz
+ end
+ end
+ if rm and ctx.rexb then rm = rm + 8; ctx.rexb = false end
+ if ctx.rexr then sp = sp + 8; ctx.rexr = false end
+ end
+ if p == "m" then
+ if mode == 3 then x = regs[rm+1]
+ else
+ local aregs = ctx.a32 and map_regs.D or ctx.aregs
+ local srm, srx = "", ""
+ if rm then srm = aregs[rm+1]
+ elseif not sc and ctx.x64 and not ctx.a32 then srm = "rip" end
+ ctx.a32 = false
+ if rx then
+ if rm then srm = srm.."+" end
+ srx = aregs[rx+1]
+ if sc > 0 then srx = srx.."*"..(2^sc) end
+ end
+ x = format("[%s%s%s]", srm, srx, sdisp)
+ end
+ if mode < 3 and
+ (not match(pat, "[aRrgp]") or match(pat, "t")) then -- Yuck.
+ x = map_sz2prefix[sz].." "..x
+ end
+ elseif p == "r" then x = regs[sp+1]
+ elseif p == "g" then x = map_segregs[sp+1]
+ elseif p == "p" then -- Suppress prefix.
+ elseif p == "f" then x = "st"..rm
+ elseif p == "x" then
+ if sp == 0 and ctx.lock and not ctx.x64 then
+ x = "CR8"; ctx.lock = false
+ else
+ x = "CR"..sp
+ end
+ elseif p == "v" then
+ if ctx.vexv then
+ x = regs[ctx.vexv+1]; ctx.vexv = false
+ end
+ elseif p == "y" then x = "DR"..sp
+ elseif p == "z" then x = "TR"..sp
+ elseif p == "l" then vexl = false
+ elseif p == "t" then
+ else
+ error("bad pattern `"..pat.."'")
+ end
+ end
+ if x then operands = operands and operands..", "..x or x end
+ end
+ ctx.pos = pos
+ return putop(ctx, name, operands)
+end
+
+-- Forward declaration.
+local map_act
+
+-- Fetch and cache MRM byte.
+local function getmrm(ctx)
+ local mrm = ctx.mrm
+ if not mrm then
+ local pos = ctx.pos
+ if pos > ctx.stop then return nil end
+ mrm = byte(ctx.code, pos, pos)
+ ctx.pos = pos+1
+ ctx.mrm = mrm
+ end
+ return mrm
+end
+
+-- Dispatch to handler depending on pattern.
+local function dispatch(ctx, opat, patgrp)
+ if not opat then return unknown(ctx) end
+ if match(opat, "%|") then -- MMX/SSE variants depending on prefix.
+ local p
+ if ctx.rep then
+ p = ctx.rep=="rep" and "%|([^%|]*)" or "%|[^%|]*%|[^%|]*%|([^%|]*)"
+ ctx.rep = false
+ elseif ctx.o16 then p = "%|[^%|]*%|([^%|]*)"; ctx.o16 = false
+ else p = "^[^%|]*" end
+ opat = match(opat, p)
+ if not opat then return unknown(ctx) end
+-- ctx.rep = false; ctx.o16 = false
+ --XXX fails for 66 f2 0f 38 f1 06 crc32 eax,WORD PTR [esi]
+ --XXX remove in branches?
+ end
+ if match(opat, "%$") then -- reg$mem variants.
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ opat = match(opat, mrm >= 192 and "^[^%$]*" or "%$(.*)")
+ if opat == "" then return unknown(ctx) end
+ end
+ if opat == "" then return unknown(ctx) end
+ local name, pat = match(opat, "^([a-z0-9 ]*)(.*)")
+ if pat == "" and patgrp then pat = patgrp end
+ return map_act[sub(pat, 1, 1)](ctx, name, pat)
+end
+
+-- Get a pattern from an opcode map and dispatch to handler.
+local function dispatchmap(ctx, opcmap)
+ local pos = ctx.pos
+ local opat = opcmap[byte(ctx.code, pos, pos)]
+ pos = pos + 1
+ ctx.pos = pos
+ return dispatch(ctx, opat)
+end
+
+-- Map for action codes. The key is the first char after the name.
+map_act = {
+ -- Simple opcodes without operands.
+ [""] = function(ctx, name, pat)
+ return putop(ctx, name)
+ end,
+
+ -- Operand size chars fall right through.
+ B = putpat, W = putpat, D = putpat, Q = putpat,
+ V = putpat, U = putpat, T = putpat,
+ M = putpat, X = putpat, P = putpat,
+ F = putpat, G = putpat, Y = putpat,
+ H = putpat,
+
+ -- Collect prefixes.
+ [":"] = function(ctx, name, pat)
+ ctx[pat == ":" and name or sub(pat, 2)] = name
+ if ctx.pos - ctx.start > 5 then return unknown(ctx) end -- Limit #prefixes.
+ end,
+
+ -- Chain to special handler specified by name.
+ ["*"] = function(ctx, name, pat)
+ return map_act[name](ctx, name, sub(pat, 2))
+ end,
+
+ -- Use named subtable for opcode group.
+ ["!"] = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ return dispatch(ctx, map_opcgroup[name][((mrm-(mrm%8))/8)%8+1], sub(pat, 2))
+ end,
+
+ -- o16,o32[,o64] variants.
+ sz = function(ctx, name, pat)
+ if ctx.o16 then ctx.o16 = false
+ else
+ pat = match(pat, ",(.*)")
+ if ctx.rexw then
+ local p = match(pat, ",(.*)")
+ if p then pat = p; ctx.rexw = false end
+ end
+ end
+ pat = match(pat, "^[^,]*")
+ return dispatch(ctx, pat)
+ end,
+
+ -- Two-byte opcode dispatch.
+ opc2 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc2)
+ end,
+
+ -- Three-byte opcode dispatch.
+ opc3 = function(ctx, name, pat)
+ return dispatchmap(ctx, map_opc3[pat])
+ end,
+
+ -- VMX/SVM dispatch.
+ vm = function(ctx, name, pat)
+ return dispatch(ctx, map_opcvm[ctx.mrm])
+ end,
+
+ -- Floating point opcode dispatch.
+ fp = function(ctx, name, pat)
+ local mrm = getmrm(ctx); if not mrm then return incomplete(ctx) end
+ local rm = mrm%8
+ local idx = pat*8 + ((mrm-rm)/8)%8
+ if mrm >= 192 then idx = idx + 64 end
+ local opat = map_opcfp[idx]
+ if type(opat) == "table" then opat = opat[rm+1] end
+ return dispatch(ctx, opat)
+ end,
+
+ -- REX prefix.
+ rex = function(ctx, name, pat)
+ if ctx.rex then return unknown(ctx) end -- Only 1 REX or VEX prefix allowed.
+ for p in gmatch(pat, ".") do ctx["rex"..p] = true end
+ ctx.rex = "rex"
+ end,
+
+ -- VEX prefix.
+ vex = function(ctx, name, pat)
+ if ctx.rex then return unknown(ctx) end -- Only 1 REX or VEX prefix allowed.
+ ctx.rex = "vex"
+ local pos = ctx.pos
+ if ctx.mrm then
+ ctx.mrm = nil
+ pos = pos-1
+ end
+ local b = byte(ctx.code, pos, pos)
+ if not b then return incomplete(ctx) end
+ pos = pos+1
+ if b < 128 then ctx.rexr = true end
+ local m = 1
+ if pat == "3" then
+ m = b%32; b = (b-m)/32
+ local nb = b%2; b = (b-nb)/2
+ if nb == 0 then ctx.rexb = true end
+ local nx = b%2
+ if nx == 0 then ctx.rexx = true end
+ b = byte(ctx.code, pos, pos)
+ if not b then return incomplete(ctx) end
+ pos = pos+1
+ if b >= 128 then ctx.rexw = true end
+ end
+ ctx.pos = pos
+ local map
+ if m == 1 then map = map_opc2
+ elseif m == 2 then map = map_opc3["38"]
+ elseif m == 3 then map = map_opc3["3a"]
+ else return unknown(ctx) end
+ local p = b%4; b = (b-p)/4
+ if p == 1 then ctx.o16 = "o16"
+ elseif p == 2 then ctx.rep = "rep"
+ elseif p == 3 then ctx.rep = "repne" end
+ local l = b%2; b = (b-l)/2
+ if l ~= 0 then ctx.vexl = true end
+ ctx.vexv = (-1-b)%16
+ return dispatchmap(ctx, map)
+ end,
+
+ -- Special case for nop with REX prefix.
+ nop = function(ctx, name, pat)
+ return dispatch(ctx, ctx.rex and pat or "nop")
+ end,
+
+ -- Special case for 0F 77.
+ emms = function(ctx, name, pat)
+ if ctx.rex ~= "vex" then
+ return putop(ctx, "emms")
+ elseif ctx.vexl then
+ ctx.vexl = false
+ return putop(ctx, "zeroall")
+ else
+ return putop(ctx, "zeroupper")
+ end
+ end,
+}
+
+------------------------------------------------------------------------------
+
+-- Disassemble a block of code.
+local function disass_block(ctx, ofs, len)
+ if not ofs then ofs = 0 end
+ local stop = len and ofs+len or #ctx.code
+ ofs = ofs + 1
+ ctx.start = ofs
+ ctx.pos = ofs
+ ctx.stop = stop
+ ctx.imm = nil
+ ctx.mrm = false
+ clearprefixes(ctx)
+ while ctx.pos <= stop do dispatchmap(ctx, ctx.map1) end
+ if ctx.pos ~= ctx.start then incomplete(ctx) end
+end
+
+-- Extended API: create a disassembler context. Then call ctx:disass(ofs, len).
+local function create(code, addr, out)
+ local ctx = {}
+ ctx.code = code
+ ctx.addr = (addr or 0) - 1
+ ctx.out = out or io.write
+ ctx.symtab = {}
+ ctx.disass = disass_block
+ ctx.hexdump = 16
+ ctx.x64 = false
+ ctx.map1 = map_opc1_32
+ ctx.aregs = map_regs.D
+ return ctx
+end
+
+local function create64(code, addr, out)
+ local ctx = create(code, addr, out)
+ ctx.x64 = true
+ ctx.map1 = map_opc1_64
+ ctx.aregs = map_regs.Q
+ return ctx
+end
+
+-- Simple API: disassemble code (a string) at address and output via out.
+local function disass(code, addr, out)
+ create(code, addr, out):disass()
+end
+
+local function disass64(code, addr, out)
+ create64(code, addr, out):disass()
+end
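+
+-- Minimal usage sketch (assuming this file is loaded as the jit.dis_x86
+-- module; the two bytes 0x90 0xc3 are just an illustrative nop/ret pair,
+-- and the output shown is approximate):
+--
+--   local dis = require("jit.dis_x86")
+--   dis.disass("\144\195", 0x1000)
+--   --> 00001000 90                nop
+--   --> 00001001 C3                ret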
+
+-- Return register name for RID.
+local function regname(r)
+ if r < 8 then return map_regs.D[r+1] end
+ return map_regs.X[r-7]
+end
+
+local function regname64(r)
+ if r < 16 then return map_regs.Q[r+1] end
+ return map_regs.X[r-15]
+end
+
+-- Public module functions.
+return {
+ create = create,
+ create64 = create64,
+ disass = disass,
+ disass64 = disass64,
+ regname = regname,
+ regname64 = regname64
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/dump.lua b/libs/luajit-cmake/luajit/src/jit/dump.lua
new file mode 100644
index 0000000..18e7a4b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/dump.lua
@@ -0,0 +1,726 @@
+----------------------------------------------------------------------------
+-- LuaJIT compiler dump module.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module can be used to debug the JIT compiler itself. It dumps the
+-- code representations and structures used in various compiler stages.
+--
+-- Example usage:
+--
+-- luajit -jdump -e "local x=0; for i=1,1e6 do x=x+i end; print(x)"
+-- luajit -jdump=im -e "for i=1,1000 do for j=1,1000 do end end" | less -R
+-- luajit -jdump=is myapp.lua | less -R
+-- luajit -jdump=-b myapp.lua
+-- luajit -jdump=+aH,myapp.html myapp.lua
+-- luajit -jdump=ixT,myapp.dump myapp.lua
+--
+-- The first argument specifies the dump mode. The second argument gives
+-- the output file name. Default output is to stdout, unless the environment
+-- variable LUAJIT_DUMPFILE is set. The file is overwritten every time the
+-- module is started.
+--
+-- Different features can be turned on or off with the dump mode. If the
+-- mode starts with a '+', the following features are added to the default
+-- set of features; a '-' removes them. Otherwise the features are replaced.
+--
+-- The following dump features are available (* marks the default):
+--
+-- * t Print a line for each started, ended or aborted trace (see also -jv).
+-- * b Dump the traced bytecode.
+-- * i Dump the IR (intermediate representation).
+-- r Augment the IR with register/stack slots.
+-- s Dump the snapshot map.
+-- * m Dump the generated machine code.
+-- x Print each taken trace exit.
+-- X Print each taken trace exit and the contents of all registers.
+-- a Print the IR of aborted traces, too.
+--
+-- The output format can be set with the following characters:
+--
+-- T Plain text output.
+-- A ANSI-colored text output.
+-- H Colorized HTML + CSS output.
+--
+-- The default output format is plain text. It's set to ANSI-colored text
+-- if the TERM variable matches "color" or COLORTERM is set. Note: this is
+-- independent of any output redirection, which is actually considered a
+-- feature.
+--
+-- You probably want to use less -R to enjoy viewing ANSI-colored text from
+-- a pipe or a file. Add this to your ~/.bashrc: export LESS="-R"
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local funcinfo, funcbc = jutil.funcinfo, jutil.funcbc
+local traceinfo, traceir, tracek = jutil.traceinfo, jutil.traceir, jutil.tracek
+local tracemc, tracesnap = jutil.tracemc, jutil.tracesnap
+local traceexitstub, ircalladdr = jutil.traceexitstub, jutil.ircalladdr
+local bit = require("bit")
+local band, shr, tohex = bit.band, bit.rshift, bit.tohex
+local sub, gsub, format = string.sub, string.gsub, string.format
+local byte, rep = string.byte, string.rep
+local type, tostring = type, tostring
+local stdout, stderr = io.stdout, io.stderr
+
+-- Load other modules on-demand.
+local bcline, disass
+
+-- Active flag, output file handle and dump mode.
+local active, out, dumpmode
+
+------------------------------------------------------------------------------
+
+local symtabmt = { __index = false }
+local symtab = {}
+local nexitsym = 0
+
+-- Fill nested symbol table with per-trace exit stub addresses.
+local function fillsymtab_tr(tr, nexit)
+ local t = {}
+ symtabmt.__index = t
+ if jit.arch:sub(1, 4) == "mips" then
+ t[traceexitstub(tr, 0)] = "exit"
+ return
+ end
+ for i=0,nexit-1 do
+ local addr = traceexitstub(tr, i)
+ if addr < 0 then addr = addr + 2^32 end
+ t[addr] = tostring(i)
+ end
+ local addr = traceexitstub(tr, nexit)
+ if addr then t[addr] = "stack_check" end
+end
+
+-- Fill symbol table with trace exit stub addresses.
+local function fillsymtab(tr, nexit)
+ local t = symtab
+ if nexitsym == 0 then
+ local maskaddr = jit.arch == "arm" and -2
+ local ircall = vmdef.ircall
+ for i=0,#ircall do
+ local addr = ircalladdr(i)
+ if addr ~= 0 then
+ if maskaddr then addr = band(addr, maskaddr) end
+ if addr < 0 then addr = addr + 2^32 end
+ t[addr] = ircall[i]
+ end
+ end
+ end
+ if nexitsym == 1000000 then -- Per-trace exit stubs.
+ fillsymtab_tr(tr, nexit)
+ elseif nexit > nexitsym then -- Shared exit stubs.
+ for i=nexitsym,nexit-1 do
+ local addr = traceexitstub(i)
+ if addr == nil then -- Fall back to per-trace exit stubs.
+ fillsymtab_tr(tr, nexit)
+ setmetatable(symtab, symtabmt)
+ nexit = 1000000
+ break
+ end
+ if addr < 0 then addr = addr + 2^32 end
+ t[addr] = tostring(i)
+ end
+ nexitsym = nexit
+ end
+ return t
+end
+
+local function dumpwrite(s)
+ out:write(s)
+end
+
+-- Disassemble machine code.
+local function dump_mcode(tr)
+ local info = traceinfo(tr)
+ if not info then return end
+ local mcode, addr, loop = tracemc(tr)
+ if not mcode then return end
+ if not disass then disass = require("jit.dis_"..jit.arch) end
+ if addr < 0 then addr = addr + 2^32 end
+ out:write("---- TRACE ", tr, " mcode ", #mcode, "\n")
+ local ctx = disass.create(mcode, addr, dumpwrite)
+ ctx.hexdump = 0
+ ctx.symtab = fillsymtab(tr, info.nexit)
+ if loop ~= 0 then
+ symtab[addr+loop] = "LOOP"
+ ctx:disass(0, loop)
+ out:write("->LOOP:\n")
+ ctx:disass(loop, #mcode-loop)
+ symtab[addr+loop] = nil
+ else
+ ctx:disass(0, #mcode)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local irtype_text = {
+ [0] = "nil",
+ "fal",
+ "tru",
+ "lud",
+ "str",
+ "p32",
+ "thr",
+ "pro",
+ "fun",
+ "p64",
+ "cdt",
+ "tab",
+ "udt",
+ "flt",
+ "num",
+ "i8 ",
+ "u8 ",
+ "i16",
+ "u16",
+ "int",
+ "u32",
+ "i64",
+ "u64",
+ "sfp",
+}
+
+local colortype_ansi = {
+ [0] = "%s",
+ "%s",
+ "%s",
+ "\027[36m%s\027[m",
+ "\027[32m%s\027[m",
+ "%s",
+ "\027[1m%s\027[m",
+ "%s",
+ "\027[1m%s\027[m",
+ "%s",
+ "\027[33m%s\027[m",
+ "\027[31m%s\027[m",
+ "\027[36m%s\027[m",
+ "\027[34m%s\027[m",
+ "\027[34m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+ "\027[35m%s\027[m",
+}
+
+local function colorize_text(s)
+ return s
+end
+
+local function colorize_ansi(s, t, extra)
+ local out = format(colortype_ansi[t], s)
+ if extra then out = "\027[3m"..out end
+ return out
+end
+
+local irtype_ansi = setmetatable({},
+ { __index = function(tab, t)
+ local s = colorize_ansi(irtype_text[t], t); tab[t] = s; return s; end })
+
+local html_escape = { ["<"] = "&lt;", [">"] = "&gt;", ["&"] = "&amp;", }
+
+local function colorize_html(s, t, extra)
+ s = gsub(s, "[<>&]", html_escape)
+ return format('<span class="irt_%s%s">%s</span>',
+ irtype_text[t], extra and " irt_extra" or "", s)
+end
+
+local irtype_html = setmetatable({},
+ { __index = function(tab, t)
+ local s = colorize_html(irtype_text[t], t); tab[t] = s; return s; end })
+
+local header_html = [[
+<style type="text/css">
+background { background: #ffffff; color: #000000; }
+pre.ljdump {
+font-size: 10pt;
+background: #f0f4ff;
+color: #000000;
+border: 1px solid #bfcfff;
+padding: 0.5em;
+margin-left: 2em;
+margin-right: 2em;
+}
+span.irt_str { color: #00a000; }
+span.irt_thr, span.irt_fun { color: #404040; font-weight: bold; }
+span.irt_tab { color: #c00000; }
+span.irt_udt, span.irt_lud { color: #00c0c0; }
+span.irt_num { color: #4040c0; }
+span.irt_int, span.irt_i8, span.irt_u8, span.irt_i16, span.irt_u16 { color: #b040b0; }
+span.irt_extra { font-style: italic; }
+</style>
+]]
+
+local colorize, irtype
+
+-- Lookup tables to convert some literals into names.
+local litname = {
+ ["SLOAD "] = setmetatable({}, { __index = function(t, mode)
+ local s = ""
+ if band(mode, 1) ~= 0 then s = s.."P" end
+ if band(mode, 2) ~= 0 then s = s.."F" end
+ if band(mode, 4) ~= 0 then s = s.."T" end
+ if band(mode, 8) ~= 0 then s = s.."C" end
+ if band(mode, 16) ~= 0 then s = s.."R" end
+ if band(mode, 32) ~= 0 then s = s.."I" end
+ if band(mode, 64) ~= 0 then s = s.."K" end
+ t[mode] = s
+ return s
+ end}),
+ ["XLOAD "] = { [0] = "", "R", "V", "RV", "U", "RU", "VU", "RVU", },
+ ["CONV "] = setmetatable({}, { __index = function(t, mode)
+ local s = irtype[band(mode, 31)]
+ s = irtype[band(shr(mode, 5), 31)].."."..s
+ if band(mode, 0x800) ~= 0 then s = s.." sext" end
+ local c = shr(mode, 12)
+ if c == 1 then s = s.." none"
+ elseif c == 2 then s = s.." index"
+ elseif c == 3 then s = s.." check" end
+ t[mode] = s
+ return s
+ end}),
+ ["FLOAD "] = vmdef.irfield,
+ ["FREF "] = vmdef.irfield,
+ ["FPMATH"] = vmdef.irfpm,
+ ["TMPREF"] = { [0] = "", "IN", "OUT", "INOUT", "", "", "OUT2", "INOUT2" },
+ ["BUFHDR"] = { [0] = "RESET", "APPEND", "WRITE" },
+ ["TOSTR "] = { [0] = "INT", "NUM", "CHAR" },
+}
+
+local function ctlsub(c)
+ if c == "\n" then return "\\n"
+ elseif c == "\r" then return "\\r"
+ elseif c == "\t" then return "\\t"
+ else return format("\\%03d", byte(c))
+ end
+end
+
+local function fmtfunc(func, pc)
+ local fi = funcinfo(func, pc)
+ if fi.loc then
+ return fi.loc
+ elseif fi.ffid then
+ return vmdef.ffnames[fi.ffid]
+ elseif fi.addr then
+ return format("C:%x", fi.addr)
+ else
+ return "(?)"
+ end
+end
+
+local function formatk(tr, idx, sn)
+ local k, t, slot = tracek(tr, idx)
+ local tn = type(k)
+ local s
+ if tn == "number" then
+ if t < 12 then
+ s = k == 0 and "NULL" or format("[0x%08x]", k)
+ elseif band(sn or 0, 0x30000) ~= 0 then
+ s = band(sn, 0x20000) ~= 0 and "contpc" or "ftsz"
+ elseif k == 2^52+2^51 then
+ s = "bias"
+ else
+ s = format(0 < k and k < 0x1p-1026 and "%+a" or "%+.14g", k)
+ end
+ elseif tn == "string" then
+ s = format(#k > 20 and '"%.20s"~' or '"%s"', gsub(k, "%c", ctlsub))
+ elseif tn == "function" then
+ s = fmtfunc(k)
+ elseif tn == "table" then
+ s = format("{%p}", k)
+ elseif tn == "userdata" then
+ if t == 12 then
+ s = format("userdata:%p", k)
+ else
+ s = format("[%p]", k)
+ if s == "[NULL]" then s = "NULL" end
+ end
+ elseif t == 21 then -- int64_t
+ s = sub(tostring(k), 1, -3)
+ if sub(s, 1, 1) ~= "-" then s = "+"..s end
+ elseif sn == 0x1057fff then -- SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)
+ return "----" -- Special case for LJ_FR2 slot 1.
+ else
+ s = tostring(k) -- For primitives.
+ end
+ s = colorize(format("%-4s", s), t, band(sn or 0, 0x100000) ~= 0)
+ if slot then
+ s = format("%s @%d", s, slot)
+ end
+ return s
+end
+
+local function printsnap(tr, snap)
+ local n = 2
+ for s=0,snap[1]-1 do
+ local sn = snap[n]
+ if shr(sn, 24) == s then
+ n = n + 1
+ local ref = band(sn, 0xffff) - 0x8000 -- REF_BIAS
+ if ref < 0 then
+ out:write(formatk(tr, ref, sn))
+ elseif band(sn, 0x80000) ~= 0 then -- SNAP_SOFTFPNUM
+ out:write(colorize(format("%04d/%04d", ref, ref+1), 14))
+ else
+ local m, ot, op1, op2 = traceir(tr, ref)
+ out:write(colorize(format("%04d", ref), band(ot, 31), band(sn, 0x100000) ~= 0))
+ end
+ out:write(band(sn, 0x10000) == 0 and " " or "|") -- SNAP_FRAME
+ else
+ out:write("---- ")
+ end
+ end
+ out:write("]\n")
+end
+
+-- Dump snapshots (not interleaved with IR).
+local function dump_snap(tr)
+ out:write("---- TRACE ", tr, " snapshots\n")
+ for i=0,1000000000 do
+ local snap = tracesnap(tr, i)
+ if not snap then break end
+ out:write(format("#%-3d %04d [ ", i, snap[0]))
+ printsnap(tr, snap)
+ end
+end
+
+-- Return a register name or stack slot for a rid/sp location.
+local function ridsp_name(ridsp, ins)
+ if not disass then disass = require("jit.dis_"..jit.arch) end
+ local rid, slot = band(ridsp, 0xff), shr(ridsp, 8)
+ if rid == 253 or rid == 254 then
+ return (slot == 0 or slot == 255) and " {sink" or format(" {%04d", ins-slot)
+ end
+ if ridsp > 255 then return format("[%x]", slot*4) end
+ if rid < 128 then return disass.regname(rid) end
+ return ""
+end
+
+-- Dump CALL* function ref and return optional ctype.
+local function dumpcallfunc(tr, ins)
+ local ctype
+ if ins > 0 then
+ local m, ot, op1, op2 = traceir(tr, ins)
+ if band(ot, 31) == 0 then -- nil type means CARG(func, ctype).
+ ins = op1
+ ctype = formatk(tr, op2)
+ end
+ end
+ if ins < 0 then
+ out:write(format("[0x%x](", tonumber((tracek(tr, ins)))))
+ else
+ out:write(format("%04d (", ins))
+ end
+ return ctype
+end
+
+-- Recursively gather CALL* args and dump them.
+local function dumpcallargs(tr, ins)
+ if ins < 0 then
+ out:write(formatk(tr, ins))
+ else
+ local m, ot, op1, op2 = traceir(tr, ins)
+ local oidx = 6*shr(ot, 8)
+ local op = sub(vmdef.irnames, oidx+1, oidx+6)
+ if op == "CARG " then
+ dumpcallargs(tr, op1)
+ if op2 < 0 then
+ out:write(" ", formatk(tr, op2))
+ else
+ out:write(" ", format("%04d", op2))
+ end
+ else
+ out:write(format("%04d", ins))
+ end
+ end
+end
+
+-- Dump IR and interleaved snapshots.
+local function dump_ir(tr, dumpsnap, dumpreg)
+ local info = traceinfo(tr)
+ if not info then return end
+ local nins = info.nins
+ out:write("---- TRACE ", tr, " IR\n")
+ local irnames = vmdef.irnames
+ local snapref = 65536
+ local snap, snapno
+ if dumpsnap then
+ snap = tracesnap(tr, 0)
+ snapref = snap[0]
+ snapno = 0
+ end
+ for ins=1,nins do
+ if ins >= snapref then
+ if dumpreg then
+ out:write(format("....              SNAP   #%-3d [ ", snapno))
+ else
+ out:write(format("....        SNAP   #%-3d [ ", snapno))
+ end
+ printsnap(tr, snap)
+ snapno = snapno + 1
+ snap = tracesnap(tr, snapno)
+ snapref = snap and snap[0] or 65536
+ end
+ local m, ot, op1, op2, ridsp = traceir(tr, ins)
+ local oidx, t = 6*shr(ot, 8), band(ot, 31)
+ local op = sub(irnames, oidx+1, oidx+6)
+ if op == "LOOP " then
+ if dumpreg then
+ out:write(format("%04d ------------ LOOP ------------\n", ins))
+ else
+ out:write(format("%04d ------ LOOP ------------\n", ins))
+ end
+ elseif op ~= "NOP " and op ~= "CARG " and
+ (dumpreg or op ~= "RENAME") then
+ local rid = band(ridsp, 255)
+ if dumpreg then
+ out:write(format("%04d %-6s", ins, ridsp_name(ridsp, ins)))
+ else
+ out:write(format("%04d ", ins))
+ end
+ out:write(format("%s%s %s %s ",
+ (rid == 254 or rid == 253) and "}" or
+ (band(ot, 128) == 0 and " " or ">"),
+ band(ot, 64) == 0 and " " or "+",
+ irtype[t], op))
+ local m1, m2 = band(m, 3), band(m, 3*4)
+ if sub(op, 1, 4) == "CALL" then
+ local ctype
+ if m2 == 1*4 then -- op2 == IRMlit
+ out:write(format("%-10s (", vmdef.ircall[op2]))
+ else
+ ctype = dumpcallfunc(tr, op2)
+ end
+ if op1 ~= -1 then dumpcallargs(tr, op1) end
+ out:write(")")
+ if ctype then out:write(" ctype ", ctype) end
+ elseif op == "CNEW " and op2 == -1 then
+ out:write(formatk(tr, op1))
+ elseif m1 ~= 3 then -- op1 != IRMnone
+ if op1 < 0 then
+ out:write(formatk(tr, op1))
+ else
+ out:write(format(m1 == 0 and "%04d" or "#%-3d", op1))
+ end
+ if m2 ~= 3*4 then -- op2 != IRMnone
+ if m2 == 1*4 then -- op2 == IRMlit
+ local litn = litname[op]
+ if litn and litn[op2] then
+ out:write(" ", litn[op2])
+ elseif op == "UREFO " or op == "UREFC " then
+ out:write(format(" #%-3d", shr(op2, 8)))
+ else
+ out:write(format(" #%-3d", op2))
+ end
+ elseif op2 < 0 then
+ out:write(" ", formatk(tr, op2))
+ else
+ out:write(format(" %04d", op2))
+ end
+ end
+ end
+ out:write("\n")
+ end
+ end
+ if snap then
+ if dumpreg then
+ out:write(format("....              SNAP   #%-3d [ ", snapno))
+ else
+ out:write(format("....        SNAP   #%-3d [ ", snapno))
+ end
+ printsnap(tr, snap)
+ end
+end
+
+------------------------------------------------------------------------------
+
+local recprefix = ""
+local recdepth = 0
+
+-- Format trace error message.
+local function fmterr(err, info)
+ if type(err) == "number" then
+ if type(info) == "function" then info = fmtfunc(info) end
+ err = format(vmdef.traceerr[err], info)
+ end
+ return err
+end
+
+-- Dump trace states.
+local function dump_trace(what, tr, func, pc, otr, oex)
+ if what == "stop" or (what == "abort" and dumpmode.a) then
+ if dumpmode.i then dump_ir(tr, dumpmode.s, dumpmode.r and what == "stop")
+ elseif dumpmode.s then dump_snap(tr) end
+ if dumpmode.m then dump_mcode(tr) end
+ end
+ if what == "start" then
+ if dumpmode.H then out:write('<pre class="ljdump">\n') end
+ out:write("---- TRACE ", tr, " ", what)
+ if otr then out:write(" ", otr, "/", oex == -1 and "stitch" or oex) end
+ out:write(" ", fmtfunc(func, pc), "\n")
+ elseif what == "stop" or what == "abort" then
+ out:write("---- TRACE ", tr, " ", what)
+ if what == "abort" then
+ out:write(" ", fmtfunc(func, pc), " -- ", fmterr(otr, oex), "\n")
+ else
+ local info = traceinfo(tr)
+ local link, ltype = info.link, info.linktype
+ if link == tr or link == 0 then
+ out:write(" -> ", ltype, "\n")
+ elseif ltype == "root" then
+ out:write(" -> ", link, "\n")
+ else
+ out:write(" -> ", link, " ", ltype, "\n")
+ end
+ end
+ if dumpmode.H then out:write("</pre>\n\n") else out:write("\n") end
+ else
+ if what == "flush" then symtab, nexitsym = {}, 0 end
+ out:write("---- TRACE ", what, "\n\n")
+ end
+ out:flush()
+end
+
+-- Dump recorded bytecode.
+local function dump_record(tr, func, pc, depth)
+ if depth ~= recdepth then
+ recdepth = depth
+ recprefix = rep(" .", depth)
+ end
+ local line
+ if pc >= 0 then
+ line = bcline(func, pc, recprefix)
+ if dumpmode.H then line = gsub(line, "[<>&]", html_escape) end
+ else
+ line = "0000 "..recprefix.." FUNCC \n"
+ end
+ if pc <= 0 then
+ out:write(sub(line, 1, -2), " ; ", fmtfunc(func), "\n")
+ else
+ out:write(line)
+ end
+ if pc >= 0 and band(funcbc(func, pc), 0xff) < 16 then -- ORDER BC
+ out:write(bcline(func, pc+1, recprefix)) -- Write JMP for cond.
+ end
+end
+
+------------------------------------------------------------------------------
+
+local gpr64 = jit.arch:match("64")
+local fprmips32 = jit.arch == "mips" or jit.arch == "mipsel"
+
+-- Dump taken trace exits.
+local function dump_texit(tr, ex, ngpr, nfpr, ...)
+ out:write("---- TRACE ", tr, " exit ", ex, "\n")
+ if dumpmode.X then
+ local regs = {...}
+ if gpr64 then
+ for i=1,ngpr do
+ out:write(format(" %016x", regs[i]))
+ if i % 4 == 0 then out:write("\n") end
+ end
+ else
+ for i=1,ngpr do
+ out:write(" ", tohex(regs[i]))
+ if i % 8 == 0 then out:write("\n") end
+ end
+ end
+ if fprmips32 then
+ for i=1,nfpr,2 do
+ out:write(format(" %+17.14g", regs[ngpr+i]))
+ if i % 8 == 7 then out:write("\n") end
+ end
+ else
+ for i=1,nfpr do
+ out:write(format(" %+17.14g", regs[ngpr+i]))
+ if i % 4 == 0 then out:write("\n") end
+ end
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Detach dump handlers.
+local function dumpoff()
+ if active then
+ active = false
+ jit.attach(dump_texit)
+ jit.attach(dump_record)
+ jit.attach(dump_trace)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach dump handlers.
+local function dumpon(opt, outfile)
+ if active then dumpoff() end
+
+ local term = os.getenv("TERM")
+ local colormode = (term and term:match("color") or os.getenv("COLORTERM")) and "A" or "T"
+ if opt then
+ opt = gsub(opt, "[TAH]", function(mode) colormode = mode; return ""; end)
+ end
+
+ local m = { t=true, b=true, i=true, m=true, }
+ if opt and opt ~= "" then
+ local o = sub(opt, 1, 1)
+ if o ~= "+" and o ~= "-" then m = {} end
+ for i=1,#opt do m[sub(opt, i, i)] = (o ~= "-") end
+ end
+ dumpmode = m
+
+ if m.t or m.b or m.i or m.s or m.m then
+ jit.attach(dump_trace, "trace")
+ end
+ if m.b then
+ jit.attach(dump_record, "record")
+ if not bcline then bcline = require("jit.bc").line end
+ end
+ if m.x or m.X then
+ jit.attach(dump_texit, "texit")
+ end
+
+ if not outfile then outfile = os.getenv("LUAJIT_DUMPFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stdout
+ end
+
+ m[colormode] = true
+ if colormode == "A" then
+ colorize = colorize_ansi
+ irtype = irtype_ansi
+ elseif colormode == "H" then
+ colorize = colorize_html
+ irtype = irtype_html
+ out:write(header_html)
+ else
+ colorize = colorize_text
+ irtype = irtype_text
+ end
+
+ active = true
+end
+
+-- Public module functions.
+return {
+ on = dumpon,
+ off = dumpoff,
+ start = dumpon -- For -j command line option.
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/p.lua b/libs/luajit-cmake/luajit/src/jit/p.lua
new file mode 100644
index 0000000..f225c31
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/p.lua
@@ -0,0 +1,312 @@
+----------------------------------------------------------------------------
+-- LuaJIT profiler.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module is a simple command line interface to the built-in
+-- low-overhead profiler of LuaJIT.
+--
+-- The lower-level API of the profiler is accessible via the "jit.profile"
+-- module or the luaJIT_profile_* C API.
+--
+-- Example usage:
+--
+-- luajit -jp myapp.lua
+-- luajit -jp=s myapp.lua
+-- luajit -jp=-s myapp.lua
+-- luajit -jp=vl myapp.lua
+-- luajit -jp=G,profile.txt myapp.lua
+--
+-- The following dump features are available:
+--
+-- f Stack dump: function name, otherwise module:line. Default mode.
+-- F Stack dump: ditto, but always prepend module.
+-- l Stack dump: module:line.
+-- <number> Stack dump depth (callee < caller). Default: 1.
+-- -<number> Inverse stack dump depth (caller > callee).
+-- s Split stack dump after first stack level. Implies abs(depth) >= 2.
+-- p Show full path for module names.
+-- v Show VM states. Can be combined with stack dumps, e.g. vf or fv.
+-- z Show zones. Can be combined with stack dumps, e.g. zf or fz.
+-- r Show raw sample counts. Default: show percentages.
+-- a Annotate excerpts from source code files.
+-- A Annotate complete source code files.
+-- G Produce raw output suitable for graphical tools (e.g. flame graphs).
+-- m<number> Minimum sample percentage to be shown. Default: 3.
+-- i<number> Sampling interval in milliseconds. Default: 10.
+--
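+-- For example, "luajit -jp=G,stacks.txt myapp.lua" (the file name is just
+-- illustrative) writes raw stack counts that external flame-graph tools
+-- can consume.
+--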
+----------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
+local profile = require("jit.profile")
+local vmdef = require("jit.vmdef")
+local math = math
+local pairs, ipairs, tonumber, floor = pairs, ipairs, tonumber, math.floor
+local sort, format = table.sort, string.format
+local stdout = io.stdout
+local zone -- Load jit.zone module on demand.
+
+-- Output file handle.
+local out
+
+------------------------------------------------------------------------------
+
+local prof_ud
+local prof_states, prof_split, prof_min, prof_raw, prof_fmt, prof_depth
+local prof_ann, prof_count1, prof_count2, prof_samples
+
+local map_vmmode = {
+ N = "Compiled",
+ I = "Interpreted",
+ C = "C code",
+ G = "Garbage Collector",
+ J = "JIT Compiler",
+}
+
+-- Profiler callback.
+local function prof_cb(th, samples, vmmode)
+ prof_samples = prof_samples + samples
+ local key_stack, key_stack2, key_state
+ -- Collect keys for sample.
+ if prof_states then
+ if prof_states == "v" then
+ key_state = map_vmmode[vmmode] or vmmode
+ else
+ key_state = zone:get() or "(none)"
+ end
+ end
+ if prof_fmt then
+ key_stack = profile.dumpstack(th, prof_fmt, prof_depth)
+ key_stack = key_stack:gsub("%[builtin#(%d+)%]", function(x)
+ return vmdef.ffnames[tonumber(x)]
+ end)
+ if prof_split == 2 then
+ local k1, k2 = key_stack:match("(.-) [<>] (.*)")
+ if k2 then key_stack, key_stack2 = k1, k2 end
+ elseif prof_split == 3 then
+ key_stack2 = profile.dumpstack(th, "l", 1)
+ end
+ end
+ -- Order keys.
+ local k1, k2
+ if prof_split == 1 then
+ if key_state then
+ k1 = key_state
+ if key_stack then k2 = key_stack end
+ end
+ elseif key_stack then
+ k1 = key_stack
+ if key_stack2 then k2 = key_stack2 elseif key_state then k2 = key_state end
+ end
+ -- Coalesce samples in one or two levels.
+ if k1 then
+ local t1 = prof_count1
+ t1[k1] = (t1[k1] or 0) + samples
+ if k2 then
+ local t2 = prof_count2
+ local t3 = t2[k1]
+ if not t3 then t3 = {}; t2[k1] = t3 end
+ t3[k2] = (t3[k2] or 0) + samples
+ end
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Show top N list.
+local function prof_top(count1, count2, samples, indent)
+ local t, n = {}, 0
+ for k in pairs(count1) do
+ n = n + 1
+ t[n] = k
+ end
+ sort(t, function(a, b) return count1[a] > count1[b] end)
+ for i=1,n do
+ local k = t[i]
+ local v = count1[k]
+ local pct = floor(v*100/samples + 0.5)
+ if pct < prof_min then break end
+ if not prof_raw then
+ out:write(format("%s%2d%% %s\n", indent, pct, k))
+ elseif prof_raw == "r" then
+ out:write(format("%s%5d %s\n", indent, v, k))
+ else
+ out:write(format("%s %d\n", k, v))
+ end
+ if count2 then
+ local r = count2[k]
+ if r then
+ prof_top(r, nil, v, (prof_split == 3 or prof_split == 1) and " -- " or
+ (prof_depth < 0 and " -> " or " <- "))
+ end
+ end
+ end
+end
+
+-- Annotate source code.
+local function prof_annotate(count1, samples)
+ local files = {}
+ local ms = 0
+ for k, v in pairs(count1) do
+ local pct = floor(v*100/samples + 0.5)
+ ms = math.max(ms, v)
+ if pct >= prof_min then
+ local file, line = k:match("^(.*):(%d+)$")
+ if not file then file = k; line = 0 end
+ local fl = files[file]
+ if not fl then fl = {}; files[file] = fl; files[#files+1] = file end
+ line = tonumber(line)
+ fl[line] = prof_raw and v or pct
+ end
+ end
+ sort(files)
+ local fmtv, fmtn = " %3d%% | %s\n", "      | %s\n"
+ if prof_raw then
+ local n = math.max(5, math.ceil(math.log10(ms)))
+ fmtv = "%"..n.."d | %s\n"
+ fmtn = (" "):rep(n).." | %s\n"
+ end
+ local ann = prof_ann
+ for _, file in ipairs(files) do
+ local f0 = file:byte()
+ if f0 == 40 or f0 == 91 then
+ out:write(format("\n====== %s ======\n[Cannot annotate non-file]\n", file))
+ break
+ end
+ local fp, err = io.open(file)
+ if not fp then
+ out:write(format("====== ERROR: %s: %s\n", file, err))
+ break
+ end
+ out:write(format("\n====== %s ======\n", file))
+ local fl = files[file]
+ local n, show = 1, false
+ if ann ~= 0 then
+ for i=1,ann do
+ if fl[i] then show = true; out:write("@@ 1 @@\n"); break end
+ end
+ end
+ for line in fp:lines() do
+ if line:byte() == 27 then
+ out:write("[Cannot annotate bytecode file]\n")
+ break
+ end
+ local v = fl[n]
+ if ann ~= 0 then
+ local v2 = fl[n+ann]
+ if show then
+ if v2 then show = n+ann elseif v then show = n
+ elseif show+ann < n then show = false end
+ elseif v2 then
+ show = n+ann
+ out:write(format("@@ %d @@\n", n))
+ end
+ if not show then goto next end
+ end
+ if v then
+ out:write(format(fmtv, v, line))
+ else
+ out:write(format(fmtn, line))
+ end
+ ::next::
+ n = n + 1
+ end
+ fp:close()
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Finish profiling and dump result.
+local function prof_finish()
+ if prof_ud then
+ profile.stop()
+ local samples = prof_samples
+ if samples == 0 then
+ if prof_raw ~= true then out:write("[No samples collected]\n") end
+ return
+ end
+ if prof_ann then
+ prof_annotate(prof_count1, samples)
+ else
+ prof_top(prof_count1, prof_count2, samples, "")
+ end
+ prof_count1 = nil
+ prof_count2 = nil
+ prof_ud = nil
+ if out ~= stdout then out:close() end
+ end
+end
+
+-- Start profiling.
+local function prof_start(mode)
+ local interval = ""
+ mode = mode:gsub("i%d*", function(s) interval = s; return "" end)
+ prof_min = 3
+ mode = mode:gsub("m(%d+)", function(s) prof_min = tonumber(s); return "" end)
+ prof_depth = 1
+ mode = mode:gsub("%-?%d+", function(s) prof_depth = tonumber(s); return "" end)
+ local m = {}
+ for c in mode:gmatch(".") do m[c] = c end
+ prof_states = m.z or m.v
+ if prof_states == "z" then zone = require("jit.zone") end
+ local scope = m.l or m.f or m.F or (prof_states and "" or "f")
+ local flags = (m.p or "")
+ prof_raw = m.r
+ if m.s then
+ prof_split = 2
+ if prof_depth == -1 or m["-"] then prof_depth = -2
+ elseif prof_depth == 1 then prof_depth = 2 end
+ elseif mode:find("[fF].*l") then
+ scope = "l"
+ prof_split = 3
+ else
+ prof_split = (scope == "" or mode:find("[zv].*[lfF]")) and 1 or 0
+ end
+ prof_ann = m.A and 0 or (m.a and 3)
+ if prof_ann then
+ scope = "l"
+ prof_fmt = "pl"
+ prof_split = 0
+ prof_depth = 1
+ elseif m.G and scope ~= "" then
+ prof_fmt = flags..scope.."Z;"
+ prof_depth = -100
+ prof_raw = true
+ prof_min = 0
+ elseif scope == "" then
+ prof_fmt = false
+ else
+ local sc = prof_split == 3 and m.f or m.F or scope
+ prof_fmt = flags..sc..(prof_depth >= 0 and "Z < " or "Z > ")
+ end
+ prof_count1 = {}
+ prof_count2 = {}
+ prof_samples = 0
+ profile.start(scope:lower()..interval, prof_cb)
+ prof_ud = newproxy(true)
+ getmetatable(prof_ud).__gc = prof_finish
+end
+
+------------------------------------------------------------------------------
+
+local function start(mode, outfile)
+ if not outfile then outfile = os.getenv("LUAJIT_PROFILEFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stdout
+ end
+ prof_start(mode or "f")
+end
+
+-- Public module functions.
+return {
+ start = start, -- For -j command line option.
+ stop = prof_finish
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/v.lua b/libs/luajit-cmake/luajit/src/jit/v.lua
new file mode 100644
index 0000000..ac8b19d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/v.lua
@@ -0,0 +1,170 @@
+----------------------------------------------------------------------------
+-- Verbose mode of the LuaJIT compiler.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module shows verbose information about the progress of the
+-- JIT compiler. It prints one line for each generated trace. This module
+-- is useful to see which code has been compiled or where the compiler
+-- punts and falls back to the interpreter.
+--
+-- Example usage:
+--
+-- luajit -jv -e "for i=1,1000 do for j=1,1000 do end end"
+-- luajit -jv=myapp.out myapp.lua
+--
+-- Default output is to stderr. To redirect the output to a file, pass a
+-- filename as an argument (use '-' for stdout) or set the environment
+-- variable LUAJIT_VERBOSEFILE. The file is overwritten every time the
+-- module is started.
+--
+-- The output from the first example should look like this:
+--
+-- [TRACE 1 (command line):1 loop]
+-- [TRACE 2 (1/3) (command line):1 -> 1]
+--
+-- The first number in each line is the internal trace number. Next are
+-- the file name ('(command line)') and the line number (':1') where the
+-- trace has started. Side traces also show the parent trace number and
+-- the exit number where they are attached to in parentheses ('(1/3)').
+-- An arrow at the end shows where the trace links to ('-> 1'), unless
+-- it loops to itself.
+--
+-- In this case the inner loop gets hot and is traced first, generating
+-- a root trace. Then the last exit from the 1st trace gets hot, too,
+-- and triggers generation of the 2nd trace. The side trace follows the
+-- path along the outer loop and *around* the inner loop, back to its
+-- start, and then links to the 1st trace. Yes, this may seem unusual,
+-- if you know how traditional compilers work. Trace compilers are full
+-- of surprises like this -- have fun! :-)
+--
+-- Aborted traces are shown like this:
+--
+-- [TRACE --- foo.lua:44 -- leaving loop in root trace at foo.lua:50]
+--
+-- Don't worry -- trace aborts are quite common, even in programs which
+-- can be fully compiled. The compiler may retry several times until it
+-- finds a suitable trace.
+--
+-- Of course this doesn't work with features that are not-yet-implemented
+-- (NYI error messages). The VM simply falls back to the interpreter. This
+-- may not matter at all if the particular trace is not very high up in
+-- the CPU usage profile. Oh, and the interpreter is quite fast, too.
+--
+-- Also check out the -jdump module, which prints all the gory details.
+--
+------------------------------------------------------------------------------
+
+-- Cache some library functions and objects.
+local jit = require("jit")
+assert(jit.version_num == 20100, "LuaJIT core/library version mismatch")
+local jutil = require("jit.util")
+local vmdef = require("jit.vmdef")
+local funcinfo, traceinfo = jutil.funcinfo, jutil.traceinfo
+local type, format = type, string.format
+local stdout, stderr = io.stdout, io.stderr
+
+-- Active flag and output file handle.
+local active, out
+
+------------------------------------------------------------------------------
+
+local startloc, startex
+
+local function fmtfunc(func, pc)
+ local fi = funcinfo(func, pc)
+ if fi.loc then
+ return fi.loc
+ elseif fi.ffid then
+ return vmdef.ffnames[fi.ffid]
+ elseif fi.addr then
+ return format("C:%x", fi.addr)
+ else
+ return "(?)"
+ end
+end
+
+-- Format trace error message.
+local function fmterr(err, info)
+ if type(err) == "number" then
+ if type(info) == "function" then info = fmtfunc(info) end
+ err = format(vmdef.traceerr[err], info)
+ end
+ return err
+end
+
+-- Dump trace states.
+local function dump_trace(what, tr, func, pc, otr, oex)
+ if what == "start" then
+ startloc = fmtfunc(func, pc)
+ startex = otr and "("..otr.."/"..(oex == -1 and "stitch" or oex)..") " or ""
+ else
+ if what == "abort" then
+ local loc = fmtfunc(func, pc)
+ if loc ~= startloc then
+ out:write(format("[TRACE --- %s%s -- %s at %s]\n",
+ startex, startloc, fmterr(otr, oex), loc))
+ else
+ out:write(format("[TRACE --- %s%s -- %s]\n",
+ startex, startloc, fmterr(otr, oex)))
+ end
+ elseif what == "stop" then
+ local info = traceinfo(tr)
+ local link, ltype = info.link, info.linktype
+ if ltype == "interpreter" then
+ out:write(format("[TRACE %3s %s%s -- fallback to interpreter]\n",
+ tr, startex, startloc))
+ elseif ltype == "stitch" then
+ out:write(format("[TRACE %3s %s%s %s %s]\n",
+ tr, startex, startloc, ltype, fmtfunc(func, pc)))
+ elseif link == tr or link == 0 then
+ out:write(format("[TRACE %3s %s%s %s]\n",
+ tr, startex, startloc, ltype))
+ elseif ltype == "root" then
+ out:write(format("[TRACE %3s %s%s -> %d]\n",
+ tr, startex, startloc, link))
+ else
+ out:write(format("[TRACE %3s %s%s -> %d %s]\n",
+ tr, startex, startloc, link, ltype))
+ end
+ else
+ out:write(format("[TRACE %s]\n", what))
+ end
+ out:flush()
+ end
+end
+
+------------------------------------------------------------------------------
+
+-- Detach dump handlers.
+local function dumpoff()
+ if active then
+ active = false
+ jit.attach(dump_trace)
+ if out and out ~= stdout and out ~= stderr then out:close() end
+ out = nil
+ end
+end
+
+-- Open the output file and attach dump handlers.
+local function dumpon(outfile)
+ if active then dumpoff() end
+ if not outfile then outfile = os.getenv("LUAJIT_VERBOSEFILE") end
+ if outfile then
+ out = outfile == "-" and stdout or assert(io.open(outfile, "w"))
+ else
+ out = stderr
+ end
+ jit.attach(dump_trace, "trace")
+ active = true
+end
+
+-- Public module functions.
+return {
+ on = dumpon,
+ off = dumpoff,
+ start = dumpon -- For -j command line option.
+}
+
diff --git a/libs/luajit-cmake/luajit/src/jit/zone.lua b/libs/luajit-cmake/luajit/src/jit/zone.lua
new file mode 100644
index 0000000..1308cb7
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/jit/zone.lua
@@ -0,0 +1,45 @@
+----------------------------------------------------------------------------
+-- LuaJIT profiler zones.
+--
+-- Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+-- Released under the MIT license. See Copyright Notice in luajit.h
+----------------------------------------------------------------------------
+--
+-- This module implements a simple hierarchical zone model.
+--
+-- Example usage:
+--
+-- local zone = require("jit.zone")
+-- zone("AI")
+-- ...
+-- zone("A*")
+-- ...
+-- print(zone:get()) --> "A*"
+-- ...
+-- zone()
+-- ...
+-- print(zone:get()) --> "AI"
+-- ...
+-- zone()
+--
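+-- Zone names are picked up by the profiler when it is started with the
+-- z feature, e.g. "luajit -jp=z myapp.lua" (see jit/p.lua).
+--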
+----------------------------------------------------------------------------
+
+local remove = table.remove
+
+return setmetatable({
+ flush = function(t)
+ for i=#t,1,-1 do t[i] = nil end
+ end,
+ get = function(t)
+ return t[#t]
+ end
+}, {
+ __call = function(t, zone)
+ if zone then
+ t[#t+1] = zone
+ else
+ return (assert(remove(t), "empty zone stack"))
+ end
+ end
+})
+
diff --git a/libs/luajit-cmake/luajit/src/lauxlib.h b/libs/luajit-cmake/luajit/src/lauxlib.h
new file mode 100644
index 0000000..a44f027
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lauxlib.h
@@ -0,0 +1,161 @@
+/*
+** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $
+** Auxiliary functions for building Lua libraries
+** See Copyright Notice in lua.h
+*/
+
+
+#ifndef lauxlib_h
+#define lauxlib_h
+
+
+#include <stddef.h>
+#include <stdio.h>
+
+#include "lua.h"
+
+
+/* extra error code for `luaL_load' */
+#define LUA_ERRFILE (LUA_ERRERR+1)
+
+typedef struct luaL_Reg {
+ const char *name;
+ lua_CFunction func;
+} luaL_Reg;
+
+LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname,
+ const luaL_Reg *l, int nup);
+LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
+ const luaL_Reg *l);
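+
+/*
+** Minimal registration sketch (the module name "mylib" and the C function
+** l_add are hypothetical):
+**
+**   static const luaL_Reg mylib_funcs[] = {
+**     {"add", l_add},
+**     {NULL, NULL}
+**   };
+**   luaL_register(L, "mylib", mylib_funcs);
+*/
+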
+LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e);
+LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e);
+LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname);
+LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg);
+LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg,
+ size_t *l);
+LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg,
+ const char *def, size_t *l);
+LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg);
+LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def);
+
+LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg);
+LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg,
+ lua_Integer def);
+
+LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg);
+LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t);
+LUALIB_API void (luaL_checkany) (lua_State *L, int narg);
+
+LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname);
+LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname);
+
+LUALIB_API void (luaL_where) (lua_State *L, int lvl);
+LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...);
+
+LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def,
+ const char *const lst[]);
+
+/* pre-defined references */
+#define LUA_NOREF (-2)
+#define LUA_REFNIL (-1)
+
+LUALIB_API int (luaL_ref) (lua_State *L, int t);
+LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref);
+
+LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename);
+LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz,
+ const char *name);
+LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s);
+
+LUALIB_API lua_State *(luaL_newstate) (void);
+
+
+LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p,
+ const char *r);
+
+LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx,
+ const char *fname, int szhint);
+
+/* From Lua 5.2. */
+LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname);
+LUALIB_API int luaL_execresult(lua_State *L, int stat);
+LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename,
+ const char *mode);
+LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz,
+ const char *name, const char *mode);
+LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
+ int level);
+LUALIB_API void (luaL_setfuncs) (lua_State *L, const luaL_Reg *l, int nup);
+LUALIB_API void (luaL_pushmodule) (lua_State *L, const char *modname,
+ int sizehint);
+LUALIB_API void *(luaL_testudata) (lua_State *L, int ud, const char *tname);
+LUALIB_API void (luaL_setmetatable) (lua_State *L, const char *tname);
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define luaL_argcheck(L, cond,numarg,extramsg) \
+ ((void)((cond) || luaL_argerror(L, (numarg), (extramsg))))
+#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL))
+#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL))
+#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n)))
+#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d)))
+#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n)))
+#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d)))
+
+#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i)))
+
+#define luaL_dofile(L, fn) \
+ (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0))
+
+#define luaL_dostring(L, s) \
+ (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0))
+
+#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n)))
+
+#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n)))
+
+/* From Lua 5.2. */
+#define luaL_newlibtable(L, l) \
+ lua_createtable(L, 0, sizeof(l)/sizeof((l)[0]) - 1)
+#define luaL_newlib(L, l) (luaL_newlibtable(L, l), luaL_setfuncs(L, l, 0))
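+
+/* Minimal module-registration sketch using the 5.2-style macros above.
+** The module and function names are hypothetical:
+**
+**   static int mylib_add(lua_State *L)
+**   {
+**     lua_pushnumber(L, luaL_checknumber(L, 1) + luaL_checknumber(L, 2));
+**     return 1;
+**   }
+**   static const luaL_Reg mylib_funcs[] = {
+**     { "add", mylib_add },
+**     { NULL, NULL }
+**   };
+**   int luaopen_mylib(lua_State *L)
+**   {
+**     luaL_newlib(L, mylib_funcs);
+**     return 1;
+**   }
+**
+** The NULL sentinel both terminates luaL_setfuncs() and lets
+** luaL_newlibtable() presize the table from the array length.
+*/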
+
+/*
+** {======================================================
+** Generic Buffer manipulation
+** =======================================================
+*/
+
+
+
+typedef struct luaL_Buffer {
+ char *p; /* current position in buffer */
+ int lvl; /* number of strings in the stack (level) */
+ lua_State *L;
+ char buffer[LUAL_BUFFERSIZE];
+} luaL_Buffer;
+
+#define luaL_addchar(B,c) \
+ ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \
+ (*(B)->p++ = (char)(c)))
+
+/* compatibility only */
+#define luaL_putchar(B,c) luaL_addchar(B,c)
+
+#define luaL_addsize(B,n) ((B)->p += (n))
+
+LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B);
+LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B);
+LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l);
+LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s);
+LUALIB_API void (luaL_addvalue) (luaL_Buffer *B);
+LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
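+
+/* Typical luaL_Buffer usage from a C function (sketch):
+**
+**   luaL_Buffer b;
+**   luaL_buffinit(L, &b);
+**   luaL_addstring(&b, "hello");
+**   luaL_addchar(&b, ' ');
+**   luaL_addlstring(&b, "world", 5);
+**   luaL_pushresult(&b);   pushes "hello world" as one string
+**
+** Pieces that overflow LUAL_BUFFERSIZE are spilled to the Lua stack and
+** concatenated lazily, so the caller must not disturb the stack between
+** buffer operations (except via luaL_addvalue).
+*/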
+
+
+/* }====================================================== */
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lib_aux.c b/libs/luajit-cmake/luajit/src/lib_aux.c
new file mode 100644
index 0000000..b8e5643
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_aux.c
@@ -0,0 +1,370 @@
+/*
+** Auxiliary library for the Lua/C API.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#define lib_aux_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_state.h"
+#include "lj_trace.h"
+#include "lj_lib.h"
+
+#if LJ_TARGET_POSIX
+#include <sys/wait.h>
+#endif
+
+/* -- I/O error handling -------------------------------------------------- */
+
+LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname)
+{
+ if (stat) {
+ setboolV(L->top++, 1);
+ return 1;
+ } else {
+ int en = errno; /* Lua API calls may change this value. */
+ setnilV(L->top++);
+ if (fname)
+ lua_pushfstring(L, "%s: %s", fname, strerror(en));
+ else
+ lua_pushfstring(L, "%s", strerror(en));
+ setintV(L->top++, en);
+ lj_trace_abort(G(L));
+ return 3;
+ }
+}
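+
+/* Typical call pattern (sketch; fopen-based io functions use this shape):
+**
+**   FILE *f = fopen(fname, mode);
+**   if (f == NULL)
+**     return luaL_fileresult(L, 0, fname);   returns nil, message, errno
+**
+** A non-zero stat instead yields a single true result.
+*/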
+
+LUALIB_API int luaL_execresult(lua_State *L, int stat)
+{
+ if (stat != -1) {
+#if LJ_TARGET_POSIX
+ if (WIFSIGNALED(stat)) {
+ stat = WTERMSIG(stat);
+ setnilV(L->top++);
+ lua_pushliteral(L, "signal");
+ } else {
+ if (WIFEXITED(stat))
+ stat = WEXITSTATUS(stat);
+ if (stat == 0)
+ setboolV(L->top++, 1);
+ else
+ setnilV(L->top++);
+ lua_pushliteral(L, "exit");
+ }
+#else
+ if (stat == 0)
+ setboolV(L->top++, 1);
+ else
+ setnilV(L->top++);
+ lua_pushliteral(L, "exit");
+#endif
+ setintV(L->top++, stat);
+ return 3;
+ }
+ return luaL_fileresult(L, 0, NULL);
+}
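+
+/* Sketch of the intended call site (the wrapper name is hypothetical):
+**
+**   static int my_execute(lua_State *L)
+**   {
+**     const char *cmd = luaL_optstring(L, 1, NULL);
+**     return luaL_execresult(L, system(cmd));   needs <stdlib.h>
+**   }
+**
+** On POSIX targets the raw wait status is decoded into the triple
+** true|nil, "exit"|"signal", code.
+*/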
+
+/* -- Module registration ------------------------------------------------- */
+
+LUALIB_API const char *luaL_findtable(lua_State *L, int idx,
+ const char *fname, int szhint)
+{
+ const char *e;
+ lua_pushvalue(L, idx);
+ do {
+ e = strchr(fname, '.');
+ if (e == NULL) e = fname + strlen(fname);
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_rawget(L, -2);
+ if (lua_isnil(L, -1)) { /* no such field? */
+ lua_pop(L, 1); /* remove this nil */
+ lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */
+ lua_pushlstring(L, fname, (size_t)(e - fname));
+ lua_pushvalue(L, -2);
+ lua_settable(L, -4); /* set new table into field */
+ } else if (!lua_istable(L, -1)) { /* field has a non-table value? */
+ lua_pop(L, 2); /* remove table and value */
+ return fname; /* return problematic part of the name */
+ }
+ lua_remove(L, -2); /* remove previous table */
+ fname = e + 1;
+ } while (*e == '.');
+ return NULL;
+}
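+
+/* Example: find or create the nested table _G.a.b.c (sketch):
+**
+**   if (luaL_findtable(L, LUA_GLOBALSINDEX, "a.b.c", 1) != NULL)
+**     luaL_error(L, "name conflict");
+**   ... the table a.b.c is now on top of the stack ...
+**
+** A non-NULL return points at the first dotted component that exists
+** but holds a non-table value.
+*/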
+
+static int libsize(const luaL_Reg *l)
+{
+ int size = 0;
+ for (; l && l->name; l++) size++;
+ return size;
+}
+
+LUALIB_API void luaL_pushmodule(lua_State *L, const char *modname, int sizehint)
+{
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, modname);
+ if (!lua_istable(L, -1)) {
+ lua_pop(L, 1);
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, sizehint) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, modname);
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -3, modname); /* _LOADED[modname] = new table. */
+ }
+ lua_remove(L, -2); /* Remove _LOADED table. */
+}
+
+LUALIB_API void luaL_openlib(lua_State *L, const char *libname,
+ const luaL_Reg *l, int nup)
+{
+ lj_lib_checkfpu(L);
+ if (libname) {
+ luaL_pushmodule(L, libname, libsize(l));
+ lua_insert(L, -(nup + 1)); /* Move module table below upvalues. */
+ }
+ if (l)
+ luaL_setfuncs(L, l, nup);
+ else
+ lua_pop(L, nup); /* Remove upvalues. */
+}
+
+LUALIB_API void luaL_register(lua_State *L, const char *libname,
+ const luaL_Reg *l)
+{
+ luaL_openlib(L, libname, l, 0);
+}
+
+LUALIB_API void luaL_setfuncs(lua_State *L, const luaL_Reg *l, int nup)
+{
+ luaL_checkstack(L, nup, "too many upvalues");
+ for (; l->name; l++) {
+ int i;
+ for (i = 0; i < nup; i++) /* Copy upvalues to the top. */
+ lua_pushvalue(L, -nup);
+ lua_pushcclosure(L, l->func, nup);
+ lua_setfield(L, -(nup + 2), l->name);
+ }
+ lua_pop(L, nup); /* Remove upvalues. */
+}
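+
+/* luaL_setfuncs can give every registered function a set of shared
+** upvalues (sketch; `funcs' is a hypothetical luaL_Reg array):
+**
+**   lua_newtable(L);             module table
+**   lua_newtable(L);             shared state, becomes upvalue #1
+**   luaL_setfuncs(L, funcs, 1);  pops the upvalue, fills the module table
+*/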
+
+LUALIB_API const char *luaL_gsub(lua_State *L, const char *s,
+ const char *p, const char *r)
+{
+ const char *wild;
+ size_t l = strlen(p);
+ luaL_Buffer b;
+ luaL_buffinit(L, &b);
+ while ((wild = strstr(s, p)) != NULL) {
+ luaL_addlstring(&b, s, (size_t)(wild - s)); /* push prefix */
+ luaL_addstring(&b, r); /* push replacement in place of pattern */
+ s = wild + l; /* continue after `p' */
+ }
+ luaL_addstring(&b, s); /* push last suffix */
+ luaL_pushresult(&b);
+ return lua_tostring(L, -1);
+}
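+
+/* Example: luaL_gsub(L, "a.b.c", ".", "/") pushes and returns "a/b/c".
+** Note this is plain substring replacement; `p' is not a Lua pattern. */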
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define bufflen(B) ((size_t)((B)->p - (B)->buffer))
+#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B)))
+
+static int emptybuffer(luaL_Buffer *B)
+{
+ size_t l = bufflen(B);
+ if (l == 0)
+ return 0; /* put nothing on stack */
+ lua_pushlstring(B->L, B->buffer, l);
+ B->p = B->buffer;
+ B->lvl++;
+ return 1;
+}
+
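+/* Balance the string fragments spilled to the stack: concatenate the top
+** levels while the stack is getting deep or the accumulated top string is
+** larger than the one below it, which bounds stack usage and keeps the
+** total copying cost low (big strings are merged less often). */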
+static void adjuststack(luaL_Buffer *B)
+{
+ if (B->lvl > 1) {
+ lua_State *L = B->L;
+ int toget = 1; /* number of levels to concat */
+ size_t toplen = lua_strlen(L, -1);
+ do {
+ size_t l = lua_strlen(L, -(toget+1));
+ if (!(B->lvl - toget + 1 >= LUA_MINSTACK/2 || toplen > l))
+ break;
+ toplen += l;
+ toget++;
+ } while (toget < B->lvl);
+ lua_concat(L, toget);
+ B->lvl = B->lvl - toget + 1;
+ }
+}
+
+LUALIB_API char *luaL_prepbuffer(luaL_Buffer *B)
+{
+ if (emptybuffer(B))
+ adjuststack(B);
+ return B->buffer;
+}
+
+LUALIB_API void luaL_addlstring(luaL_Buffer *B, const char *s, size_t l)
+{
+ if (l <= bufffree(B)) {
+ memcpy(B->p, s, l);
+ B->p += l;
+ } else {
+ emptybuffer(B);
+ lua_pushlstring(B->L, s, l);
+ B->lvl++;
+ adjuststack(B);
+ }
+}
+
+LUALIB_API void luaL_addstring(luaL_Buffer *B, const char *s)
+{
+ luaL_addlstring(B, s, strlen(s));
+}
+
+LUALIB_API void luaL_pushresult(luaL_Buffer *B)
+{
+ emptybuffer(B);
+ lua_concat(B->L, B->lvl);
+ B->lvl = 1;
+}
+
+LUALIB_API void luaL_addvalue(luaL_Buffer *B)
+{
+ lua_State *L = B->L;
+ size_t vl;
+ const char *s = lua_tolstring(L, -1, &vl);
+ if (vl <= bufffree(B)) { /* fit into buffer? */
+ memcpy(B->p, s, vl); /* put it there */
+ B->p += vl;
+ lua_pop(L, 1); /* remove from stack */
+ } else {
+ if (emptybuffer(B))
+ lua_insert(L, -2); /* put buffer before new value */
+ B->lvl++; /* add new value into B stack */
+ adjuststack(B);
+ }
+}
+
+LUALIB_API void luaL_buffinit(lua_State *L, luaL_Buffer *B)
+{
+ B->L = L;
+ B->p = B->buffer;
+ B->lvl = 0;
+}
+
+/* -- Reference management ------------------------------------------------ */
+
+#define FREELIST_REF 0
+
+/* Convert a stack index to an absolute index. */
+#define abs_index(L, i) \
+ ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : lua_gettop(L) + (i) + 1)
+
+LUALIB_API int luaL_ref(lua_State *L, int t)
+{
+ int ref;
+ t = abs_index(L, t);
+ if (lua_isnil(L, -1)) {
+ lua_pop(L, 1); /* remove from stack */
+ return LUA_REFNIL; /* `nil' has a unique fixed reference */
+ }
+ lua_rawgeti(L, t, FREELIST_REF); /* get first free element */
+ ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */
+ lua_pop(L, 1); /* remove it from stack */
+ if (ref != 0) { /* any free element? */
+ lua_rawgeti(L, t, ref); /* remove it from list */
+ lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */
+ } else { /* no free elements */
+ ref = (int)lua_objlen(L, t);
+ ref++; /* create new reference */
+ }
+ lua_rawseti(L, t, ref);
+ return ref;
+}
+
+LUALIB_API void luaL_unref(lua_State *L, int t, int ref)
+{
+ if (ref >= 0) {
+ t = abs_index(L, t);
+ lua_rawgeti(L, t, FREELIST_REF);
+ lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */
+ lua_pushinteger(L, ref);
+ lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */
+ }
+}
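+
+/* Typical registry-reference usage (sketch):
+**
+**   lua_pushvalue(L, 1);                        e.g. a callback function
+**   ref = luaL_ref(L, LUA_REGISTRYINDEX);       pops it, returns a slot
+**   ...
+**   lua_rawgeti(L, LUA_REGISTRYINDEX, ref);     push the value again
+**   luaL_unref(L, LUA_REGISTRYINDEX, ref);      recycle the slot
+**
+** Slot 0 (FREELIST_REF) chains the free slots, so valid refs start at 1.
+*/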
+
+/* -- Default allocator and panic function -------------------------------- */
+
+static int panic(lua_State *L)
+{
+ const char *s = lua_tostring(L, -1);
+ fputs("PANIC: unprotected error in call to Lua API (", stderr);
+ fputs(s ? s : "?", stderr);
+ fputc(')', stderr); fputc('\n', stderr);
+ fflush(stderr);
+ return 0;
+}
+
+#ifdef LUAJIT_USE_SYSMALLOC
+
+#if LJ_64 && !LJ_GC64 && !defined(LUAJIT_USE_VALGRIND)
+#error "Must use builtin allocator for 64 bit target"
+#endif
+
+static void *mem_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
+{
+ (void)ud;
+ (void)osize;
+ if (nsize == 0) {
+ free(ptr);
+ return NULL;
+ } else {
+ return realloc(ptr, nsize);
+ }
+}
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L = lua_newstate(mem_alloc, NULL);
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#else
+
+LUALIB_API lua_State *luaL_newstate(void)
+{
+ lua_State *L;
+#if LJ_64 && !LJ_GC64
+ L = lj_state_newstate(LJ_ALLOCF_INTERNAL, NULL);
+#else
+ L = lua_newstate(LJ_ALLOCF_INTERNAL, NULL);
+#endif
+ if (L) G(L)->panic = panic;
+ return L;
+}
+
+#if LJ_64 && !LJ_GC64
+LUA_API lua_State *lua_newstate(lua_Alloc f, void *ud)
+{
+ UNUSED(f); UNUSED(ud);
+ fputs("Must use luaL_newstate() for 64 bit target\n", stderr);
+ return NULL;
+}
+#endif
+
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lib_base.c b/libs/luajit-cmake/luajit/src/lib_base.c
new file mode 100644
index 0000000..98ec67c
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_base.c
@@ -0,0 +1,696 @@
+/*
+** Base and coroutine library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+
+#define lib_base_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_dispatch.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#include "lj_lib.h"
+
+/* -- Base library: checks ------------------------------------------------ */
+
+#define LJLIB_MODULE_base
+
+LJLIB_ASM(assert) LJLIB_REC(.)
+{
+ lj_lib_checkany(L, 1);
+ if (L->top == L->base+1)
+ lj_err_caller(L, LJ_ERR_ASSERT);
+ else if (tvisstr(L->base+1) || tvisnumber(L->base+1))
+ lj_err_callermsg(L, strdata(lj_lib_checkstr(L, 2)));
+ else
+ lj_err_run(L);
+ return FFH_UNREACHABLE;
+}
+
+/* ORDER LJ_T */
+LJLIB_PUSH("nil")
+LJLIB_PUSH("boolean")
+LJLIB_PUSH(top-1) /* boolean */
+LJLIB_PUSH("userdata")
+LJLIB_PUSH("string")
+LJLIB_PUSH("upval")
+LJLIB_PUSH("thread")
+LJLIB_PUSH("proto")
+LJLIB_PUSH("function")
+LJLIB_PUSH("trace")
+LJLIB_PUSH("cdata")
+LJLIB_PUSH("table")
+LJLIB_PUSH(top-9) /* userdata */
+LJLIB_PUSH("number")
+LJLIB_ASM_(type) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+/* -- Base library: iterators --------------------------------------------- */
+
+/* This solves a circular dependency problem -- change FF_next_N as needed. */
+LJ_STATIC_ASSERT((int)FF_next == FF_next_N);
+
+LJLIB_ASM(next) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_err_msg(L, LJ_ERR_NEXTIDX);
+ return FFH_UNREACHABLE;
+}
+
+#if LJ_52 || LJ_HASFFI
+static int ffh_pairs(lua_State *L, MMS mm)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo = lj_meta_lookup(L, o, mm);
+ if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) {
+ L->top = o+1; /* Only keep one argument. */
+ copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ } else {
+ if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE);
+ if (LJ_FR2) { copyTV(L, o-1, o); o--; }
+ setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1)));
+ if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0);
+ return FFH_RES(3);
+ }
+}
+#else
+#define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE)
+#endif
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(pairs) LJLIB_REC(xpairs 0)
+{
+ return ffh_pairs(L, MM_pairs);
+}
+
+LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkint(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_PUSH(lastcl)
+LJLIB_ASM(ipairs) LJLIB_REC(xpairs 1)
+{
+ return ffh_pairs(L, MM_ipairs);
+}
+
+/* -- Base library: getters and setters ----------------------------------- */
+
+LJLIB_ASM_(getmetatable) LJLIB_REC(.)
+/* Recycle the lj_lib_checkany(L, 1) from assert. */
+
+LJLIB_ASM(setmetatable) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCtab *mt = lj_lib_checktabornil(L, 2);
+ if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable)))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ setgcref(t->metatable, obj2gco(mt));
+ if (mt) { lj_gc_objbarriert(L, t, mt); }
+ settabV(L, L->base-1-LJ_FR2, t);
+ return FFH_RES(1);
+}
+
+LJLIB_CF(getfenv) LJLIB_REC(.)
+{
+ GCfunc *fn;
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_optint(L, 1, 1);
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ if (LJ_FR2) o--;
+ }
+ fn = &gcval(o)->fn;
+ settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env));
+ return 1;
+}
+
+LJLIB_CF(setfenv)
+{
+ GCfunc *fn;
+ GCtab *t = lj_lib_checktab(L, 2);
+ cTValue *o = L->base;
+ if (!(o < L->top && tvisfunc(o))) {
+ int level = lj_lib_checkint(L, 1);
+ if (level == 0) {
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(t));
+ return 0;
+ }
+ o = lj_debug_frame(L, level, &level);
+ if (o == NULL)
+ lj_err_arg(L, 1, LJ_ERR_INVLVL);
+ if (LJ_FR2) o--;
+ }
+ fn = &gcval(o)->fn;
+ if (!isluafunc(fn))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ setgcref(fn->l.env, obj2gco(t));
+ lj_gc_objbarrier(L, obj2gco(fn), t);
+ setfuncV(L, L->top++, fn);
+ return 1;
+}
+
+LJLIB_ASM(rawget) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ return FFH_UNREACHABLE;
+}
+
+LJLIB_CF(rawset) LJLIB_REC(.)
+{
+ lj_lib_checktab(L, 1);
+ lj_lib_checkany(L, 2);
+ L->top = 1+lj_lib_checkany(L, 3);
+ lua_rawset(L, 1);
+ return 1;
+}
+
+LJLIB_CF(rawequal) LJLIB_REC(.)
+{
+ cTValue *o1 = lj_lib_checkany(L, 1);
+ cTValue *o2 = lj_lib_checkany(L, 2);
+ setboolV(L->top-1, lj_obj_equal(o1, o2));
+ return 1;
+}
+
+#if LJ_52
+LJLIB_CF(rawlen) LJLIB_REC(.)
+{
+ cTValue *o = L->base;
+ int32_t len;
+ if (L->top > o && tvisstr(o))
+ len = (int32_t)strV(o)->len;
+ else
+ len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1));
+ setintV(L->top-1, len);
+ return 1;
+}
+#endif
+
+LJLIB_CF(unpack)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = lj_lib_optint(L, 2, 1);
+ int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ?
+ lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t);
+ uint32_t nu;
+ if (i > e) return 0;
+ nu = (uint32_t)e - (uint32_t)i;
+ n = (int32_t)(nu+1);
+ if (nu >= LUAI_MAXCSTACK || !lua_checkstack(L, n))
+ lj_err_caller(L, LJ_ERR_UNPACK);
+ do {
+ cTValue *tv = lj_tab_getint(t, i);
+ if (tv) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setnilV(L->top++);
+ }
+ } while (i++ < e);
+ return n;
+}
+
+LJLIB_CF(select) LJLIB_REC(.)
+{
+ int32_t n = (int32_t)(L->top - L->base);
+ if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') {
+ setintV(L->top-1, n-1);
+ return 1;
+ } else {
+ int32_t i = lj_lib_checkint(L, 1);
+ if (i < 0) i = n + i; else if (i > n) i = n;
+ if (i < 1)
+ lj_err_arg(L, 1, LJ_ERR_IDXRNG);
+ return n - i;
+ }
+}
+
+/* -- Base library: conversions ------------------------------------------- */
+
+LJLIB_ASM(tonumber) LJLIB_REC(.)
+{
+ int32_t base = lj_lib_optint(L, 2, 10);
+ if (base == 10) {
+ TValue *o = lj_lib_checkany(L, 1);
+ if (lj_strscan_numberobj(o)) {
+ copyTV(L, L->base-1-LJ_FR2, o);
+ return FFH_RES(1);
+ }
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ CTState *cts = ctype_cts(L);
+ CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) &&
+ ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) {
+ int32_t i;
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0);
+ setintV(L->base-1-LJ_FR2, i);
+ return FFH_RES(1);
+ }
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE),
+ (uint8_t *)&(L->base-1-LJ_FR2)->n, o, 0);
+ return FFH_RES(1);
+ }
+ }
+#endif
+ } else {
+ const char *p = strdata(lj_lib_checkstr(L, 1));
+ char *ep;
+ unsigned int neg = 0;
+ unsigned long ul;
+ if (base < 2 || base > 36)
+ lj_err_arg(L, 2, LJ_ERR_BASERNG);
+ while (lj_char_isspace((unsigned char)(*p))) p++;
+ if (*p == '-') { p++; neg = 1; } else if (*p == '+') { p++; }
+ if (lj_char_isalnum((unsigned char)(*p))) {
+ ul = strtoul(p, &ep, base);
+ if (p != ep) {
+ while (lj_char_isspace((unsigned char)(*ep))) ep++;
+ if (*ep == '\0') {
+ if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u+neg)) {
+ if (neg) ul = (unsigned long)-(long)ul;
+ setintV(L->base-1-LJ_FR2, (int32_t)ul);
+ } else {
+ lua_Number n = (lua_Number)ul;
+ if (neg) n = -n;
+ setnumV(L->base-1-LJ_FR2, n);
+ }
+ return FFH_RES(1);
+ }
+ }
+ }
+ }
+ setnilV(L->base-1-LJ_FR2);
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(tostring) LJLIB_REC(.)
+{
+ TValue *o = lj_lib_checkany(L, 1);
+ cTValue *mo;
+ L->top = o+1; /* Only keep one argument. */
+ if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */
+ return FFH_TAILCALL;
+ }
+ lj_gc_check(L);
+ setstrV(L, L->base-1-LJ_FR2, lj_strfmt_obj(L, L->base));
+ return FFH_RES(1);
+}
+
+/* -- Base library: throw and catch errors -------------------------------- */
+
+LJLIB_CF(error)
+{
+ int32_t level = lj_lib_optint(L, 2, 1);
+ lua_settop(L, 1);
+ if (lua_isstring(L, 1) && level > 0) {
+ luaL_where(L, level);
+ lua_pushvalue(L, 1);
+ lua_concat(L, 2);
+ }
+ return lua_error(L);
+}
+
+LJLIB_ASM(pcall) LJLIB_REC(.)
+{
+ lj_lib_checkany(L, 1);
+ lj_lib_checkfunc(L, 2); /* For xpcall only. */
+ return FFH_UNREACHABLE;
+}
+LJLIB_ASM_(xpcall) LJLIB_REC(.)
+
+/* -- Base library: load Lua code ----------------------------------------- */
+
+static int load_aux(lua_State *L, int status, int envarg)
+{
+ if (status == LUA_OK) {
+ if (tvistab(L->base+envarg-1)) {
+ GCfunc *fn = funcV(L->top-1);
+ GCtab *t = tabV(L->base+envarg-1);
+ setgcref(fn->c.env, obj2gco(t));
+ lj_gc_objbarrier(L, fn, t);
+ }
+ return 1;
+ } else {
+ setnilV(L->top-2);
+ return 2;
+ }
+}
+
+LJLIB_CF(loadfile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ GCstr *mode = lj_lib_optstr(L, 2);
+ int status;
+ lua_settop(L, 3); /* Ensure env arg exists. */
+ status = luaL_loadfilex(L, fname ? strdata(fname) : NULL,
+ mode ? strdata(mode) : NULL);
+ return load_aux(L, status, 3);
+}
+
+static const char *reader_func(lua_State *L, void *ud, size_t *size)
+{
+ UNUSED(ud);
+ luaL_checkstack(L, 2, "too many nested functions");
+ copyTV(L, L->top++, L->base);
+ lua_call(L, 0, 1); /* Call user-supplied function. */
+ L->top--;
+ if (tvisnil(L->top)) {
+ *size = 0;
+ return NULL;
+ } else if (tvisstr(L->top) || tvisnumber(L->top)) {
+ copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. */
+ return lua_tolstring(L, 5, size);
+ } else {
+ lj_err_caller(L, LJ_ERR_RDRSTR);
+ return NULL;
+ }
+}
+
+LJLIB_CF(load)
+{
+ GCstr *name = lj_lib_optstr(L, 2);
+ GCstr *mode = lj_lib_optstr(L, 3);
+ int status;
+ if (L->base < L->top &&
+ (tvisstr(L->base) || tvisnumber(L->base) || tvisbuf(L->base))) {
+ const char *s;
+ MSize len;
+ if (tvisbuf(L->base)) {
+ SBufExt *sbx = bufV(L->base);
+ s = sbx->r;
+ len = sbufxlen(sbx);
+ if (!name) name = &G(L)->strempty; /* Buffers are not NUL-terminated. */
+ } else {
+ GCstr *str = lj_lib_checkstr(L, 1);
+ s = strdata(str);
+ len = str->len;
+ }
+ lua_settop(L, 4); /* Ensure env arg exists. */
+ status = luaL_loadbufferx(L, s, len, name ? strdata(name) : s,
+ mode ? strdata(mode) : NULL);
+ } else {
+ lj_lib_checkfunc(L, 1);
+ lua_settop(L, 5); /* Reserve a slot for the string from the reader. */
+ status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)",
+ mode ? strdata(mode) : NULL);
+ }
+ return load_aux(L, status, 4);
+}
+
+LJLIB_CF(loadstring)
+{
+ return lj_cf_load(L);
+}
+
+LJLIB_CF(dofile)
+{
+ GCstr *fname = lj_lib_optstr(L, 1);
+ setnilV(L->top);
+ L->top = L->base+1;
+ if (luaL_loadfile(L, fname ? strdata(fname) : NULL) != LUA_OK)
+ lua_error(L);
+ lua_call(L, 0, LUA_MULTRET);
+ return (int)(L->top - L->base) - 1;
+}
+
+/* -- Base library: GC control -------------------------------------------- */
+
+LJLIB_CF(gcinfo)
+{
+ setintV(L->top++, (int32_t)(G(L)->gc.total >> 10));
+ return 1;
+}
+
+LJLIB_CF(collectgarbage)
+{
+ int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */
+ "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul\1\377\11isrunning");
+ int32_t data = lj_lib_optint(L, 2, 0);
+ if (opt == LUA_GCCOUNT) {
+ setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0);
+ } else {
+ int res = lua_gc(L, opt, data);
+ if (opt == LUA_GCSTEP || opt == LUA_GCISRUNNING)
+ setboolV(L->top, res);
+ else
+ setintV(L->top, res);
+ }
+ L->top++;
+ return 1;
+}
+
+/* -- Base library: miscellaneous functions ------------------------------- */
+
+LJLIB_PUSH(top-2) /* Upvalue holds weak table. */
+LJLIB_CF(newproxy)
+{
+ lua_settop(L, 1);
+ lua_newuserdata(L, 0);
+ if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */
+ return 1;
+ } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */
+ lua_newtable(L);
+ lua_pushvalue(L, -1);
+ lua_pushboolean(L, 1);
+ lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */
+ } else { /* newproxy(proxy): inherit metatable. */
+ int validproxy = 0;
+ if (lua_getmetatable(L, 1)) {
+ lua_rawget(L, lua_upvalueindex(1));
+ validproxy = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ }
+ if (!validproxy)
+ lj_err_arg(L, 1, LJ_ERR_NOPROXY);
+ lua_getmetatable(L, 1);
+ }
+ lua_setmetatable(L, 2);
+ return 1;
+}
+
+LJLIB_PUSH("tostring")
+LJLIB_CF(print)
+{
+ ptrdiff_t i, nargs = L->top - L->base;
+ cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1)));
+ int shortcut;
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top++, tv);
+ } else {
+ setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1)));
+ lua_gettable(L, LUA_GLOBALSINDEX);
+ tv = L->top-1;
+ }
+ shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring) &&
+ !gcrefu(basemt_it(G(L), LJ_TNUMX));
+ for (i = 0; i < nargs; i++) {
+ cTValue *o = &L->base[i];
+ const char *str;
+ size_t size;
+ MSize len;
+ if (shortcut && (str = lj_strfmt_wstrnum(L, o, &len)) != NULL) {
+ size = len;
+ } else {
+ copyTV(L, L->top+1, o);
+ copyTV(L, L->top, L->top-1);
+ L->top += 2;
+ lua_call(L, 1, 1);
+ str = lua_tolstring(L, -1, &size);
+ if (!str)
+ lj_err_caller(L, LJ_ERR_PRTOSTR);
+ L->top--;
+ }
+ if (i)
+ putchar('\t');
+ fwrite(str, 1, size, stdout);
+ }
+ putchar('\n');
+ return 0;
+}
+
+LJLIB_PUSH(top-3)
+LJLIB_SET(_VERSION)
+
+#include "lj_libdef.h"
+
+/* -- Coroutine library --------------------------------------------------- */
+
+#define LJLIB_MODULE_coroutine
+
+LJLIB_CF(coroutine_status)
+{
+ const char *s;
+ lua_State *co;
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ co = threadV(L->base);
+ if (co == L) s = "running";
+ else if (co->status == LUA_YIELD) s = "suspended";
+ else if (co->status != LUA_OK) s = "dead";
+ else if (co->base > tvref(co->stack)+1+LJ_FR2) s = "normal";
+ else if (co->top == co->base) s = "dead";
+ else s = "suspended";
+ lua_pushstring(L, s);
+ return 1;
+}
+
+LJLIB_CF(coroutine_running)
+{
+#if LJ_52
+ int ismain = lua_pushthread(L);
+ setboolV(L->top++, ismain);
+ return 2;
+#else
+ if (lua_pushthread(L))
+ setnilV(L->top++);
+ return 1;
+#endif
+}
+
+LJLIB_CF(coroutine_isyieldable)
+{
+ setboolV(L->top++, cframe_canyield(L->cframe));
+ return 1;
+}
+
+LJLIB_CF(coroutine_create)
+{
+ lua_State *L1;
+ if (!(L->base < L->top && tvisfunc(L->base)))
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ L1 = lua_newthread(L);
+ setfuncV(L, L1->top++, funcV(L->base));
+ return 1;
+}
+
+LJLIB_ASM(coroutine_yield)
+{
+ lj_err_caller(L, LJ_ERR_CYIELD);
+ return FFH_UNREACHABLE;
+}
+
+static int ffh_resume(lua_State *L, lua_State *co, int wrap)
+{
+ if (co->cframe != NULL || co->status > LUA_YIELD ||
+ (co->status == LUA_OK && co->top == co->base)) {
+ ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD;
+ if (wrap) lj_err_caller(L, em);
+ setboolV(L->base-1-LJ_FR2, 0);
+ setstrV(L, L->base-LJ_FR2, lj_err_str(L, em));
+ return FFH_RES(2);
+ }
+ lj_state_growstack(co, (MSize)(L->top - L->base));
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(coroutine_resume)
+{
+ if (!(L->top > L->base && tvisthread(L->base)))
+ lj_err_arg(L, 1, LJ_ERR_NOCORO);
+ return ffh_resume(L, threadV(L->base), 0);
+}
+
+LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux)
+{
+ return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1);
+}
+
+/* Inline declarations. */
+LJ_ASMF void lj_ff_coroutine_wrap_aux(void);
+#if !(LJ_TARGET_MIPS && defined(ljamalg_c))
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+#endif
+
+/* Error handler, called from assembler VM. */
+void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co)
+{
+ co->top--; copyTV(L, L->top, co->top); L->top++;
+ if (tvisstr(L->top-1))
+ lj_err_callermsg(L, strVdata(L->top-1));
+ else
+ lj_err_run(L);
+}
+
+/* Forward declaration. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn);
+
+LJLIB_CF(coroutine_wrap)
+{
+ GCfunc *fn;
+ lj_cf_coroutine_create(L);
+ fn = lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1);
+ setpc_wrap_aux(L, fn);
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* Fix the PC of wrap_aux. Really ugly workaround. */
+static void setpc_wrap_aux(lua_State *L, GCfunc *fn)
+{
+ setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void newproxy_weaktable(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "kv"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+}
+
+LUALIB_API int luaopen_base(lua_State *L)
+{
+ /* NOBARRIER: Table and value are the same. */
+ GCtab *env = tabref(L->env);
+ settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env);
+ lua_pushliteral(L, LUA_VERSION); /* top-3. */
+ newproxy_weaktable(L); /* top-2. */
+ LJ_LIB_REG(L, "_G", base);
+ LJ_LIB_REG(L, LUA_COLIBNAME, coroutine);
+ return 2;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_bit.c b/libs/luajit-cmake/luajit/src/lib_bit.c
new file mode 100644
index 0000000..38c0f57
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_bit.c
@@ -0,0 +1,180 @@
+/*
+** Bit manipulation library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_bit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#endif
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_bit
+
+#if LJ_HASFFI
+static int bit_result64(lua_State *L, CTypeID id, uint64_t x)
+{
+ GCcdata *cd = lj_cdata_new_(L, id, 8);
+ *(uint64_t *)cdataptr(cd) = x;
+ setcdataV(L, L->base-1-LJ_FR2, cd);
+ return FFH_RES(1);
+}
+#else
+static int32_t bit_checkbit(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2bit(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+#endif
+
+LJLIB_ASM(bit_tobit) LJLIB_REC(bit_tobit)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ setintV(L->base-1-LJ_FR2, (int32_t)lj_carith_check64(L, 1, &id));
+ return FFH_RES(1);
+#else
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+#endif
+}
+
+LJLIB_ASM(bit_bnot) LJLIB_REC(bit_unary IR_BNOT)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ uint64_t x = lj_carith_check64(L, 1, &id);
+ return id ? bit_result64(L, id, ~x) : FFH_RETRY;
+#else
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+#endif
+}
+
+LJLIB_ASM(bit_bswap) LJLIB_REC(bit_unary IR_BSWAP)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ uint64_t x = lj_carith_check64(L, 1, &id);
+ return id ? bit_result64(L, id, lj_bswap64(x)) : FFH_RETRY;
+#else
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+#endif
+}
+
+LJLIB_ASM(bit_lshift) LJLIB_REC(bit_shift IR_BSHL)
+{
+#if LJ_HASFFI
+ CTypeID id = 0, id2 = 0;
+ uint64_t x = lj_carith_check64(L, 1, &id);
+ int32_t sh = (int32_t)lj_carith_check64(L, 2, &id2);
+ if (id) {
+ x = lj_carith_shift64(x, sh, curr_func(L)->c.ffid - (int)FF_bit_lshift);
+ return bit_result64(L, id, x);
+ }
+ if (id2) setintV(L->base+1, sh);
+ return FFH_RETRY;
+#else
+ lj_lib_checknumber(L, 1);
+ bit_checkbit(L, 2);
+ return FFH_RETRY;
+#endif
+}
+LJLIB_ASM_(bit_rshift) LJLIB_REC(bit_shift IR_BSHR)
+LJLIB_ASM_(bit_arshift) LJLIB_REC(bit_shift IR_BSAR)
+LJLIB_ASM_(bit_rol) LJLIB_REC(bit_shift IR_BROL)
+LJLIB_ASM_(bit_ror) LJLIB_REC(bit_shift IR_BROR)
+
+LJLIB_ASM(bit_band) LJLIB_REC(bit_nary IR_BAND)
+{
+#if LJ_HASFFI
+ CTypeID id = 0;
+ TValue *o = L->base, *top = L->top;
+ int i = 0;
+ do { lj_carith_check64(L, ++i, &id); } while (++o < top);
+ if (id) {
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_get(cts, id);
+ int op = curr_func(L)->c.ffid - (int)FF_bit_bor;
+ uint64_t x, y = op >= 0 ? 0 : ~(uint64_t)0;
+ o = L->base;
+ do {
+ lj_cconv_ct_tv(cts, ct, (uint8_t *)&x, o, 0);
+ if (op < 0) y &= x; else if (op == 0) y |= x; else y ^= x;
+ } while (++o < top);
+ return bit_result64(L, id, y);
+ }
+ return FFH_RETRY;
+#else
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+#endif
+}
+LJLIB_ASM_(bit_bor) LJLIB_REC(bit_nary IR_BOR)
+LJLIB_ASM_(bit_bxor) LJLIB_REC(bit_nary IR_BXOR)
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(bit_tohex) LJLIB_REC(.)
+{
+#if LJ_HASFFI
+ CTypeID id = 0, id2 = 0;
+ uint64_t b = lj_carith_check64(L, 1, &id);
+ int32_t n = L->base+1>=L->top ? (id ? 16 : 8) :
+ (int32_t)lj_carith_check64(L, 2, &id2);
+#else
+ uint32_t b = (uint32_t)bit_checkbit(L, 1);
+ int32_t n = L->base+1>=L->top ? 8 : bit_checkbit(L, 2);
+#endif
+ SBuf *sb = lj_buf_tmp_(L);
+ SFormat sf = (STRFMT_UINT|STRFMT_T_HEX);
+ if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; }
+ sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC);
+#if LJ_HASFFI
+ if (n < 16) b &= ((uint64_t)1 << 4*n)-1;
+#else
+ if (n < 8) b &= (1u << 4*n)-1;
+#endif
+ sb = lj_strfmt_putfxint(sb, sf, b);
+ setstrV(L, L->top-1, lj_buf_str(L, sb));
+ lj_gc_check(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_bit(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_BITLIBNAME, bit);
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_buffer.c b/libs/luajit-cmake/luajit/src/lib_buffer.c
new file mode 100644
index 0000000..d6ff134
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_buffer.c
@@ -0,0 +1,360 @@
+/*
+** Buffer library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_buffer_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+
+#if LJ_HASBUFFER
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#endif
+#include "lj_strfmt.h"
+#include "lj_serialize.h"
+#include "lj_lib.h"
+
+/* -- Helper functions ---------------------------------------------------- */
+
+/* Check that the first argument is a string buffer. */
+static SBufExt *buffer_tobuf(lua_State *L)
+{
+ if (!(L->base < L->top && tvisbuf(L->base)))
+ lj_err_argtype(L, 1, "buffer");
+ return bufV(L->base);
+}
+
+/* Ditto, but for writers. */
+static LJ_AINLINE SBufExt *buffer_tobufw(lua_State *L)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ setsbufXL_(sbx, L);
+ return sbx;
+}
+
+#define buffer_toudata(sbx) ((GCudata *)(sbx)-1)
+
+/* -- Buffer methods ------------------------------------------------------ */
+
+#define LJLIB_MODULE_buffer_method
+
+LJLIB_CF(buffer_method_free)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ lj_bufx_free(L, sbx);
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_reset) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ lj_bufx_reset(sbx);
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_skip) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ MSize n = (MSize)lj_lib_checkintrange(L, 2, 0, LJ_MAX_BUF);
+ MSize len = sbufxlen(sbx);
+ if (n < len) {
+ sbx->r += n;
+ } else if (sbufiscow(sbx)) {
+ sbx->r = sbx->w;
+ } else {
+ sbx->r = sbx->w = sbx->b;
+ }
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_set) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ GCobj *ref;
+ const char *p;
+ MSize len;
+#if LJ_HASFFI
+ if (tviscdata(L->base+1)) {
+ CTState *cts = ctype_cts(L);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p,
+ L->base+1, CCF_ARG(2));
+ len = (MSize)lj_lib_checkintrange(L, 3, 0, LJ_MAX_BUF);
+ } else
+#endif
+ {
+ GCstr *str = lj_lib_checkstrx(L, 2);
+ p = strdata(str);
+ len = str->len;
+ }
+ lj_bufx_free(L, sbx);
+ lj_bufx_set_cow(L, sbx, p, len);
+ ref = gcV(L->base+1);
+ setgcref(sbx->cowref, ref);
+ lj_gc_objbarrier(L, buffer_toudata(sbx), ref);
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_put) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ ptrdiff_t arg, narg = L->top - L->base;
+ for (arg = 1; arg < narg; arg++) {
+ cTValue *o = &L->base[arg], *mo = NULL;
+ retry:
+ if (tvisstr(o)) {
+ lj_buf_putstr((SBuf *)sbx, strV(o));
+ } else if (tvisint(o)) {
+ lj_strfmt_putint((SBuf *)sbx, intV(o));
+ } else if (tvisnum(o)) {
+ lj_strfmt_putfnum((SBuf *)sbx, STRFMT_G14, numV(o));
+ } else if (tvisbuf(o)) {
+ SBufExt *sbx2 = bufV(o);
+ if (sbx2 == sbx) lj_err_arg(L, (int)(arg+1), LJ_ERR_BUFFER_SELF);
+ lj_buf_putmem((SBuf *)sbx, sbx2->r, sbufxlen(sbx2));
+ } else if (!mo && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ /* Call __tostring metamethod inline. */
+ copyTV(L, L->top++, mo);
+ copyTV(L, L->top++, o);
+ lua_call(L, 1, 1);
+ o = &L->base[arg]; /* The stack may have been reallocated. */
+ copyTV(L, &L->base[arg], L->top-1);
+ L->top = L->base + narg;
+ goto retry; /* Retry with the result. */
+ } else {
+ lj_err_argtype(L, (int)(arg+1), "string/number/__tostring");
+ }
+ /* Probably not useful to inline other __tostring MMs, e.g. FFI numbers. */
+ }
+ L->top = L->base+1; /* Chain buffer object. */
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_method_putf) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ lj_strfmt_putarg(L, (SBuf *)sbx, 2, 2);
+ L->top = L->base+1; /* Chain buffer object. */
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_method_get) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ ptrdiff_t arg, narg = L->top - L->base;
+ if (narg == 1) {
+ narg++;
+ setnilV(L->top++); /* get() is the same as get(nil). */
+ }
+ for (arg = 1; arg < narg; arg++) {
+ TValue *o = &L->base[arg];
+ MSize n = tvisnil(o) ? LJ_MAX_BUF :
+ (MSize) lj_lib_checkintrange(L, (int)(arg+1), 0, LJ_MAX_BUF);
+ MSize len = sbufxlen(sbx);
+ if (n > len) n = len;
+ setstrV(L, o, lj_str_new(L, sbx->r, n));
+ sbx->r += n;
+ }
+ if (sbx->r == sbx->w && !sbufiscow(sbx)) sbx->r = sbx->w = sbx->b;
+ lj_gc_check(L);
+ return (int)(narg-1);
+}
+
+#if LJ_HASFFI
+LJLIB_CF(buffer_method_putcdata) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ const char *p;
+ MSize len;
+ if (tviscdata(L->base+1)) {
+ CTState *cts = ctype_cts(L);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p,
+ L->base+1, CCF_ARG(2));
+ } else {
+ lj_err_argtype(L, 2, "cdata");
+ }
+ len = (MSize)lj_lib_checkintrange(L, 3, 0, LJ_MAX_BUF);
+ lj_buf_putmem((SBuf *)sbx, p, len);
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_reserve) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ MSize sz = (MSize)lj_lib_checkintrange(L, 2, 0, LJ_MAX_BUF);
+ GCcdata *cd;
+ lj_buf_more((SBuf *)sbx, sz);
+ ctype_loadffi(L);
+ cd = lj_cdata_new_(L, CTID_P_UINT8, CTSIZE_PTR);
+ *(void **)cdataptr(cd) = sbx->w;
+ setcdataV(L, L->top++, cd);
+ setintV(L->top++, sbufleft(sbx));
+ return 2;
+}
+
+LJLIB_CF(buffer_method_commit) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ MSize len = (MSize)lj_lib_checkintrange(L, 2, 0, LJ_MAX_BUF);
+ if (len > sbufleft(sbx)) lj_err_arg(L, 2, LJ_ERR_NUMRNG);
+ sbx->w += len;
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_ref) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ GCcdata *cd;
+ ctype_loadffi(L);
+ cd = lj_cdata_new_(L, CTID_P_UINT8, CTSIZE_PTR);
+ *(void **)cdataptr(cd) = sbx->r;
+ setcdataV(L, L->top++, cd);
+ setintV(L->top++, sbufxlen(sbx));
+ return 2;
+}
+#endif
+
+LJLIB_CF(buffer_method_encode) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ cTValue *o = lj_lib_checkany(L, 2);
+ lj_serialize_put(sbx, o);
+ lj_gc_check(L);
+ L->top = L->base+1; /* Chain buffer object. */
+ return 1;
+}
+
+LJLIB_CF(buffer_method_decode) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobufw(L);
+ setnilV(L->top++);
+ sbx->r = lj_serialize_get(sbx, L->top-1);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_method___gc)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ lj_bufx_free(L, sbx);
+ return 0;
+}
+
+LJLIB_CF(buffer_method___tostring) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ setstrV(L, L->top-1, lj_str_new(L, sbx->r, sbufxlen(sbx)));
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_method___len) LJLIB_REC(.)
+{
+ SBufExt *sbx = buffer_tobuf(L);
+ setintV(L->top-1, (int32_t)sbufxlen(sbx));
+ return 1;
+}
+
+LJLIB_PUSH("buffer") LJLIB_SET(__metatable)
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+/* -- Buffer library functions -------------------------------------------- */
+
+#define LJLIB_MODULE_buffer
+
+LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */
+
+LJLIB_CF(buffer_new)
+{
+ MSize sz = 0;
+ int targ = 1;
+ GCtab *env, *dict_str = NULL, *dict_mt = NULL;
+ GCudata *ud;
+ SBufExt *sbx;
+ if (L->base < L->top && !tvistab(L->base)) {
+ targ = 2;
+ if (!tvisnil(L->base))
+ sz = (MSize)lj_lib_checkintrange(L, 1, 0, LJ_MAX_BUF);
+ }
+ if (L->base+targ-1 < L->top) {
+ GCtab *options = lj_lib_checktab(L, targ);
+ cTValue *opt_dict, *opt_mt;
+ opt_dict = lj_tab_getstr(options, lj_str_newlit(L, "dict"));
+ if (opt_dict && tvistab(opt_dict)) {
+ dict_str = tabV(opt_dict);
+ lj_serialize_dict_prep_str(L, dict_str);
+ }
+ opt_mt = lj_tab_getstr(options, lj_str_newlit(L, "metatable"));
+ if (opt_mt && tvistab(opt_mt)) {
+ dict_mt = tabV(opt_mt);
+ lj_serialize_dict_prep_mt(L, dict_mt);
+ }
+ }
+ env = tabref(curr_func(L)->c.env);
+ ud = lj_udata_new(L, sizeof(SBufExt), env);
+ ud->udtype = UDTYPE_BUFFER;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, obj2gco(env));
+ setudataV(L, L->top++, ud);
+ sbx = (SBufExt *)uddata(ud);
+ lj_bufx_init(L, sbx);
+ setgcref(sbx->dict_str, obj2gco(dict_str));
+ setgcref(sbx->dict_mt, obj2gco(dict_mt));
+ if (sz > 0) lj_buf_need2((SBuf *)sbx, sz);
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_encode) LJLIB_REC(.)
+{
+ cTValue *o = lj_lib_checkany(L, 1);
+ setstrV(L, L->top++, lj_serialize_encode(L, o));
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(buffer_decode) LJLIB_REC(.)
+{
+ GCstr *str = lj_lib_checkstrx(L, 1);
+ setnilV(L->top++);
+ lj_serialize_decode(L, L->top-1, str);
+ lj_gc_check(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+int luaopen_string_buffer(lua_State *L)
+{
+ LJ_LIB_REG(L, NULL, buffer_method);
+ lua_getfield(L, -1, "__tostring");
+ lua_setfield(L, -2, "tostring");
+ LJ_LIB_REG(L, NULL, buffer);
+ return 1;
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lib_debug.c b/libs/luajit-cmake/luajit/src/lib_debug.c
new file mode 100644
index 0000000..3af7a35
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_debug.c
@@ -0,0 +1,406 @@
+/*
+** Debug library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_debug_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_debug
+
+LJLIB_CF(debug_getregistry)
+{
+ copyTV(L, L->top++, registry(L));
+ return 1;
+}
+
+LJLIB_CF(debug_getmetatable) LJLIB_REC(.)
+{
+ lj_lib_checkany(L, 1);
+ if (!lua_getmetatable(L, 1)) {
+ setnilV(L->top-1);
+ }
+ return 1;
+}
+
+LJLIB_CF(debug_setmetatable)
+{
+ lj_lib_checktabornil(L, 2);
+ L->top = L->base+2;
+ lua_setmetatable(L, 1);
+#if !LJ_52
+ setboolV(L->top-1, 1);
+#endif
+ return 1;
+}
+
+LJLIB_CF(debug_getfenv)
+{
+ lj_lib_checkany(L, 1);
+ lua_getfenv(L, 1);
+ return 1;
+}
+
+LJLIB_CF(debug_setfenv)
+{
+ lj_lib_checktab(L, 2);
+ L->top = L->base+2;
+ if (!lua_setfenv(L, 1))
+ lj_err_caller(L, LJ_ERR_SETFENV);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void settabss(lua_State *L, const char *i, const char *v)
+{
+ lua_pushstring(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static void settabsi(lua_State *L, const char *i, int v)
+{
+ lua_pushinteger(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static void settabsb(lua_State *L, const char *i, int v)
+{
+ lua_pushboolean(L, v);
+ lua_setfield(L, -2, i);
+}
+
+static lua_State *getthread(lua_State *L, int *arg)
+{
+ if (L->base < L->top && tvisthread(L->base)) {
+ *arg = 1;
+ return threadV(L->base);
+ } else {
+ *arg = 0;
+ return L;
+ }
+}
+
+static void treatstackoption(lua_State *L, lua_State *L1, const char *fname)
+{
+ if (L == L1) {
+ lua_pushvalue(L, -2);
+ lua_remove(L, -3);
+ }
+ else
+ lua_xmove(L1, L, 1);
+ lua_setfield(L, -2, fname);
+}
+
+LJLIB_CF(debug_getinfo)
+{
+ lj_Debug ar;
+ int arg, opt_f = 0, opt_L = 0;
+ lua_State *L1 = getthread(L, &arg);
+ const char *options = luaL_optstring(L, arg+2, "flnSu");
+ if (lua_isnumber(L, arg+1)) {
+ if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), (lua_Debug *)&ar)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ } else if (L->base+arg < L->top && tvisfunc(L->base+arg)) {
+ options = lua_pushfstring(L, ">%s", options);
+ setfuncV(L1, L1->top++, funcV(L->base+arg));
+ } else {
+ lj_err_arg(L, arg+1, LJ_ERR_NOFUNCL);
+ }
+ if (!lj_debug_getinfo(L1, options, &ar, 1))
+ lj_err_arg(L, arg+2, LJ_ERR_INVOPT);
+ lua_createtable(L, 0, 16); /* Create result table. */
+ for (; *options; options++) {
+ switch (*options) {
+ case 'S':
+ settabss(L, "source", ar.source);
+ settabss(L, "short_src", ar.short_src);
+ settabsi(L, "linedefined", ar.linedefined);
+ settabsi(L, "lastlinedefined", ar.lastlinedefined);
+ settabss(L, "what", ar.what);
+ break;
+ case 'l':
+ settabsi(L, "currentline", ar.currentline);
+ break;
+ case 'u':
+ settabsi(L, "nups", ar.nups);
+ settabsi(L, "nparams", ar.nparams);
+ settabsb(L, "isvararg", ar.isvararg);
+ break;
+ case 'n':
+ settabss(L, "name", ar.name);
+ settabss(L, "namewhat", ar.namewhat);
+ break;
+ case 'f': opt_f = 1; break;
+ case 'L': opt_L = 1; break;
+ default: break;
+ }
+ }
+ if (opt_L) treatstackoption(L, L1, "activelines");
+ if (opt_f) treatstackoption(L, L1, "func");
+ return 1; /* Return result table. */
+}
+
+LJLIB_CF(debug_getlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ const char *name;
+ int slot = lj_lib_checkint(L, arg+2);
+ if (tvisfunc(L->base+arg)) {
+ L->top = L->base+arg+1;
+ lua_pushstring(L, lua_getlocal(L, NULL, slot));
+ return 1;
+ }
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ name = lua_getlocal(L1, &ar, slot);
+ if (name) {
+ lua_xmove(L1, L, 1);
+ lua_pushstring(L, name);
+ lua_pushvalue(L, -2);
+ return 2;
+ } else {
+ setnilV(L->top-1);
+ return 1;
+ }
+}
+
+LJLIB_CF(debug_setlocal)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ lua_Debug ar;
+ TValue *tv;
+ if (!lua_getstack(L1, lj_lib_checkint(L, arg+1), &ar))
+ lj_err_arg(L, arg+1, LJ_ERR_LVLRNG);
+ tv = lj_lib_checkany(L, arg+3);
+ copyTV(L1, L1->top++, tv);
+ lua_pushstring(L, lua_setlocal(L1, &ar, lj_lib_checkint(L, arg+2)));
+ return 1;
+}
+
+static int debug_getupvalue(lua_State *L, int get)
+{
+ int32_t n = lj_lib_checkint(L, 2);
+ const char *name;
+ lj_lib_checkfunc(L, 1);
+ name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
+ if (name) {
+ lua_pushstring(L, name);
+ if (!get) return 1;
+ copyTV(L, L->top, L->top-2);
+ L->top++;
+ return 2;
+ }
+ return 0;
+}
+
+LJLIB_CF(debug_getupvalue)
+{
+ return debug_getupvalue(L, 1);
+}
+
+LJLIB_CF(debug_setupvalue)
+{
+ lj_lib_checkany(L, 3);
+ return debug_getupvalue(L, 0);
+}
+
+LJLIB_CF(debug_upvalueid)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ int32_t n = lj_lib_checkint(L, 2) - 1;
+ if ((uint32_t)n >= fn->l.nupvalues)
+ lj_err_arg(L, 2, LJ_ERR_IDXRNG);
+ lua_pushlightuserdata(L, isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
+ (void *)&fn->c.upvalue[n]);
+ return 1;
+}
+
+LJLIB_CF(debug_upvaluejoin)
+{
+ GCfunc *fn[2];
+ GCRef *p[2];
+ int i;
+ for (i = 0; i < 2; i++) {
+ int32_t n;
+ fn[i] = lj_lib_checkfunc(L, 2*i+1);
+ if (!isluafunc(fn[i]))
+ lj_err_arg(L, 2*i+1, LJ_ERR_NOLFUNC);
+ n = lj_lib_checkint(L, 2*i+2) - 1;
+ if ((uint32_t)n >= fn[i]->l.nupvalues)
+ lj_err_arg(L, 2*i+2, LJ_ERR_IDXRNG);
+ p[i] = &fn[i]->l.uvptr[n];
+ }
+ setgcrefr(*p[0], *p[1]);
+ lj_gc_objbarrier(L, fn[0], gcref(*p[1]));
+ return 0;
+}
+
+#if LJ_52
+LJLIB_CF(debug_getuservalue)
+{
+ TValue *o = L->base;
+ if (o < L->top && tvisudata(o))
+ settabV(L, o, tabref(udataV(o)->env));
+ else
+ setnilV(o);
+ L->top = o+1;
+ return 1;
+}
+
+LJLIB_CF(debug_setuservalue)
+{
+ TValue *o = L->base;
+ if (!(o < L->top && tvisudata(o)))
+ lj_err_argt(L, 1, LUA_TUSERDATA);
+ if (!(o+1 < L->top && tvistab(o+1)))
+ lj_err_argt(L, 2, LUA_TTABLE);
+ L->top = o+2;
+ lua_setfenv(L, 1);
+ return 1;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+#define KEY_HOOK (U64x(80000000,00000000)|'h')
+
+static void hookf(lua_State *L, lua_Debug *ar)
+{
+ static const char *const hooknames[] =
+ {"call", "return", "line", "count", "tail return"};
+ (L->top++)->u64 = KEY_HOOK;
+ lua_rawget(L, LUA_REGISTRYINDEX);
+ if (lua_isfunction(L, -1)) {
+ lua_pushstring(L, hooknames[(int)ar->event]);
+ if (ar->currentline >= 0)
+ lua_pushinteger(L, ar->currentline);
+ else lua_pushnil(L);
+ lua_call(L, 2, 0);
+ }
+}
+
+static int makemask(const char *smask, int count)
+{
+ int mask = 0;
+ if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
+ if (strchr(smask, 'r')) mask |= LUA_MASKRET;
+ if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
+ if (count > 0) mask |= LUA_MASKCOUNT;
+ return mask;
+}
+
+static char *unmakemask(int mask, char *smask)
+{
+ int i = 0;
+ if (mask & LUA_MASKCALL) smask[i++] = 'c';
+ if (mask & LUA_MASKRET) smask[i++] = 'r';
+ if (mask & LUA_MASKLINE) smask[i++] = 'l';
+ smask[i] = '\0';
+ return smask;
+}
+
+LJLIB_CF(debug_sethook)
+{
+ int arg, mask, count;
+ lua_Hook func;
+ (void)getthread(L, &arg);
+ if (lua_isnoneornil(L, arg+1)) {
+ lua_settop(L, arg+1);
+ func = NULL; mask = 0; count = 0; /* turn off hooks */
+ } else {
+ const char *smask = luaL_checkstring(L, arg+2);
+ luaL_checktype(L, arg+1, LUA_TFUNCTION);
+ count = luaL_optint(L, arg+3, 0);
+ func = hookf; mask = makemask(smask, count);
+ }
+ (L->top++)->u64 = KEY_HOOK;
+ lua_pushvalue(L, arg+1);
+ lua_rawset(L, LUA_REGISTRYINDEX);
+ lua_sethook(L, func, mask, count);
+ return 0;
+}
+
+LJLIB_CF(debug_gethook)
+{
+ char buff[5];
+ int mask = lua_gethookmask(L);
+ lua_Hook hook = lua_gethook(L);
+ if (hook != NULL && hook != hookf) { /* external hook? */
+ lua_pushliteral(L, "external hook");
+ } else {
+ (L->top++)->u64 = KEY_HOOK;
+ lua_rawget(L, LUA_REGISTRYINDEX); /* get hook */
+ }
+ lua_pushstring(L, unmakemask(mask, buff));
+ lua_pushinteger(L, lua_gethookcount(L));
+ return 3;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(debug_debug)
+{
+ for (;;) {
+ char buffer[250];
+ fputs("lua_debug> ", stderr);
+ if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
+ strcmp(buffer, "cont\n") == 0)
+ return 0;
+ if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
+ lua_pcall(L, 0, 0, 0)) {
+ const char *s = lua_tostring(L, -1);
+ fputs(s ? s : "(error object is not a string)", stderr);
+ fputs("\n", stderr);
+ }
+    lua_settop(L, 0); /* remove any leftover results */
+ }
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define LEVELS1 12 /* size of the first part of the stack */
+#define LEVELS2 10 /* size of the second part of the stack */
+
+LJLIB_CF(debug_traceback)
+{
+ int arg;
+ lua_State *L1 = getthread(L, &arg);
+ const char *msg = lua_tostring(L, arg+1);
+ if (msg == NULL && L->top > L->base+arg)
+ L->top = L->base+arg+1;
+ else
+ luaL_traceback(L, L1, msg, lj_lib_optint(L, arg+2, (L == L1)));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_debug(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_DBLIBNAME, debug);
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_ffi.c b/libs/luajit-cmake/luajit/src/lib_ffi.c
new file mode 100644
index 0000000..2295cf1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_ffi.c
@@ -0,0 +1,870 @@
+/*
+** FFI library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_ffi_c
+#define LUA_LIB
+
+#include <errno.h>
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_clib.h"
+#include "lj_strfmt.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* -- C type checks ------------------------------------------------------- */
+
+/* Check the first argument for a C type and return its ID. */
+static CTypeID ffi_checkctype(lua_State *L, CTState *cts, TValue *param)
+{
+ TValue *o = L->base;
+ if (!(o < L->top)) {
+ err_argtype:
+ lj_err_argtype(L, 1, "C type");
+ }
+ if (tvisstr(o)) { /* Parse an abstract C type declaration. */
+ GCstr *s = strV(o);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = cts;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = param;
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ return cp.val.id;
+ } else {
+ GCcdata *cd;
+ if (!tviscdata(o)) goto err_argtype;
+ if (param && param < L->top) lj_err_arg(L, 1, LJ_ERR_FFI_NUMPARAM);
+ cd = cdataV(o);
+ return cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) : cd->ctypeid;
+ }
+}
+
+/* Check argument for C data and return it. */
+static GCcdata *ffi_checkcdata(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tviscdata(o)))
+ lj_err_argt(L, narg, LUA_TCDATA);
+ return cdataV(o);
+}
+
+/* Convert argument to C pointer. */
+static void *ffi_checkptr(lua_State *L, int narg, CTypeID id)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ void *p;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, id), (uint8_t *)&p, o, CCF_ARG(narg));
+ return p;
+}
+
+/* Convert argument to int32_t. */
+static int32_t ffi_checkint(lua_State *L, int narg)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = L->base + narg-1;
+ int32_t i;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o,
+ CCF_ARG(narg));
+ return i;
+}
+
+/* -- C type metamethods -------------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_meta
+
+/* Handle ctype __index/__newindex metamethods. */
+static int ffi_index_meta(lua_State *L, CTState *cts, CType *ct, MMS mm)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, mm);
+ TValue *base = L->base;
+ if (!tv) {
+ const char *s;
+ err_index:
+ s = strdata(lj_ctype_repr(L, id, NULL));
+ if (tvisstr(L->base+1)) {
+ lj_err_callerv(L, LJ_ERR_FFI_BADMEMBER, s, strVdata(L->base+1));
+ } else {
+ const char *key = tviscdata(L->base+1) ?
+ strdata(lj_ctype_repr(L, cdataV(L->base+1)->ctypeid, NULL)) :
+ lj_typename(L->base+1);
+ lj_err_callerv(L, LJ_ERR_FFI_BADIDXW, s, key);
+ }
+ }
+ if (!tvisfunc(tv)) {
+ if (mm == MM_index) {
+ cTValue *o = lj_meta_tget(L, tv, base+1);
+ if (o) {
+ if (tvisnil(o)) goto err_index;
+ copyTV(L, L->top-1, o);
+ return 1;
+ }
+ } else {
+ TValue *o = lj_meta_tset(L, tv, base+1);
+ if (o) {
+ copyTV(L, o, base+2);
+ return 0;
+ }
+ }
+ copyTV(L, base, L->top);
+ tv = L->top-1-LJ_FR2;
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+LJLIB_CF(ffi_meta___index) LJLIB_REC(cdata_index 0)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+1 < L->top && tviscdata(o))) /* Also checks for presence of key. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1))
+ return ffi_index_meta(L, cts, ct, MM_index);
+ if (lj_cdata_get(cts, ct, L->top-1, p))
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_meta___newindex) LJLIB_REC(cdata_index 1)
+{
+ CTState *cts = ctype_cts(L);
+ CTInfo qual = 0;
+ CType *ct;
+ uint8_t *p;
+ TValue *o = L->base;
+ if (!(o+2 < L->top && tviscdata(o))) /* Also checks for key and value. */
+ lj_err_argt(L, 1, LUA_TCDATA);
+ ct = lj_cdata_index(cts, cdataV(o), o+1, &p, &qual);
+ if ((qual & 1)) {
+ if ((qual & CTF_CONST))
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return ffi_index_meta(L, cts, ct, MM_newindex);
+ }
+ lj_cdata_set(cts, ct, p, o+2, qual);
+ return 0;
+}
+
+/* Common handler for cdata arithmetic. */
+static int ffi_arith(lua_State *L)
+{
+ MMS mm = (MMS)(curr_func(L)->c.ffid - (int)FF_ffi_meta___eq + (int)MM_eq);
+ return lj_carith_op(L, mm);
+}
+
+/* The following functions must be in contiguous ORDER MM. */
+LJLIB_CF(ffi_meta___eq) LJLIB_REC(cdata_arith MM_eq)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___len) LJLIB_REC(cdata_arith MM_len)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___lt) LJLIB_REC(cdata_arith MM_lt)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___le) LJLIB_REC(cdata_arith MM_le)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___concat) LJLIB_REC(cdata_arith MM_concat)
+{
+ return ffi_arith(L);
+}
+
+/* Forward declaration. */
+static int lj_cf_ffi_new(lua_State *L);
+
+LJLIB_CF(ffi_meta___call) LJLIB_REC(cdata_call)
+{
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ CTypeID id = cd->ctypeid;
+ CType *ct;
+ cTValue *tv;
+ MMS mm = MM_call;
+ if (cd->ctypeid == CTID_CTYPEID) {
+ id = *(CTypeID *)cdataptr(cd);
+ mm = MM_new;
+ } else {
+ int ret = lj_ccall_func(L, cd);
+ if (ret >= 0)
+ return ret;
+ }
+ /* Handle ctype __call/__new metamethod. */
+ ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ if (tv)
+ return lj_meta_tailcall(L, tv);
+ else if (mm == MM_call)
+ lj_err_callerv(L, LJ_ERR_FFI_BADCALL, strdata(lj_ctype_repr(L, id, NULL)));
+ return lj_cf_ffi_new(L);
+}
+
+LJLIB_CF(ffi_meta___add) LJLIB_REC(cdata_arith MM_add)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___sub) LJLIB_REC(cdata_arith MM_sub)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mul) LJLIB_REC(cdata_arith MM_mul)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___div) LJLIB_REC(cdata_arith MM_div)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___mod) LJLIB_REC(cdata_arith MM_mod)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___pow) LJLIB_REC(cdata_arith MM_pow)
+{
+ return ffi_arith(L);
+}
+
+LJLIB_CF(ffi_meta___unm) LJLIB_REC(cdata_arith MM_unm)
+{
+ return ffi_arith(L);
+}
+/* End of contiguous ORDER MM. */
+
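+/* Usage sketch (illustrative, assuming a standard LuaJIT build): the
+** arithmetic metamethods above are what fire for 64-bit integer cdata
+** in plain Lua code:
+**
+**   local a = 10LL
+**   assert(a + 1 == 11LL)            -- ffi_meta___add
+**   assert(-a == -10LL)              -- ffi_meta___unm
+**   assert(a < 11LL and a <= 10LL)   -- ffi_meta___lt / ___le
+*/
+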
+LJLIB_CF(ffi_meta___tostring)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ const char *msg = "cdata<%s>: %p";
+ CTypeID id = cd->ctypeid;
+ void *p = cdataptr(cd);
+ if (id == CTID_CTYPEID) {
+ msg = "ctype<%s>";
+ id = *(CTypeID *)p;
+ } else {
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isref(ct->info)) {
+ p = *(void **)p;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_iscomplex(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_complex(L, cdataptr(cd), ct->size));
+ goto checkgc;
+ } else if (ct->size == 8 && ctype_isinteger(ct->info)) {
+ setstrV(L, L->top-1, lj_ctype_repr_int64(L, *(uint64_t *)cdataptr(cd),
+ (ct->info & CTF_UNSIGNED)));
+ goto checkgc;
+ } else if (ctype_isfunc(ct->info)) {
+ p = *(void **)p;
+ } else if (ctype_isenum(ct->info)) {
+ msg = "cdata<%s>: %d";
+ p = (void *)(uintptr_t)*(uint32_t *)p;
+ } else {
+ if (ctype_isptr(ct->info)) {
+ p = cdata_getptr(p, ct->size);
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isstruct(ct->info) || ctype_isvector(ct->info)) {
+ /* Handle ctype __tostring metamethod. */
+ cTValue *tv = lj_ctype_meta(cts, ctype_typeid(cts, ct), MM_tostring);
+ if (tv)
+ return lj_meta_tailcall(L, tv);
+ }
+ }
+ }
+ lj_strfmt_pushf(L, msg, strdata(lj_ctype_repr(L, id, NULL)), p);
+checkgc:
+ lj_gc_check(L);
+ return 1;
+}
+
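+/* Usage sketch (illustrative): the branches above map to the strings
+** tostring() yields for the various cdata kinds; exact pointer values
+** vary, so the last line is indicative only:
+**
+**   local ffi = require("ffi")
+**   print(tostring(1ULL))                      --> 1ULL
+**   print(tostring(ffi.new("complex", 1, 2)))  --> 1+2i
+**   print(tostring(ffi.new("int[3]")))         --> cdata<int [3]>: 0x...
+*/
+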
+static int ffi_pairs(lua_State *L, MMS mm)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkcdata(L, 1)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ cTValue *tv;
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ if (!tv)
+ lj_err_callerv(L, LJ_ERR_FFI_BADMM, strdata(lj_ctype_repr(L, id, NULL)),
+ strdata(mmname_str(G(L), mm)));
+ return lj_meta_tailcall(L, tv);
+}
+
+LJLIB_CF(ffi_meta___pairs)
+{
+ return ffi_pairs(L, MM_pairs);
+}
+
+LJLIB_CF(ffi_meta___ipairs)
+{
+ return ffi_pairs(L, MM_ipairs);
+}
+
+LJLIB_PUSH("ffi") LJLIB_SET(__metatable)
+
+#include "lj_libdef.h"
+
+/* -- C library metamethods ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi_clib
+
+/* Index C library by a name. */
+static TValue *ffi_clib_index(lua_State *L)
+{
+ TValue *o = L->base;
+ CLibrary *cl;
+ if (!(o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB))
+ lj_err_argt(L, 1, LUA_TUSERDATA);
+ cl = (CLibrary *)uddata(udataV(o));
+ if (!(o+1 < L->top && tvisstr(o+1)))
+ lj_err_argt(L, 2, LUA_TSTRING);
+ return lj_clib_index(L, cl, strV(o+1));
+}
+
+LJLIB_CF(ffi_clib___index) LJLIB_REC(clib_index 1)
+{
+ TValue *tv = ffi_clib_index(L);
+ if (tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *s = ctype_get(cts, cd->ctypeid);
+ if (ctype_isextern(s->info)) {
+ CTypeID sid = ctype_cid(s->info);
+ void *sp = *(void **)cdataptr(cd);
+ CType *ct = ctype_raw(cts, sid);
+ if (lj_cconv_tv_ct(cts, ct, sid, L->top-1, sp))
+ lj_gc_check(L);
+ return 1;
+ }
+ }
+ copyTV(L, L->top-1, tv);
+ return 1;
+}
+
+LJLIB_CF(ffi_clib___newindex) LJLIB_REC(clib_index 0)
+{
+ TValue *tv = ffi_clib_index(L);
+ TValue *o = L->base+2;
+ if (o < L->top && tviscdata(tv)) {
+ CTState *cts = ctype_cts(L);
+ GCcdata *cd = cdataV(tv);
+ CType *d = ctype_get(cts, cd->ctypeid);
+ if (ctype_isextern(d->info)) {
+ CTInfo qual = 0;
+ for (;;) { /* Skip attributes and collect qualifiers. */
+ d = ctype_child(cts, d);
+ if (!ctype_isattrib(d->info)) break;
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ }
+ if (!((d->info|qual) & CTF_CONST)) {
+ lj_cconv_ct_tv(cts, d, *(void **)cdataptr(cd), o, 0);
+ return 0;
+ }
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_WRCONST);
+ return 0; /* unreachable */
+}
+
+LJLIB_CF(ffi_clib___gc)
+{
+ TValue *o = L->base;
+ if (o < L->top && tvisudata(o) && udataV(o)->udtype == UDTYPE_FFI_CLIB)
+ lj_clib_unload((CLibrary *)uddata(udataV(o)));
+ return 0;
+}
+
+#include "lj_libdef.h"
+
+/* -- Callback function metamethods --------------------------------------- */
+
+#define LJLIB_MODULE_ffi_callback
+
+static int ffi_callback_set(lua_State *L, GCfunc *fn)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ if (ctype_isptr(ct->info) && (LJ_32 || ct->size == 8)) {
+ MSize slot = lj_ccallback_ptr2slot(cts, *(void **)cdataptr(cd));
+ if (slot < cts->cb.sizeid && cts->cb.cbid[slot] != 0) {
+ GCtab *t = cts->miscmap;
+ TValue *tv = lj_tab_setint(L, t, (int32_t)slot);
+ if (fn) {
+ setfuncV(L, tv, fn);
+ lj_gc_anybarriert(L, t);
+ } else {
+ setnilV(tv);
+ cts->cb.cbid[slot] = 0;
+ cts->cb.topid = slot < cts->cb.topid ? slot : cts->cb.topid;
+ }
+ return 0;
+ }
+ }
+ lj_err_caller(L, LJ_ERR_FFI_BADCBACK);
+ return 0;
+}
+
+LJLIB_CF(ffi_callback_free)
+{
+ return ffi_callback_set(L, NULL);
+}
+
+LJLIB_CF(ffi_callback_set)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 2);
+ return ffi_callback_set(L, fn);
+}
+
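+/* Usage sketch (illustrative, assuming callbacks are enabled on this
+** target): cb:set() and cb:free() below are the Lua-visible faces of
+** ffi_callback_set():
+**
+**   local ffi = require("ffi")
+**   local cb = ffi.cast("int (*)(int)", function(x) return x + 1 end)
+**   assert(cb(41) == 42)
+**   cb:set(function(x) return x * 2 end)  -- reuse the slot, new function
+**   assert(cb(21) == 42)
+**   cb:free()                             -- release the callback slot
+*/
+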
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+#include "lj_libdef.h"
+
+/* -- FFI library functions ----------------------------------------------- */
+
+#define LJLIB_MODULE_ffi
+
+LJLIB_CF(ffi_cdef)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ CPState cp;
+ int errcode;
+ cp.L = L;
+ cp.cts = ctype_cts(L);
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = L->base+1;
+ cp.mode = CPARSE_MODE_MULTI|CPARSE_MODE_DIRECT;
+ errcode = lj_cparse(&cp);
+ if (errcode) lj_err_throw(L, errcode); /* Propagate errors. */
+ lj_gc_check(L);
+ return 0;
+}
+
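+/* Usage sketch (illustrative): lj_cparse() above consumes plain C
+** declarations passed from Lua; point_t is a hypothetical example type:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[
+**     typedef struct { double x, y; } point_t;
+**     int printf(const char *fmt, ...);
+**   ]]
+*/
+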
+LJLIB_CF(ffi_new) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CType *ct = ctype_raw(cts, id);
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ TValue *o = L->base+1;
+ GCcdata *cd;
+ if ((info & CTF_VLA)) {
+ o++;
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ }
+ if (sz == CTSIZE_INVALID)
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVSIZE);
+ cd = lj_cdata_newx(cts, id, sz, info);
+ setcdataV(L, o-1, cd); /* Anchor the uninitialized cdata. */
+ lj_cconv_ct_init(cts, ct, sz, cdataptr(cd),
+ o, (MSize)(L->top - o)); /* Initialize cdata. */
+ if (ctype_isstruct(ct->info)) {
+ /* Handle ctype __gc metamethod. Use the fast lookup here. */
+ cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) {
+ GCtab *t = cts->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add to finalizer table, if still enabled. */
+ copyTV(L, lj_tab_set(L, t, o-1), tv);
+ lj_gc_anybarriert(L, t);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ }
+ }
+ }
+ L->top = o; /* Only return the cdata itself. */
+ lj_gc_check(L);
+ return 1;
+}
+
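+/* Usage sketch (illustrative): the CTF_VLA branch above consumes the
+** extra element-count argument; the remaining arguments are handed to
+** lj_cconv_ct_init() as initializers:
+**
+**   local ffi = require("ffi")
+**   local arr = ffi.new("int[?]", 8)              -- VLA, 8 elements
+**   local one = ffi.new("double[4]", 1, 2, 3, 4)  -- initializers
+**   assert(ffi.sizeof(arr) == 8 * ffi.sizeof("int"))
+*/
+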
+LJLIB_CF(ffi_cast) LJLIB_REC(ffi_new)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CType *d = ctype_raw(cts, id);
+ TValue *o = lj_lib_checkany(L, 2);
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) || ctype_isenum(d->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ if (!(tviscdata(o) && cdataV(o)->ctypeid == id)) {
+ GCcdata *cd = lj_cdata_new(cts, id, d->size);
+ lj_cconv_ct_tv(cts, d, cdataptr(cd), o, CCF_CAST);
+ setcdataV(L, o, cd);
+ lj_gc_check(L);
+ }
+ return 1;
+}
+
+LJLIB_CF(ffi_typeof) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, L->base+1);
+ GCcdata *cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+/* Internal and unsupported API. */
+LJLIB_CF(ffi_typeinfo)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = (CTypeID)ffi_checkint(L, 1);
+ if (id > 0 && id < cts->top) {
+ CType *ct = ctype_get(cts, id);
+ GCtab *t;
+ lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "info")), (int32_t)ct->info);
+ if (ct->size != CTSIZE_INVALID)
+ setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "size")), (int32_t)ct->size);
+ if (ct->sib)
+ setintV(lj_tab_setstr(L, t, lj_str_newlit(L, "sib")), (int32_t)ct->sib);
+ if (gcref(ct->name)) {
+ GCstr *s = gco2str(gcref(ct->name));
+ if (isdead(G(L), obj2gco(s))) flipwhite(obj2gco(s));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "name")), s);
+ }
+ lj_gc_check(L);
+ return 1;
+ }
+ return 0;
+}
+
+LJLIB_CF(ffi_istype) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id1 = ffi_checkctype(L, cts, NULL);
+ TValue *o = lj_lib_checkany(L, 2);
+ int b = 0;
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id2 = cd->ctypeid == CTID_CTYPEID ? *(CTypeID *)cdataptr(cd) :
+ cd->ctypeid;
+ CType *ct1 = lj_ctype_rawref(cts, id1);
+ CType *ct2 = lj_ctype_rawref(cts, id2);
+ if (ct1 == ct2) {
+ b = 1;
+ } else if (ctype_type(ct1->info) == ctype_type(ct2->info) &&
+ ct1->size == ct2->size) {
+ if (ctype_ispointer(ct1->info))
+ b = lj_cconv_compatptr(cts, ct1, ct2, CCF_IGNQUAL);
+ else if (ctype_isnum(ct1->info) || ctype_isvoid(ct1->info))
+ b = (((ct1->info ^ ct2->info) & ~(CTF_QUAL|CTF_LONG)) == 0);
+ } else if (ctype_isstruct(ct1->info) && ctype_isptr(ct2->info) &&
+ ct1 == ctype_rawchild(cts, ct2)) {
+ b = 1;
+ }
+ }
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
+LJLIB_CF(ffi_sizeof) LJLIB_REC(ffi_xof FF_ffi_sizeof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CTSize sz;
+ if (LJ_UNLIKELY(tviscdata(L->base) && cdataisv(cdataV(L->base)))) {
+ sz = cdatavlen(cdataV(L->base));
+ } else {
+ CType *ct = lj_ctype_rawref(cts, id);
+ if (ctype_isvltype(ct->info))
+ sz = lj_ctype_vlsize(cts, ct, (CTSize)ffi_checkint(L, 2));
+ else
+ sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+ if (LJ_UNLIKELY(sz == CTSIZE_INVALID)) {
+ setnilV(L->top-1);
+ return 1;
+ }
+ }
+ setintV(L->top-1, (int32_t)sz);
+ return 1;
+}
+
+LJLIB_CF(ffi_alignof) LJLIB_REC(ffi_xof FF_ffi_alignof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ CTSize sz = 0;
+ CTInfo info = lj_ctype_info_raw(cts, id, &sz);
+ setintV(L->top-1, 1 << ctype_align(info));
+ return 1;
+}
+
+LJLIB_CF(ffi_offsetof) LJLIB_REC(ffi_xof FF_ffi_offsetof)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ GCstr *name = lj_lib_checkstr(L, 2);
+ CType *ct = lj_ctype_rawref(cts, id);
+ CTSize ofs;
+ if (ctype_isstruct(ct->info) && ct->size != CTSIZE_INVALID) {
+ CType *fct = lj_ctype_getfield(cts, ct, name, &ofs);
+ if (fct) {
+ setintV(L->top-1, ofs);
+ if (ctype_isfield(fct->info)) {
+ return 1;
+ } else if (ctype_isbitfield(fct->info)) {
+ setintV(L->top++, ctype_bitpos(fct->info));
+ setintV(L->top++, ctype_bitbsz(fct->info));
+ return 3;
+ }
+ }
+ }
+ return 0;
+}
+
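+/* Usage sketch (illustrative): typical results on a 64-bit ABI with
+** 8-byte doubles; struct padding is implementation-defined, so treat
+** the numbers as indicative:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef struct { char c; double d; } pad_t; ]]
+**   print(ffi.sizeof("pad_t"))         --> 16
+**   print(ffi.alignof("pad_t"))        --> 8
+**   print(ffi.offsetof("pad_t", "d"))  --> 8
+*/
+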
+LJLIB_CF(ffi_errno) LJLIB_REC(.)
+{
+ int err = errno;
+ if (L->top > L->base)
+ errno = ffi_checkint(L, 1);
+ setintV(L->top++, err);
+ return 1;
+}
+
+LJLIB_CF(ffi_string) LJLIB_REC(.)
+{
+ CTState *cts = ctype_cts(L);
+ TValue *o = lj_lib_checkany(L, 1);
+ const char *p;
+ size_t len;
+ if (o+1 < L->top && !tvisnil(o+1)) {
+ len = (size_t)ffi_checkint(L, 2);
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CVOID), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ } else {
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_P_CCHAR), (uint8_t *)&p, o,
+ CCF_ARG(1));
+ len = strlen(p);
+ }
+ L->top = o+1; /* Make sure this is the last item on the stack. */
+ setstrV(L, o, lj_str_new(L, p, len));
+ lj_gc_check(L);
+ return 1;
+}
+
+LJLIB_CF(ffi_copy) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ void *sp = ffi_checkptr(L, 2, CTID_P_CVOID);
+ TValue *o = L->base+1;
+ CTSize len;
+ if (tvisstr(o) && o+1 >= L->top)
+ len = strV(o)->len+1; /* Copy Lua string including trailing '\0'. */
+ else
+ len = (CTSize)ffi_checkint(L, 3);
+ memcpy(dp, sp, len);
+ return 0;
+}
+
+LJLIB_CF(ffi_fill) LJLIB_REC(.)
+{
+ void *dp = ffi_checkptr(L, 1, CTID_P_VOID);
+ CTSize len = (CTSize)ffi_checkint(L, 2);
+ int32_t fill = 0;
+ if (L->base+2 < L->top && !tvisnil(L->base+2)) fill = ffi_checkint(L, 3);
+ memset(dp, fill, len);
+ return 0;
+}
+
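+/* Usage sketch (illustrative): string/copy/fill as seen from Lua; note
+** that copying from a Lua string without an explicit length includes
+** the trailing '\0', per the comment in lj_cf_ffi_copy() above:
+**
+**   local ffi = require("ffi")
+**   local buf = ffi.new("char[16]")
+**   ffi.fill(buf, 16)                 -- fill byte defaults to 0
+**   ffi.copy(buf, "hi")               -- copies "hi" plus '\0'
+**   assert(ffi.string(buf) == "hi")   -- length via strlen()
+**   assert(ffi.string(buf, 2) == "hi")
+*/
+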
+/* Test ABI string. */
+LJLIB_CF(ffi_abi) LJLIB_REC(.)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int b = lj_cparse_case(s,
+#if LJ_64
+ "\00564bit"
+#else
+ "\00532bit"
+#endif
+#if LJ_ARCH_HASFPU
+ "\003fpu"
+#endif
+#if LJ_ABI_SOFTFP
+ "\006softfp"
+#else
+ "\006hardfp"
+#endif
+#if LJ_ABI_EABI
+ "\004eabi"
+#endif
+#if LJ_ABI_WIN
+ "\003win"
+#endif
+#if LJ_TARGET_UWP
+ "\003uwp"
+#endif
+#if LJ_LE
+ "\002le"
+#else
+ "\002be"
+#endif
+#if LJ_GC64
+ "\004gc64"
+#endif
+ ) >= 0;
+ setboolV(L->top-1, b);
+ setboolV(&G(L)->tmptv2, b); /* Remember for trace recorder. */
+ return 1;
+}
+
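+/* Usage sketch (illustrative): ffi.abi() answers strictly from the
+** compiled-in parameter list above and returns false for anything else:
+**
+**   local ffi = require("ffi")
+**   print(ffi.abi("64bit"), ffi.abi("le"))  -- booleans for this build
+**   assert(ffi.abi("bogus") == false)
+*/
+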
+LJLIB_PUSH(top-8) LJLIB_SET(!) /* Store reference to miscmap table. */
+
+LJLIB_CF(ffi_metatype)
+{
+ CTState *cts = ctype_cts(L);
+ CTypeID id = ffi_checkctype(L, cts, NULL);
+ GCtab *mt = lj_lib_checktab(L, 2);
+ GCtab *t = cts->miscmap;
+ CType *ct = ctype_raw(cts, id);
+ TValue *tv;
+ GCcdata *cd;
+ if (!(ctype_isstruct(ct->info) || ctype_iscomplex(ct->info) ||
+ ctype_isvector(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ tv = lj_tab_setinth(L, t, -(int32_t)id);
+ if (!tvisnil(tv))
+ lj_err_caller(L, LJ_ERR_PROTMT);
+ settabV(L, tv, mt);
+ lj_gc_anybarriert(L, t);
+ cd = lj_cdata_new(cts, CTID_CTYPEID, 4);
+ *(CTypeID *)cdataptr(cd) = id;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
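+/* Usage sketch (illustrative): ffi.metatype() returns the ctype, which
+** is callable as a constructor; the metatable is permanent (see the
+** LJ_ERR_PROTMT check above). vec2 is a hypothetical example type:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ typedef struct { double x, y; } vec2; ]]
+**   local vec2 = ffi.metatype("vec2", {
+**     __add = function(a, b) return vec2(a.x + b.x, a.y + b.y) end,
+**     __index = { len2 = function(v) return v.x * v.x + v.y * v.y end },
+**   })
+**   local v = vec2(3, 4) + vec2(1, 0)
+**   assert(v.x == 4 and v:len2() == 32)
+*/
+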
+LJLIB_PUSH(top-7) LJLIB_SET(!) /* Store reference to finalizer table. */
+
+LJLIB_CF(ffi_gc) LJLIB_REC(.)
+{
+ GCcdata *cd = ffi_checkcdata(L, 1);
+ TValue *fin = lj_lib_checkany(L, 2);
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ if (!(ctype_isptr(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isrefarray(ct->info)))
+ lj_err_arg(L, 1, LJ_ERR_FFI_INVTYPE);
+ lj_cdata_setfin(L, cd, gcval(fin), itype(fin));
+ L->top = L->base+1; /* Pass through the cdata object. */
+ return 1;
+}
+
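+/* Usage sketch (illustrative): the canonical ffi.gc() idiom; passing a
+** nil finalizer detaches a previously set one:
+**
+**   local ffi = require("ffi")
+**   ffi.cdef[[ void *malloc(size_t); void free(void *); ]]
+**   local p = ffi.gc(ffi.C.malloc(256), ffi.C.free)
+**   ffi.gc(p, nil)   -- detach before freeing manually
+**   ffi.C.free(p)
+*/
+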
+LJLIB_PUSH(top-5) LJLIB_SET(!) /* Store clib metatable in func environment. */
+
+LJLIB_CF(ffi_load)
+{
+ GCstr *name = lj_lib_checkstr(L, 1);
+ int global = (L->base+1 < L->top && tvistruecond(L->base+1));
+ lj_clib_load(L, tabref(curr_func(L)->c.env), name, global);
+ return 1;
+}
+
+LJLIB_PUSH(top-4) LJLIB_SET(C)
+LJLIB_PUSH(top-3) LJLIB_SET(os)
+LJLIB_PUSH(top-2) LJLIB_SET(arch)
+
+#include "lj_libdef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Create special weak-keyed finalizer table. */
+static GCtab *ffi_finalizer(lua_State *L)
+{
+ /* NOBARRIER: The table is new (marked white). */
+ GCtab *t = lj_tab_new(L, 0, 1);
+ settabV(L, L->top++, t);
+ setgcref(t->metatable, obj2gco(t));
+ setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")),
+ lj_str_newlit(L, "k"));
+ t->nomm = (uint8_t)(~(1u<<MM_mode));
+ return t;
+}
+
+/* Register FFI module as loaded. */
+static void ffi_register_module(lua_State *L)
+{
+ cTValue *tmp = lj_tab_getstr(tabV(registry(L)), lj_str_newlit(L, "_LOADED"));
+ if (tmp && tvistab(tmp)) {
+ GCtab *t = tabV(tmp);
+ copyTV(L, lj_tab_setstr(L, t, lj_str_newlit(L, LUA_FFILIBNAME)), L->top-1);
+ lj_gc_anybarriert(L, t);
+ }
+}
+
+LUALIB_API int luaopen_ffi(lua_State *L)
+{
+ CTState *cts = lj_ctype_init(L);
+ settabV(L, L->top++, (cts->miscmap = lj_tab_new(L, 0, 1)));
+ cts->finalizer = ffi_finalizer(L);
+ LJ_LIB_REG(L, NULL, ffi_meta);
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(G(L), LJ_TCDATA), obj2gco(tabV(L->top-1)));
+ LJ_LIB_REG(L, NULL, ffi_clib);
+ LJ_LIB_REG(L, NULL, ffi_callback);
+ /* NOBARRIER: the key is new and lj_tab_newkey() handles the barrier. */
+ settabV(L, lj_tab_setstr(L, cts->miscmap, &cts->g->strempty), tabV(L->top-1));
+ L->top--;
+ lj_clib_default(L, tabV(L->top-1)); /* Create ffi.C default namespace. */
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ LJ_LIB_REG(L, NULL, ffi); /* Note: no global "ffi" created! */
+ ffi_register_module(L);
+ return 1;
+}
+
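+/* Usage sketch (illustrative): luaopen_ffi() only registers the module
+** in package.loaded (see ffi_register_module() above); no global is
+** created, so scripts must require it:
+**
+**   assert(rawget(_G, "ffi") == nil)
+**   local ffi = require("ffi")
+*/
+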
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lib_init.c b/libs/luajit-cmake/luajit/src/lib_init.c
new file mode 100644
index 0000000..35e06fe
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_init.c
@@ -0,0 +1,55 @@
+/*
+** Library initialization.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major parts taken verbatim from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_init_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_arch.h"
+
+static const luaL_Reg lj_lib_load[] = {
+ { "", luaopen_base },
+ { LUA_LOADLIBNAME, luaopen_package },
+ { LUA_TABLIBNAME, luaopen_table },
+ { LUA_IOLIBNAME, luaopen_io },
+ { LUA_OSLIBNAME, luaopen_os },
+ { LUA_STRLIBNAME, luaopen_string },
+ { LUA_MATHLIBNAME, luaopen_math },
+ { LUA_DBLIBNAME, luaopen_debug },
+ { LUA_BITLIBNAME, luaopen_bit },
+ { LUA_JITLIBNAME, luaopen_jit },
+ { NULL, NULL }
+};
+
+static const luaL_Reg lj_lib_preload[] = {
+#if LJ_HASFFI
+ { LUA_FFILIBNAME, luaopen_ffi },
+#endif
+ { NULL, NULL }
+};
+
+LUALIB_API void luaL_openlibs(lua_State *L)
+{
+ const luaL_Reg *lib;
+ for (lib = lj_lib_load; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_pushstring(L, lib->name);
+ lua_call(L, 1, 0);
+ }
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD",
+ sizeof(lj_lib_preload)/sizeof(lj_lib_preload[0])-1);
+ for (lib = lj_lib_preload; lib->func; lib++) {
+ lua_pushcfunction(L, lib->func);
+ lua_setfield(L, -2, lib->name);
+ }
+ lua_pop(L, 1);
+}
+
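+/* Usage sketch (illustrative): after luaL_openlibs() the FFI loader sits
+** in package.preload rather than being opened eagerly, so from Lua:
+**
+**   assert(type(package.preload.ffi) == "function")  -- if LJ_HASFFI
+**   local ffi = require("ffi")   -- runs luaopen_ffi on first use
+*/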
diff --git a/libs/luajit-cmake/luajit/src/lib_io.c b/libs/luajit-cmake/luajit/src/lib_io.c
new file mode 100644
index 0000000..c22faa2
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_io.c
@@ -0,0 +1,551 @@
+/*
+** I/O library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lib_io_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_state.h"
+#include "lj_strfmt.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* Userdata payload for I/O file. */
+typedef struct IOFileUD {
+ FILE *fp; /* File handle. */
+ uint32_t type; /* File type. */
+} IOFileUD;
+
+#define IOFILE_TYPE_FILE 0 /* Regular file. */
+#define IOFILE_TYPE_PIPE 1 /* Pipe. */
+#define IOFILE_TYPE_STDF 2 /* Standard file handle. */
+#define IOFILE_TYPE_MASK 3
+
+#define IOFILE_FLAG_CLOSE 4 /* Close after io.lines() iterator. */
+
+#define IOSTDF_UD(L, id) (&gcref(G(L)->gcroot[(id)])->ud)
+#define IOSTDF_IOF(L, id) ((IOFileUD *)uddata(IOSTDF_UD(L, (id))))
+
+/* -- Open/close helpers -------------------------------------------------- */
+
+static IOFileUD *io_tofilep(lua_State *L)
+{
+ if (!(L->base < L->top && tvisudata(L->base) &&
+ udataV(L->base)->udtype == UDTYPE_IO_FILE))
+ lj_err_argtype(L, 1, "FILE*");
+ return (IOFileUD *)uddata(udataV(L->base));
+}
+
+static IOFileUD *io_tofile(lua_State *L)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOCLFL);
+ return iof;
+}
+
+static IOFileUD *io_stdfile(lua_State *L, ptrdiff_t id)
+{
+ IOFileUD *iof = IOSTDF_IOF(L, id);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOSTDCL);
+ return iof;
+}
+
+static IOFileUD *io_file_new(lua_State *L)
+{
+ IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD));
+ GCudata *ud = udataV(L->top-1);
+ ud->udtype = UDTYPE_IO_FILE;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefr(ud->metatable, curr_func(L)->c.env);
+ iof->fp = NULL;
+ iof->type = IOFILE_TYPE_FILE;
+ return iof;
+}
+
+static IOFileUD *io_file_open(lua_State *L, const char *mode)
+{
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = fopen(fname, mode);
+ if (iof->fp == NULL)
+ luaL_argerror(L, 1, lj_strfmt_pushf(L, "%s: %s", fname, strerror(errno)));
+ return iof;
+}
+
+static int io_file_close(lua_State *L, IOFileUD *iof)
+{
+ int ok;
+ if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_FILE) {
+ ok = (fclose(iof->fp) == 0);
+ } else if ((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_PIPE) {
+ int stat = -1;
+#if LJ_TARGET_POSIX
+ stat = pclose(iof->fp);
+#elif LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP
+ stat = _pclose(iof->fp);
+#endif
+#if LJ_52
+ iof->fp = NULL;
+ return luaL_execresult(L, stat);
+#else
+ ok = (stat != -1);
+#endif
+ } else {
+ lj_assertL((iof->type & IOFILE_TYPE_MASK) == IOFILE_TYPE_STDF,
+ "close of unknown FILE* type");
+ setnilV(L->top++);
+ lua_pushliteral(L, "cannot close standard file");
+ return 2;
+ }
+ iof->fp = NULL;
+ return luaL_fileresult(L, ok, NULL);
+}
+
+/* -- Read/write helpers -------------------------------------------------- */
+
+static int io_file_readnum(lua_State *L, FILE *fp)
+{
+ lua_Number d;
+ if (fscanf(fp, LUA_NUMBER_SCAN, &d) == 1) {
+ if (LJ_DUALNUM) {
+ int32_t i = lj_num2int(d);
+ if (d == (lua_Number)i && !tvismzero((cTValue *)&d)) {
+ setintV(L->top++, i);
+ return 1;
+ }
+ }
+ setnumV(L->top++, d);
+ return 1;
+ } else {
+ setnilV(L->top++);
+ return 0;
+ }
+}
+
+static int io_file_readline(lua_State *L, FILE *fp, MSize chop)
+{
+ MSize m = LUAL_BUFFERSIZE, n = 0, ok = 0;
+ char *buf;
+ for (;;) {
+ buf = lj_buf_tmp(L, m);
+ if (fgets(buf+n, m-n, fp) == NULL) break;
+ n += (MSize)strlen(buf+n);
+ ok |= n;
+ if (n && buf[n-1] == '\n') { n -= chop; break; }
+ if (n >= m - 64) m += m;
+ }
+ setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n));
+ lj_gc_check(L);
+ return (int)ok;
+}
+
+static void io_file_readall(lua_State *L, FILE *fp)
+{
+ MSize m, n;
+ for (m = LUAL_BUFFERSIZE, n = 0; ; m += m) {
+ char *buf = lj_buf_tmp(L, m);
+ n += (MSize)fread(buf+n, 1, m-n, fp);
+ if (n != m) {
+ setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n));
+ lj_gc_check(L);
+ return;
+ }
+ }
+}
+
+static int io_file_readlen(lua_State *L, FILE *fp, MSize m)
+{
+ if (m) {
+ char *buf = lj_buf_tmp(L, m);
+ MSize n = (MSize)fread(buf, 1, m, fp);
+ setstrV(L, L->top++, lj_str_new(L, buf, (size_t)n));
+ lj_gc_check(L);
+ return n > 0;
+ } else {
+ int c = getc(fp);
+ ungetc(c, fp);
+ setstrV(L, L->top++, &G(L)->strempty);
+ return (c != EOF);
+ }
+}
+
+static int io_file_read(lua_State *L, IOFileUD *iof, int start)
+{
+ FILE *fp = iof->fp;
+ int ok, n, nargs = (int)(L->top - L->base) - start;
+ clearerr(fp);
+ if (nargs == 0) {
+ ok = io_file_readline(L, fp, 1);
+ n = start+1; /* Return 1 result. */
+ } else {
+ /* The results plus the buffers go on top of the args. */
+ luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments");
+ ok = 1;
+ for (n = start; nargs-- && ok; n++) {
+ if (tvisstr(L->base+n)) {
+ const char *p = strVdata(L->base+n);
+ if (p[0] == '*') p++;
+ if (p[0] == 'n')
+ ok = io_file_readnum(L, fp);
+ else if ((p[0] & ~0x20) == 'L')
+ ok = io_file_readline(L, fp, (p[0] == 'l'));
+ else if (p[0] == 'a')
+ io_file_readall(L, fp);
+ else
+ lj_err_arg(L, n+1, LJ_ERR_INVFMT);
+ } else if (tvisnumber(L->base+n)) {
+ ok = io_file_readlen(L, fp, (MSize)lj_lib_checkint(L, n+1));
+ } else {
+ lj_err_arg(L, n+1, LJ_ERR_INVOPT);
+ }
+ }
+ }
+ if (ferror(fp))
+ return luaL_fileresult(L, 0, NULL);
+ if (!ok)
+ setnilV(L->top-1); /* Replace last result with nil. */
+ return n - start;
+}
+
+static int io_file_write(lua_State *L, IOFileUD *iof, int start)
+{
+ FILE *fp = iof->fp;
+ cTValue *tv;
+ int status = 1;
+ for (tv = L->base+start; tv < L->top; tv++) {
+ MSize len;
+ const char *p = lj_strfmt_wstrnum(L, tv, &len);
+ if (!p)
+ lj_err_argt(L, (int)(tv - L->base) + 1, LUA_TSTRING);
+ status = status && (fwrite(p, 1, len, fp) == len);
+ }
+ if (LJ_52 && status) {
+ L->top = L->base+1;
+ if (start == 0)
+ setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_OUTPUT));
+ return 1;
+ }
+ return luaL_fileresult(L, status, NULL);
+}
+
+static int io_file_iter(lua_State *L)
+{
+ GCfunc *fn = curr_func(L);
+ IOFileUD *iof = uddata(udataV(&fn->c.upvalue[0]));
+ int n = fn->c.nupvalues - 1;
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOCLFL);
+ L->top = L->base;
+ if (n) { /* Copy upvalues with options to stack. */
+ lj_state_checkstack(L, (MSize)n);
+ memcpy(L->top, &fn->c.upvalue[1], n*sizeof(TValue));
+ L->top += n;
+ }
+ n = io_file_read(L, iof, 0);
+ if (ferror(iof->fp))
+ lj_err_callermsg(L, strVdata(L->top-2));
+ if (tvisnil(L->base) && (iof->type & IOFILE_FLAG_CLOSE)) {
+ io_file_close(L, iof); /* Return values are ignored. */
+ return 0;
+ }
+ return n;
+}
+
+static int io_file_lines(lua_State *L)
+{
+ int n = (int)(L->top - L->base);
+ if (n > LJ_MAX_UPVAL)
+ lj_err_caller(L, LJ_ERR_UNPACK);
+ lua_pushcclosure(L, io_file_iter, n);
+ return 1;
+}
+
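+/* Usage sketch (illustrative): extra arguments to lines() are stored in
+** iterator upvalues and replayed as read formats on each call;
+** "test.txt" is a hypothetical file:
+**
+**   for line in io.lines("test.txt") do print(line) end
+**   local f = assert(io.open("test.txt"))
+**   for n in f:lines("*n") do print(n) end  -- one number per iteration
+**   f:close()
+*/
+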
+/* -- I/O file methods ---------------------------------------------------- */
+
+#define LJLIB_MODULE_io_method
+
+LJLIB_CF(io_method_close)
+{
+ IOFileUD *iof;
+ if (L->base < L->top) {
+ iof = io_tofile(L);
+ } else {
+ iof = IOSTDF_IOF(L, GCROOT_IO_OUTPUT);
+ if (iof->fp == NULL)
+ lj_err_caller(L, LJ_ERR_IOCLFL);
+ }
+ return io_file_close(L, iof);
+}
+
+LJLIB_CF(io_method_read)
+{
+ return io_file_read(L, io_tofile(L), 1);
+}
+
+LJLIB_CF(io_method_write) LJLIB_REC(io_write 0)
+{
+ return io_file_write(L, io_tofile(L), 1);
+}
+
+LJLIB_CF(io_method_flush) LJLIB_REC(io_flush 0)
+{
+ return luaL_fileresult(L, fflush(io_tofile(L)->fp) == 0, NULL);
+}
+
+#if LJ_32 && defined(__ANDROID__) && __ANDROID_API__ < 24
+/* The Android NDK is such an unmatched marvel of engineering. */
+extern int fseeko32(FILE *, long int, int) __asm__("fseeko");
+extern long int ftello32(FILE *) __asm__("ftello");
+#define fseeko(fp, pos, whence) (fseeko32((fp), (pos), (whence)))
+#define ftello(fp) (ftello32((fp)))
+#endif
+
+LJLIB_CF(io_method_seek)
+{
+ FILE *fp = io_tofile(L)->fp;
+ int opt = lj_lib_checkopt(L, 2, 1, "\3set\3cur\3end");
+ int64_t ofs = 0;
+ cTValue *o;
+ int res;
+ if (opt == 0) opt = SEEK_SET;
+ else if (opt == 1) opt = SEEK_CUR;
+ else if (opt == 2) opt = SEEK_END;
+ o = L->base+2;
+ if (o < L->top) {
+ if (tvisint(o))
+ ofs = (int64_t)intV(o);
+ else if (tvisnum(o))
+ ofs = (int64_t)numV(o);
+ else if (!tvisnil(o))
+ lj_err_argt(L, 3, LUA_TNUMBER);
+ }
+#if LJ_TARGET_POSIX
+ res = fseeko(fp, ofs, opt);
+#elif _MSC_VER >= 1400
+ res = _fseeki64(fp, ofs, opt);
+#elif defined(__MINGW32__)
+ res = fseeko64(fp, ofs, opt);
+#else
+ res = fseek(fp, (long)ofs, opt);
+#endif
+ if (res)
+ return luaL_fileresult(L, 0, NULL);
+#if LJ_TARGET_POSIX
+ ofs = ftello(fp);
+#elif _MSC_VER >= 1400
+ ofs = _ftelli64(fp);
+#elif defined(__MINGW32__)
+ ofs = ftello64(fp);
+#else
+ ofs = (int64_t)ftell(fp);
+#endif
+ setint64V(L->top-1, ofs);
+ return 1;
+}
+
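+/* Usage sketch (illustrative): whence defaults to "cur" with offset 0,
+** and offsets are 64-bit on the platforms handled above; "test.bin" is
+** a hypothetical file:
+**
+**   local f = assert(io.open("test.bin", "rb"))
+**   local size = f:seek("end")    -- file size
+**   f:seek("set", 0)              -- rewind
+**   assert(f:seek() == 0)         -- "cur", offset 0
+**   f:close()
+*/
+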
+LJLIB_CF(io_method_setvbuf)
+{
+ FILE *fp = io_tofile(L)->fp;
+ int opt = lj_lib_checkopt(L, 2, -1, "\4full\4line\2no");
+ size_t sz = (size_t)lj_lib_optint(L, 3, LUAL_BUFFERSIZE);
+ if (opt == 0) opt = _IOFBF;
+ else if (opt == 1) opt = _IOLBF;
+ else if (opt == 2) opt = _IONBF;
+ return luaL_fileresult(L, setvbuf(fp, NULL, opt, sz) == 0, NULL);
+}
+
+LJLIB_CF(io_method_lines)
+{
+ io_tofile(L);
+ return io_file_lines(L);
+}
+
+LJLIB_CF(io_method___gc)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp != NULL && (iof->type & IOFILE_TYPE_MASK) != IOFILE_TYPE_STDF)
+ io_file_close(L, iof);
+ return 0;
+}
+
+LJLIB_CF(io_method___tostring)
+{
+ IOFileUD *iof = io_tofilep(L);
+ if (iof->fp != NULL)
+ lua_pushfstring(L, "file (%p)", iof->fp);
+ else
+ lua_pushliteral(L, "file (closed)");
+ return 1;
+}
+
+LJLIB_PUSH(top-1) LJLIB_SET(__index)
+
+#include "lj_libdef.h"
+
+/* -- I/O library functions ----------------------------------------------- */
+
+#define LJLIB_MODULE_io
+
+LJLIB_PUSH(top-2) LJLIB_SET(!) /* Set environment. */
+
+LJLIB_CF(io_open)
+{
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ GCstr *s = lj_lib_optstr(L, 2);
+ const char *mode = s ? strdata(s) : "r";
+ IOFileUD *iof = io_file_new(L);
+ iof->fp = fopen(fname, mode);
+ return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname);
+}
+
+LJLIB_CF(io_popen)
+{
+#if LJ_TARGET_POSIX || (LJ_TARGET_WINDOWS && !LJ_TARGET_XBOXONE && !LJ_TARGET_UWP)
+ const char *fname = strdata(lj_lib_checkstr(L, 1));
+ GCstr *s = lj_lib_optstr(L, 2);
+ const char *mode = s ? strdata(s) : "r";
+ IOFileUD *iof = io_file_new(L);
+ iof->type = IOFILE_TYPE_PIPE;
+#if LJ_TARGET_POSIX
+ fflush(NULL);
+ iof->fp = popen(fname, mode);
+#else
+ iof->fp = _popen(fname, mode);
+#endif
+ return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, fname);
+#else
+ return luaL_error(L, LUA_QL("popen") " not supported");
+#endif
+}
+
+LJLIB_CF(io_tmpfile)
+{
+ IOFileUD *iof = io_file_new(L);
+#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PS5 || LJ_TARGET_PSVITA || LJ_TARGET_NX
+ iof->fp = NULL; errno = ENOSYS;
+#else
+ iof->fp = tmpfile();
+#endif
+ return iof->fp != NULL ? 1 : luaL_fileresult(L, 0, NULL);
+}
+
+LJLIB_CF(io_close)
+{
+ return lj_cf_io_method_close(L);
+}
+
+LJLIB_CF(io_read)
+{
+ return io_file_read(L, io_stdfile(L, GCROOT_IO_INPUT), 0);
+}
+
+LJLIB_CF(io_write) LJLIB_REC(io_write GCROOT_IO_OUTPUT)
+{
+ return io_file_write(L, io_stdfile(L, GCROOT_IO_OUTPUT), 0);
+}
+
+LJLIB_CF(io_flush) LJLIB_REC(io_flush GCROOT_IO_OUTPUT)
+{
+ return luaL_fileresult(L, fflush(io_stdfile(L, GCROOT_IO_OUTPUT)->fp) == 0, NULL);
+}
+
+static int io_std_getset(lua_State *L, ptrdiff_t id, const char *mode)
+{
+ if (L->base < L->top && !tvisnil(L->base)) {
+ if (tvisudata(L->base)) {
+ io_tofile(L);
+ L->top = L->base+1;
+ } else {
+ io_file_open(L, mode);
+ }
+ /* NOBARRIER: The standard I/O handles are GC roots. */
+ setgcref(G(L)->gcroot[id], gcV(L->top-1));
+ } else {
+ setudataV(L, L->top++, IOSTDF_UD(L, id));
+ }
+ return 1;
+}
+
+LJLIB_CF(io_input)
+{
+ return io_std_getset(L, GCROOT_IO_INPUT, "r");
+}
+
+LJLIB_CF(io_output)
+{
+ return io_std_getset(L, GCROOT_IO_OUTPUT, "w");
+}
+
+LJLIB_CF(io_lines)
+{
+ if (L->base == L->top) setnilV(L->top++);
+ if (!tvisnil(L->base)) { /* io.lines(fname) */
+ IOFileUD *iof = io_file_open(L, "r");
+ iof->type = IOFILE_TYPE_FILE|IOFILE_FLAG_CLOSE;
+ L->top--;
+ setudataV(L, L->base, udataV(L->top));
+ } else { /* io.lines() iterates over stdin. */
+ setudataV(L, L->base, IOSTDF_UD(L, GCROOT_IO_INPUT));
+ }
+ return io_file_lines(L);
+}
+
+LJLIB_CF(io_type)
+{
+ cTValue *o = lj_lib_checkany(L, 1);
+ if (!(tvisudata(o) && udataV(o)->udtype == UDTYPE_IO_FILE))
+ setnilV(L->top++);
+ else if (((IOFileUD *)uddata(udataV(o)))->fp != NULL)
+ lua_pushliteral(L, "file");
+ else
+ lua_pushliteral(L, "closed file");
+ return 1;
+}
+
+#include "lj_libdef.h"
+
+/* ------------------------------------------------------------------------ */
+
+static GCobj *io_std_new(lua_State *L, FILE *fp, const char *name)
+{
+ IOFileUD *iof = (IOFileUD *)lua_newuserdata(L, sizeof(IOFileUD));
+ GCudata *ud = udataV(L->top-1);
+ ud->udtype = UDTYPE_IO_FILE;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, gcV(L->top-3));
+ iof->fp = fp;
+ iof->type = IOFILE_TYPE_STDF;
+ lua_setfield(L, -2, name);
+ return obj2gco(ud);
+}
+
+LUALIB_API int luaopen_io(lua_State *L)
+{
+ LJ_LIB_REG(L, NULL, io_method);
+ copyTV(L, L->top, L->top-1); L->top++;
+ lua_setfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE);
+ LJ_LIB_REG(L, LUA_IOLIBNAME, io);
+ setgcref(G(L)->gcroot[GCROOT_IO_INPUT], io_std_new(L, stdin, "stdin"));
+ setgcref(G(L)->gcroot[GCROOT_IO_OUTPUT], io_std_new(L, stdout, "stdout"));
+ io_std_new(L, stderr, "stderr");
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_jit.c b/libs/luajit-cmake/luajit/src/lib_jit.c
new file mode 100644
index 0000000..2867d42
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_jit.c
@@ -0,0 +1,761 @@
+/*
+** JIT library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lib_jit_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#if LJ_HASJIT
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+#include "lj_lib.h"
+
+#include "luajit.h"
+
+/* -- jit.* functions ----------------------------------------------------- */
+
+#define LJLIB_MODULE_jit
+
+static int setjitmode(lua_State *L, int mode)
+{
+ int idx = 0;
+ if (L->base == L->top || tvisnil(L->base)) { /* jit.on/off/flush([nil]) */
+ mode |= LUAJIT_MODE_ENGINE;
+ } else {
+ /* jit.on/off/flush(func|proto, nil|true|false) */
+ if (tvisfunc(L->base) || tvisproto(L->base))
+ idx = 1;
+ else if (!tvistrue(L->base)) /* jit.on/off/flush(true, nil|true|false) */
+ goto err;
+ if (L->base+1 < L->top && tvisbool(L->base+1))
+ mode |= boolV(L->base+1) ? LUAJIT_MODE_ALLFUNC : LUAJIT_MODE_ALLSUBFUNC;
+ else
+ mode |= LUAJIT_MODE_FUNC;
+ }
+ if (luaJIT_setmode(L, idx, mode) != 1) {
+ if ((mode & LUAJIT_MODE_MASK) == LUAJIT_MODE_ENGINE)
+ lj_err_caller(L, LJ_ERR_NOJIT);
+ err:
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ }
+ return 0;
+}
+
+LJLIB_CF(jit_on)
+{
+ return setjitmode(L, LUAJIT_MODE_ON);
+}
+
+LJLIB_CF(jit_off)
+{
+ return setjitmode(L, LUAJIT_MODE_OFF);
+}
+
+LJLIB_CF(jit_flush)
+{
+#if LJ_HASJIT
+ if (L->base < L->top && tvisnumber(L->base)) {
+ int traceno = lj_lib_checkint(L, 1);
+ luaJIT_setmode(L, traceno, LUAJIT_MODE_FLUSH|LUAJIT_MODE_TRACE);
+ return 0;
+ }
+#endif
+ return setjitmode(L, LUAJIT_MODE_FLUSH);
+}
+
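+/* Usage sketch (illustrative): the mode plumbing above corresponds to:
+**
+**   jit.off()          -- whole engine off (LUAJIT_MODE_ENGINE)
+**   jit.on()           -- and back on
+**   jit.off(f, true)   -- f plus all already-defined subfunctions
+**   jit.flush(42)      -- flush one trace by number (JIT builds only)
+*/
+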
+#if LJ_HASJIT
+/* Push a string for every flag bit that is set. */
+static void flagbits_to_strings(lua_State *L, uint32_t flags, uint32_t base,
+ const char *str)
+{
+ for (; *str; base <<= 1, str += 1+*str)
+ if (flags & base)
+ setstrV(L, L->top++, lj_str_new(L, str+1, *(uint8_t *)str));
+}
+#endif
+
+LJLIB_CF(jit_status)
+{
+#if LJ_HASJIT
+ jit_State *J = L2J(L);
+ L->top = L->base;
+ setboolV(L->top++, (J->flags & JIT_F_ON) ? 1 : 0);
+ flagbits_to_strings(L, J->flags, JIT_F_CPU, JIT_F_CPUSTRING);
+ flagbits_to_strings(L, J->flags, JIT_F_OPT, JIT_F_OPTSTRING);
+ return (int)(L->top - L->base);
+#else
+ setboolV(L->top++, 0);
+ return 1;
+#endif
+}
+
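+/* Usage sketch (illustrative): the first result is the on/off flag, the
+** rest are one string per enabled CPU/optimization flag; exact names
+** depend on the build and the detected CPU:
+**
+**   local st = { jit.status() }
+**   print(st[1])                      -- true/false
+**   print(table.concat(st, " ", 2))   -- e.g. "fold cse dce ..."
+*/
+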
+LJLIB_CF(jit_security)
+{
+ int idx = lj_lib_checkopt(L, 1, -1, LJ_SECURITY_MODESTRING);
+ setintV(L->top++, ((LJ_SECURITY_MODE >> (2*idx)) & 3));
+ return 1;
+}
+
+LJLIB_CF(jit_attach)
+{
+#ifdef LUAJIT_DISABLE_VMEVENT
+ luaL_error(L, "vmevent API disabled");
+#else
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ GCstr *s = lj_lib_optstr(L, 2);
+ luaL_findtable(L, LUA_REGISTRYINDEX, LJ_VMEVENTS_REGKEY, LJ_VMEVENTS_HSIZE);
+ if (s) { /* Attach to given event. */
+ const uint8_t *p = (const uint8_t *)strdata(s);
+ uint32_t h = s->len;
+ while (*p) h = h ^ (lj_rol(h, 6) + *p++);
+ lua_pushvalue(L, 1);
+ lua_rawseti(L, -2, VMEVENT_HASHIDX(h));
+ G(L)->vmevmask = VMEVENT_NOCACHE; /* Invalidate cache. */
+ } else { /* Detach if no event given. */
+ setnilV(L->top++);
+ while (lua_next(L, -2)) {
+ L->top--;
+ if (tvisfunc(L->top) && funcV(L->top) == fn) {
+ setnilV(lj_tab_set(L, tabV(L->top-2), L->top-1));
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+LJLIB_PUSH(top-5) LJLIB_SET(os)
+LJLIB_PUSH(top-4) LJLIB_SET(arch)
+LJLIB_PUSH(top-3) LJLIB_SET(version_num)
+LJLIB_PUSH(top-2) LJLIB_SET(version)
+
+#include "lj_libdef.h"
+
+/* -- jit.util.* functions ------------------------------------------------ */
+
+#define LJLIB_MODULE_jit_util
+
+/* -- Reflection API for Lua functions ------------------------------------ */
+
+/* Return prototype of first argument (Lua function or prototype object) */
+static GCproto *check_Lproto(lua_State *L, int nolua)
+{
+ TValue *o = L->base;
+ if (L->top > o) {
+ if (tvisproto(o)) {
+ return protoV(o);
+ } else if (tvisfunc(o)) {
+ if (isluafunc(funcV(o)))
+ return funcproto(funcV(o));
+ else if (nolua)
+ return NULL;
+ }
+ }
+ lj_err_argt(L, 1, LUA_TFUNCTION);
+ return NULL; /* unreachable */
+}
+
+static void setintfield(lua_State *L, GCtab *t, const char *name, int32_t val)
+{
+ setintV(lj_tab_setstr(L, t, lj_str_newz(L, name)), val);
+}
+
+/* local info = jit.util.funcinfo(func [,pc]) */
+LJLIB_CF(jit_util_funcinfo)
+{
+ GCproto *pt = check_Lproto(L, 1);
+ if (pt) {
+ BCPos pc = (BCPos)lj_lib_optint(L, 2, 0);
+ GCtab *t;
+ lua_createtable(L, 0, 16); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "linedefined", pt->firstline);
+ setintfield(L, t, "lastlinedefined", pt->firstline + pt->numline);
+ setintfield(L, t, "stackslots", pt->framesize);
+ setintfield(L, t, "params", pt->numparams);
+ setintfield(L, t, "bytecodes", (int32_t)pt->sizebc);
+ setintfield(L, t, "gcconsts", (int32_t)pt->sizekgc);
+ setintfield(L, t, "nconsts", (int32_t)pt->sizekn);
+ setintfield(L, t, "upvalues", (int32_t)pt->sizeuv);
+ if (pc < pt->sizebc)
+ setintfield(L, t, "currentline", lj_debug_line(pt, pc));
+ lua_pushboolean(L, (pt->flags & PROTO_VARARG));
+ lua_setfield(L, -2, "isvararg");
+ lua_pushboolean(L, (pt->flags & PROTO_CHILD));
+ lua_setfield(L, -2, "children");
+ setstrV(L, L->top++, proto_chunkname(pt));
+ lua_setfield(L, -2, "source");
+ lj_debug_pushloc(L, pt, pc);
+ lua_setfield(L, -2, "loc");
+ setprotoV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "proto")), pt);
+ } else {
+ GCfunc *fn = funcV(L->base);
+ GCtab *t;
+ lua_createtable(L, 0, 4); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ if (!iscfunc(fn))
+ setintfield(L, t, "ffid", fn->c.ffid);
+ setintptrV(lj_tab_setstr(L, t, lj_str_newlit(L, "addr")),
+ (intptr_t)(void *)fn->c.f);
+ setintfield(L, t, "upvalues", fn->c.nupvalues);
+ }
+ return 1;
+}
+
+/* local ins, m = jit.util.funcbc(func, pc) */
+LJLIB_CF(jit_util_funcbc)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ BCPos pc = (BCPos)lj_lib_checkint(L, 2);
+ if (pc < pt->sizebc) {
+ BCIns ins = proto_bc(pt)[pc];
+ BCOp op = bc_op(ins);
+ lj_assertL(op < BC__MAX, "bad bytecode op %d", op);
+ setintV(L->top, ins);
+ setintV(L->top+1, lj_bc_mode[op]);
+ L->top += 2;
+ return 2;
+ }
+ return 0;
+}
+
+/* local k = jit.util.funck(func, idx) */
+LJLIB_CF(jit_util_funck)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ ptrdiff_t idx = (ptrdiff_t)lj_lib_checkint(L, 2);
+ if (idx >= 0) {
+ if (idx < (ptrdiff_t)pt->sizekn) {
+ copyTV(L, L->top-1, proto_knumtv(pt, idx));
+ return 1;
+ }
+ } else {
+ if (~idx < (ptrdiff_t)pt->sizekgc) {
+ GCobj *gc = proto_kgc(pt, idx);
+ setgcV(L, L->top-1, gc, ~gc->gch.gct);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* local name = jit.util.funcuvname(func, idx) */
+LJLIB_CF(jit_util_funcuvname)
+{
+ GCproto *pt = check_Lproto(L, 0);
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 2);
+ if (idx < pt->sizeuv) {
+ setstrV(L, L->top-1, lj_str_newz(L, lj_debug_uvname(pt, idx)));
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Reflection API for traces ------------------------------------------- */
+
+#if LJ_HASJIT
+
+/* Check trace argument. Must not throw for non-existent trace numbers. */
+static GCtrace *jit_checktrace(lua_State *L)
+{
+ TraceNo tr = (TraceNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (tr > 0 && tr < J->sizetrace)
+ return traceref(J, tr);
+ return NULL;
+}
+
+/* Names of link types. ORDER LJ_TRLINK */
+static const char *const jit_trlinkname[] = {
+ "none", "root", "loop", "tail-recursion", "up-recursion", "down-recursion",
+ "interpreter", "return", "stitch"
+};
+
+/* local info = jit.util.traceinfo(tr) */
+LJLIB_CF(jit_util_traceinfo)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T) {
+ GCtab *t;
+ lua_createtable(L, 0, 8); /* Increment hash size if fields are added. */
+ t = tabV(L->top-1);
+ setintfield(L, t, "nins", (int32_t)T->nins - REF_BIAS - 1);
+ setintfield(L, t, "nk", REF_BIAS - (int32_t)T->nk);
+ setintfield(L, t, "link", T->link);
+ setintfield(L, t, "nexit", T->nsnap);
+ setstrV(L, L->top++, lj_str_newz(L, jit_trlinkname[T->linktype]));
+ lua_setfield(L, -2, "linktype");
+ /* There are many more fields. Add them only when needed. */
+ return 1;
+ }
+ return 0;
+}
+
+/* local m, ot, op1, op2, prev = jit.util.traceir(tr, idx) */
+LJLIB_CF(jit_util_traceir)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= REF_BIAS && ref < T->nins) {
+ IRIns *ir = &T->ir[ref];
+ int32_t m = lj_ir_mode[ir->o];
+ setintV(L->top-2, m);
+ setintV(L->top-1, ir->ot);
+ setintV(L->top++, (int32_t)ir->op1 - (irm_op1(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, (int32_t)ir->op2 - (irm_op2(m)==IRMref ? REF_BIAS : 0));
+ setintV(L->top++, ir->prev);
+ return 5;
+ }
+ return 0;
+}
+
+/* local k, t [, slot] = jit.util.tracek(tr, idx) */
+LJLIB_CF(jit_util_tracek)
+{
+ GCtrace *T = jit_checktrace(L);
+ IRRef ref = (IRRef)lj_lib_checkint(L, 2) + REF_BIAS;
+ if (T && ref >= T->nk && ref < REF_BIAS) {
+ IRIns *ir = &T->ir[ref];
+ int32_t slot = -1;
+ if (ir->o == IR_KSLOT) {
+ slot = ir->op2;
+ ir = &T->ir[ir->op1];
+ }
+#if LJ_HASFFI
+ if (ir->o == IR_KINT64) ctype_loadffi(L);
+#endif
+ lj_ir_kvalue(L, L->top-2, ir);
+ setintV(L->top-1, (int32_t)irt_type(ir->t));
+ if (slot == -1)
+ return 2;
+ setintV(L->top++, slot);
+ return 3;
+ }
+ return 0;
+}
+
+/* local snap = jit.util.tracesnap(tr, sn) */
+LJLIB_CF(jit_util_tracesnap)
+{
+ GCtrace *T = jit_checktrace(L);
+ SnapNo sn = (SnapNo)lj_lib_checkint(L, 2);
+ if (T && sn < T->nsnap) {
+ SnapShot *snap = &T->snap[sn];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ GCtab *t;
+ lua_createtable(L, nent+2, 0);
+ t = tabV(L->top-1);
+ setintV(lj_tab_setint(L, t, 0), (int32_t)snap->ref - REF_BIAS);
+ setintV(lj_tab_setint(L, t, 1), (int32_t)snap->nslots);
+ for (n = 0; n < nent; n++)
+ setintV(lj_tab_setint(L, t, (int32_t)(n+2)), (int32_t)map[n]);
+ setintV(lj_tab_setint(L, t, (int32_t)(nent+2)), (int32_t)SNAP(255, 0, 0));
+ return 1;
+ }
+ return 0;
+}
+
+/* local mcode, addr, loop = jit.util.tracemc(tr) */
+LJLIB_CF(jit_util_tracemc)
+{
+ GCtrace *T = jit_checktrace(L);
+ if (T && T->mcode != NULL) {
+ setstrV(L, L->top-1, lj_str_new(L, (const char *)T->mcode, T->szmcode));
+ setintptrV(L->top++, (intptr_t)(void *)T->mcode);
+ setintV(L->top++, T->mcloop);
+ return 3;
+ }
+ return 0;
+}
+
+/* local addr = jit.util.traceexitstub([tr,] exitno) */
+LJLIB_CF(jit_util_traceexitstub)
+{
+#ifdef EXITSTUBS_PER_GROUP
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 1);
+ jit_State *J = L2J(L);
+ if (exitno < EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_addr(J, exitno));
+ return 1;
+ }
+#else
+ if (L->top > L->base+1) { /* Don't throw for one-argument variant. */
+ GCtrace *T = jit_checktrace(L);
+ ExitNo exitno = (ExitNo)lj_lib_checkint(L, 2);
+ ExitNo maxexit = T->root ? T->nsnap+1 : T->nsnap;
+ if (T && T->mcode != NULL && exitno < maxexit) {
+ setintptrV(L->top-1, (intptr_t)(void *)exitstub_trace_addr(T, exitno));
+ return 1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* local addr = jit.util.ircalladdr(idx) */
+LJLIB_CF(jit_util_ircalladdr)
+{
+ uint32_t idx = (uint32_t)lj_lib_checkint(L, 1);
+ if (idx < IRCALL__MAX) {
+ setintptrV(L->top-1, (intptr_t)(void *)lj_ir_callinfo[idx].func);
+ return 1;
+ }
+ return 0;
+}
+
+#endif
+
+#include "lj_libdef.h"
+
+static int luaopen_jit_util(lua_State *L)
+{
+ LJ_LIB_REG(L, NULL, jit_util);
+ return 1;
+}
+
+/* -- jit.opt module ------------------------------------------------------ */
+
+#if LJ_HASJIT
+
+#define LJLIB_MODULE_jit_opt
+
+/* Parse optimization level. */
+static int jitopt_level(jit_State *J, const char *str)
+{
+ if (str[0] >= '0' && str[0] <= '9' && str[1] == '\0') {
+ uint32_t flags;
+ if (str[0] == '0') flags = JIT_F_OPT_0;
+ else if (str[0] == '1') flags = JIT_F_OPT_1;
+ else if (str[0] == '2') flags = JIT_F_OPT_2;
+ else flags = JIT_F_OPT_3;
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | flags;
+ return 1; /* Ok. */
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization flag. */
+static int jitopt_flag(jit_State *J, const char *str)
+{
+ const char *lst = JIT_F_OPTSTRING;
+ uint32_t opt;
+ int set = 1;
+ if (str[0] == '+') {
+ str++;
+ } else if (str[0] == '-') {
+ str++;
+ set = 0;
+ } else if (str[0] == 'n' && str[1] == 'o') {
+ str += str[2] == '-' ? 3 : 2;
+ set = 0;
+ }
+ for (opt = JIT_F_OPT; ; opt <<= 1) {
+ size_t len = *(const uint8_t *)lst;
+ if (len == 0)
+ break;
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '\0') {
+ if (set) J->flags |= opt; else J->flags &= ~opt;
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+
+/* Parse optimization parameter. */
+static int jitopt_param(jit_State *J, const char *str)
+{
+ const char *lst = JIT_P_STRING;
+ int i;
+ for (i = 0; i < JIT_P__MAX; i++) {
+ size_t len = *(const uint8_t *)lst;
+ lj_assertJ(len != 0, "bad JIT_P_STRING");
+ if (strncmp(str, lst+1, len) == 0 && str[len] == '=') {
+ int32_t n = 0;
+ const char *p = &str[len+1];
+ while (*p >= '0' && *p <= '9')
+ n = n*10 + (*p++ - '0');
+ if (*p) return 0; /* Malformed number. */
+ J->param[i] = n;
+ if (i == JIT_P_hotloop)
+ lj_dispatch_init_hotcount(J2G(J));
+ return 1; /* Ok. */
+ }
+ lst += 1+len;
+ }
+ return 0; /* No match. */
+}
+
+/* jit.opt.start(flags...) */
+LJLIB_CF(jit_opt_start)
+{
+ jit_State *J = L2J(L);
+ int nargs = (int)(L->top - L->base);
+ if (nargs == 0) {
+ J->flags = (J->flags & ~JIT_F_OPT_MASK) | JIT_F_OPT_DEFAULT;
+ } else {
+ int i;
+ for (i = 1; i <= nargs; i++) {
+ const char *str = strdata(lj_lib_checkstr(L, i));
+ if (!jitopt_level(J, str) &&
+ !jitopt_flag(J, str) &&
+ !jitopt_param(J, str))
+ lj_err_callerv(L, LJ_ERR_JITOPT, str);
+ }
+ }
+ return 0;
+}
+
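+/* Usage sketch (illustrative): the three parsers above accept, in order,
+** a level, +/-/no- flag toggles, and name=value parameters:
+**
+**   jit.opt.start(2)                 -- optimization level 0-3
+**   jit.opt.start("-fold", "+cse")   -- toggle individual optimizations
+**   jit.opt.start("hotloop=10")      -- tune a numeric parameter
+*/
+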
+#include "lj_libdef.h"
+
+#endif
+
+/* -- jit.profile module -------------------------------------------------- */
+
+#if LJ_HASPROFILE
+
+#define LJLIB_MODULE_jit_profile
+
+/* Not loaded by default, use: local profile = require("jit.profile") */
+
+#define KEY_PROFILE_THREAD (U64x(80000000,00000000)|'t')
+#define KEY_PROFILE_FUNC (U64x(80000000,00000000)|'f')
+
+static void jit_profile_callback(lua_State *L2, lua_State *L, int samples,
+ int vmstate)
+{
+ TValue key;
+ cTValue *tv;
+ key.u64 = KEY_PROFILE_FUNC;
+ tv = lj_tab_get(L, tabV(registry(L)), &key);
+ if (tvisfunc(tv)) {
+ char vmst = (char)vmstate;
+ int status;
+ setfuncV(L2, L2->top++, funcV(tv));
+ setthreadV(L2, L2->top++, L);
+ setintV(L2->top++, samples);
+ setstrV(L2, L2->top++, lj_str_new(L2, &vmst, 1));
+ status = lua_pcall(L2, 3, 0, 0); /* callback(thread, samples, vmstate) */
+ if (status) {
+ if (G(L2)->panic) G(L2)->panic(L2);
+ exit(EXIT_FAILURE);
+ }
+ lj_trace_abort(G(L2));
+ }
+}
+
+/* profile.start(mode, cb) */
+LJLIB_CF(jit_profile_start)
+{
+ GCtab *registry = tabV(registry(L));
+ GCstr *mode = lj_lib_optstr(L, 1);
+ GCfunc *func = lj_lib_checkfunc(L, 2);
+ lua_State *L2 = lua_newthread(L); /* Thread that runs profiler callback. */
+ TValue key;
+ /* Anchor thread and function in registry. */
+ key.u64 = KEY_PROFILE_THREAD;
+ setthreadV(L, lj_tab_set(L, registry, &key), L2);
+ key.u64 = KEY_PROFILE_FUNC;
+ setfuncV(L, lj_tab_set(L, registry, &key), func);
+ lj_gc_anybarriert(L, registry);
+ luaJIT_profile_start(L, mode ? strdata(mode) : "",
+ (luaJIT_profile_callback)jit_profile_callback, L2);
+ return 0;
+}
+
+/* profile.stop() */
+LJLIB_CF(jit_profile_stop)
+{
+ GCtab *registry;
+ TValue key;
+ luaJIT_profile_stop(L);
+ registry = tabV(registry(L));
+ key.u64 = KEY_PROFILE_THREAD;
+ setnilV(lj_tab_set(L, registry, &key));
+ key.u64 = KEY_PROFILE_FUNC;
+ setnilV(lj_tab_set(L, registry, &key));
+ lj_gc_anybarriert(L, registry);
+ return 0;
+}
+
+/* dump = profile.dumpstack([thread,] fmt, depth) */
+LJLIB_CF(jit_profile_dumpstack)
+{
+ lua_State *L2 = L;
+ int arg = 0;
+ size_t len;
+ int depth;
+ GCstr *fmt;
+ const char *p;
+ if (L->top > L->base && tvisthread(L->base)) {
+ L2 = threadV(L->base);
+ arg = 1;
+ }
+ fmt = lj_lib_checkstr(L, arg+1);
+ depth = lj_lib_checkint(L, arg+2);
+ p = luaJIT_profile_dumpstack(L2, strdata(fmt), depth, &len);
+ lua_pushlstring(L, p, len);
+ return 1;
+}
+
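+/* Usage sketch (illustrative): the module is not loaded by default; the
+** callback runs on its own coroutine, as set up in jit_profile_start().
+** The mode and dump format strings here follow the profiler docs and
+** are indicative only:
+**
+**   local profile = require("jit.profile")
+**   profile.start("li1", function(thread, samples, vmstate)
+**     io.write(profile.dumpstack(thread, "F l\n", 1))
+**   end)
+**   -- ... workload ...
+**   profile.stop()
+*/
+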
+#include "lj_libdef.h"
+
+static int luaopen_jit_profile(lua_State *L)
+{
+ LJ_LIB_REG(L, NULL, jit_profile);
+ return 1;
+}
+
+#endif
+
+/* -- JIT compiler initialization ----------------------------------------- */
+
+#if LJ_HASJIT
+/* Default values for JIT parameters. */
+static const int32_t jit_param_default[JIT_P__MAX+1] = {
+#define JIT_PARAMINIT(len, name, value) (value),
+JIT_PARAMDEF(JIT_PARAMINIT)
+#undef JIT_PARAMINIT
+ 0
+};
+
+#if LJ_TARGET_ARM && LJ_TARGET_LINUX
+#include <sys/utsname.h>
+#endif
+
+/* Arch-dependent CPU feature detection. */
+static uint32_t jit_cpudetect(void)
+{
+ uint32_t flags = 0;
+#if LJ_TARGET_X86ORX64
+
+ uint32_t vendor[4];
+ uint32_t features[4];
+ if (lj_vm_cpuid(0, vendor) && lj_vm_cpuid(1, features)) {
+ flags |= ((features[2] >> 0)&1) * JIT_F_SSE3;
+ flags |= ((features[2] >> 19)&1) * JIT_F_SSE4_1;
+ if (vendor[0] >= 7) {
+ uint32_t xfeatures[4];
+ lj_vm_cpuid(7, xfeatures);
+ flags |= ((xfeatures[1] >> 8)&1) * JIT_F_BMI2;
+ }
+ }
+ /* Don't bother checking for SSE2 -- the VM will crash before getting here. */
+
+#elif LJ_TARGET_ARM
+
+ int ver = LJ_ARCH_VERSION; /* Compile-time ARM CPU detection. */
+#if LJ_TARGET_LINUX
+ if (ver < 70) { /* Runtime ARM CPU detection. */
+ struct utsname ut;
+ uname(&ut);
+ if (strncmp(ut.machine, "armv", 4) == 0) {
+ if (ut.machine[4] >= '8') ver = 80;
+ else if (ut.machine[4] == '7') ver = 70;
+ else if (ut.machine[4] == '6') ver = 60;
+ }
+ }
+#endif
+ flags |= ver >= 70 ? JIT_F_ARMV7 :
+ ver >= 61 ? JIT_F_ARMV6T2_ :
+ ver >= 60 ? JIT_F_ARMV6_ : 0;
+ flags |= LJ_ARCH_HASFPU == 0 ? 0 : ver >= 70 ? JIT_F_VFPV3 : JIT_F_VFPV2;
+
+#elif LJ_TARGET_ARM64
+
+ /* No optional CPU features to detect (for now). */
+
+#elif LJ_TARGET_PPC
+
+#if LJ_ARCH_SQRT
+ flags |= JIT_F_SQRT;
+#endif
+#if LJ_ARCH_ROUND
+ flags |= JIT_F_ROUND;
+#endif
+
+#elif LJ_TARGET_MIPS
+
+ /* Compile-time MIPS CPU detection. */
+#if LJ_ARCH_VERSION >= 20
+ flags |= JIT_F_MIPSXXR2;
+#endif
+ /* Runtime MIPS CPU detection. */
+#if defined(__GNUC__)
+ if (!(flags & JIT_F_MIPSXXR2)) {
+ int x;
+#ifdef __mips16
+ x = 0; /* Runtime detection is difficult. Ensure optimal -march flags. */
+#else
+ /* On MIPS32R1 rotr is treated as srl. rotr r2,r2,1 -> srl r2,r2,1. */
+ __asm__("li $2, 1\n\t.long 0x00221042\n\tmove %0, $2" : "=r"(x) : : "$2");
+#endif
+ if (x) flags |= JIT_F_MIPSXXR2; /* Either 0x80000000 (R2) or 0 (R1). */
+ }
+#endif
+
+#else
+#error "Missing CPU detection for this architecture"
+#endif
+ return flags;
+}
+
+/* Initialize JIT compiler. */
+static void jit_init(lua_State *L)
+{
+ jit_State *J = L2J(L);
+ J->flags = jit_cpudetect() | JIT_F_ON | JIT_F_OPT_DEFAULT;
+ memcpy(J->param, jit_param_default, sizeof(J->param));
+ lj_dispatch_update(G(L));
+}
+#endif
+
+LUALIB_API int luaopen_jit(lua_State *L)
+{
+#if LJ_HASJIT
+ jit_init(L);
+#endif
+ lua_pushliteral(L, LJ_OS_NAME);
+ lua_pushliteral(L, LJ_ARCH_NAME);
+ lua_pushinteger(L, LUAJIT_VERSION_NUM);
+ lua_pushliteral(L, LUAJIT_VERSION);
+ LJ_LIB_REG(L, LUA_JITLIBNAME, jit);
+#if LJ_HASPROFILE
+ lj_lib_prereg(L, LUA_JITLIBNAME ".profile", luaopen_jit_profile,
+ tabref(L->env));
+#endif
+#ifndef LUAJIT_DISABLE_JITUTIL
+ lj_lib_prereg(L, LUA_JITLIBNAME ".util", luaopen_jit_util, tabref(L->env));
+#endif
+#if LJ_HASJIT
+ LJ_LIB_REG(L, "jit.opt", jit_opt);
+#endif
+ L->top -= 2;
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_math.c b/libs/luajit-cmake/luajit/src/lib_math.c
new file mode 100644
index 0000000..b677bbc
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_math.c
@@ -0,0 +1,201 @@
+/*
+** Math library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lib_math_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_lib.h"
+#include "lj_vm.h"
+#include "lj_prng.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_math
+
+LJLIB_ASM(math_abs) LJLIB_REC(.)
+{
+ lj_lib_checknumber(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_floor) LJLIB_REC(math_round IRFPM_FLOOR)
+LJLIB_ASM_(math_ceil) LJLIB_REC(math_round IRFPM_CEIL)
+
+LJLIB_ASM(math_sqrt) LJLIB_REC(math_unary IRFPM_SQRT)
+{
+ lj_lib_checknum(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_log10) LJLIB_REC(math_call IRCALL_log10)
+LJLIB_ASM_(math_exp) LJLIB_REC(math_call IRCALL_exp)
+LJLIB_ASM_(math_sin) LJLIB_REC(math_call IRCALL_sin)
+LJLIB_ASM_(math_cos) LJLIB_REC(math_call IRCALL_cos)
+LJLIB_ASM_(math_tan) LJLIB_REC(math_call IRCALL_tan)
+LJLIB_ASM_(math_asin) LJLIB_REC(math_call IRCALL_asin)
+LJLIB_ASM_(math_acos) LJLIB_REC(math_call IRCALL_acos)
+LJLIB_ASM_(math_atan) LJLIB_REC(math_call IRCALL_atan)
+LJLIB_ASM_(math_sinh) LJLIB_REC(math_call IRCALL_sinh)
+LJLIB_ASM_(math_cosh) LJLIB_REC(math_call IRCALL_cosh)
+LJLIB_ASM_(math_tanh) LJLIB_REC(math_call IRCALL_tanh)
+LJLIB_ASM_(math_frexp)
+LJLIB_ASM_(math_modf)
+
+LJLIB_ASM(math_log) LJLIB_REC(math_log)
+{
+ double x = lj_lib_checknum(L, 1);
+ if (L->base+1 < L->top) {
+ double y = lj_lib_checknum(L, 2);
+#ifdef LUAJIT_NO_LOG2
+ x = log(x); y = 1.0 / log(y);
+#else
+ x = lj_vm_log2(x); y = 1.0 / lj_vm_log2(y);
+#endif
+ setnumV(L->base-1-LJ_FR2, x*y); /* Do NOT join the expression to x / y. */
+ return FFH_RES(1);
+ }
+ return FFH_RETRY;
+}
+
+LJLIB_LUA(math_deg) /* function(x) return x * 57.29577951308232 end */
+LJLIB_LUA(math_rad) /* function(x) return x * 0.017453292519943295 end */
+
+LJLIB_ASM(math_atan2) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+ lj_lib_checknum(L, 2);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_pow) LJLIB_REC(.)
+LJLIB_ASM_(math_fmod)
+
+LJLIB_ASM(math_ldexp) LJLIB_REC(.)
+{
+ lj_lib_checknum(L, 1);
+#if LJ_DUALNUM && !LJ_TARGET_X86ORX64
+ lj_lib_checkint(L, 2);
+#else
+ lj_lib_checknum(L, 2);
+#endif
+ return FFH_RETRY;
+}
+
+LJLIB_ASM(math_min) LJLIB_REC(math_minmax IR_MIN)
+{
+ int i = 0;
+ do { lj_lib_checknumber(L, ++i); } while (L->base+i < L->top);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(math_max) LJLIB_REC(math_minmax IR_MAX)
+
+LJLIB_PUSH(3.14159265358979323846) LJLIB_SET(pi)
+LJLIB_PUSH(1e310) LJLIB_SET(huge)
+
+/* ------------------------------------------------------------------------ */
+
+/* This implements a Tausworthe PRNG with period 2^223. Based on:
+** Tables of maximally-equidistributed combined LFSR generators,
+** Pierre L'Ecuyer, 1991, table 3, 1st entry.
+** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
+*/
+
+/* Union needed for bit-pattern conversion between uint64_t and double. */
+typedef union { uint64_t u64; double d; } U64double;
+
+/* PRNG seeding function. */
+static void random_seed(PRNGState *rs, double d)
+{
+ uint32_t r = 0x11090601; /* 64-k[i] as four 8 bit constants. */
+ int i;
+ for (i = 0; i < 4; i++) {
+ U64double u;
+ uint32_t m = 1u << (r&255);
+ r >>= 8;
+ u.d = d = d * 3.14159265358979323846 + 2.7182818284590452354;
+ if (u.u64 < m) u.u64 += m; /* Ensure k[i] MSB of u[i] are non-zero. */
+ rs->u[i] = u.u64;
+ }
+ for (i = 0; i < 10; i++)
+ (void)lj_prng_u64(rs);
+}
+
+/* PRNG extract function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with PRNGState. */
+LJLIB_CF(math_random) LJLIB_REC(.)
+{
+ int n = (int)(L->top - L->base);
+ PRNGState *rs = (PRNGState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ U64double u;
+ double d;
+ u.u64 = lj_prng_u64d(rs);
+ d = u.d - 1.0;
+ if (n > 0) {
+#if LJ_DUALNUM
+ int isint = 1;
+ double r1;
+ lj_lib_checknumber(L, 1);
+ if (tvisint(L->base)) {
+ r1 = (lua_Number)intV(L->base);
+ } else {
+ isint = 0;
+ r1 = numV(L->base);
+ }
+#else
+ double r1 = lj_lib_checknum(L, 1);
+#endif
+ if (n == 1) {
+ d = lj_vm_floor(d*r1) + 1.0; /* d is an int in range [1, r1] */
+ } else {
+#if LJ_DUALNUM
+ double r2;
+ lj_lib_checknumber(L, 2);
+ if (tvisint(L->base+1)) {
+ r2 = (lua_Number)intV(L->base+1);
+ } else {
+ isint = 0;
+ r2 = numV(L->base+1);
+ }
+#else
+ double r2 = lj_lib_checknum(L, 2);
+#endif
+ d = lj_vm_floor(d*(r2-r1+1.0)) + r1; /* d is an int in range [r1, r2] */
+ }
+#if LJ_DUALNUM
+ if (isint) {
+ setintV(L->top-1, lj_num2int(d));
+ return 1;
+ }
+#endif
+ } /* else: d is a double in range [0, 1) */
+ setnumV(L->top++, d);
+ return 1;
+}
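+
+/* Usage sketch (illustrative, not part of the original source):
+**   math.random()       --> double in [0, 1)
+**   math.random(6)      --> integer in [1, 6]
+**   math.random(10, 20) --> integer in [10, 20]
+*/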
+
+/* PRNG seed function. */
+LJLIB_PUSH(top-2) /* Upvalue holds userdata with PRNGState. */
+LJLIB_CF(math_randomseed)
+{
+ PRNGState *rs = (PRNGState *)(uddata(udataV(lj_lib_upvalue(L, 1))));
+ random_seed(rs, lj_lib_checknum(L, 1));
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_math(lua_State *L)
+{
+ PRNGState *rs = (PRNGState *)lua_newuserdata(L, sizeof(PRNGState));
+ lj_prng_seed_fixed(rs);
+ LJ_LIB_REG(L, LUA_MATHLIBNAME, math);
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_os.c b/libs/luajit-cmake/luajit/src/lib_os.c
new file mode 100644
index 0000000..6bcd014
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_os.c
@@ -0,0 +1,292 @@
+/*
+** OS library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <errno.h>
+#include <time.h>
+
+#define lib_os_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_lib.h"
+
+#if LJ_TARGET_POSIX
+#include <unistd.h>
+#else
+#include <stdio.h>
+#endif
+
+#if !LJ_TARGET_PSVITA
+#include <locale.h>
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_os
+
+LJLIB_CF(os_execute)
+{
+#if LJ_NO_SYSTEM
+#if LJ_52
+ errno = ENOSYS;
+ return luaL_fileresult(L, 0, NULL);
+#else
+ lua_pushinteger(L, -1);
+ return 1;
+#endif
+#else
+ const char *cmd = luaL_optstring(L, 1, NULL);
+ int stat = system(cmd);
+#if LJ_52
+ if (cmd)
+ return luaL_execresult(L, stat);
+ setboolV(L->top++, 1);
+#else
+ setintV(L->top++, stat);
+#endif
+ return 1;
+#endif
+}
+
+LJLIB_CF(os_remove)
+{
+ const char *filename = luaL_checkstring(L, 1);
+ return luaL_fileresult(L, remove(filename) == 0, filename);
+}
+
+LJLIB_CF(os_rename)
+{
+ const char *fromname = luaL_checkstring(L, 1);
+ const char *toname = luaL_checkstring(L, 2);
+ return luaL_fileresult(L, rename(fromname, toname) == 0, fromname);
+}
+
+LJLIB_CF(os_tmpname)
+{
+#if LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PS5 || LJ_TARGET_PSVITA || LJ_TARGET_NX
+ lj_err_caller(L, LJ_ERR_OSUNIQF);
+ return 0;
+#else
+#if LJ_TARGET_POSIX
+ char buf[15+1];
+ int fp;
+ strcpy(buf, "/tmp/lua_XXXXXX");
+ fp = mkstemp(buf);
+ if (fp != -1)
+ close(fp);
+ else
+ lj_err_caller(L, LJ_ERR_OSUNIQF);
+#else
+ char buf[L_tmpnam];
+ if (tmpnam(buf) == NULL)
+ lj_err_caller(L, LJ_ERR_OSUNIQF);
+#endif
+ lua_pushstring(L, buf);
+ return 1;
+#endif
+}
+
+LJLIB_CF(os_getenv)
+{
+#if LJ_TARGET_CONSOLE
+ lua_pushnil(L);
+#else
+ lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */
+#endif
+ return 1;
+}
+
+LJLIB_CF(os_exit)
+{
+ int status;
+ if (L->base < L->top && tvisbool(L->base))
+ status = boolV(L->base) ? EXIT_SUCCESS : EXIT_FAILURE;
+ else
+ status = lj_lib_optint(L, 1, EXIT_SUCCESS);
+ if (L->base+1 < L->top && tvistruecond(L->base+1))
+ lua_close(L);
+ exit(status);
+ return 0; /* Unreachable. */
+}
+
+LJLIB_CF(os_clock)
+{
+ setnumV(L->top++, ((lua_Number)clock())*(1.0/(lua_Number)CLOCKS_PER_SEC));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void setfield(lua_State *L, const char *key, int value)
+{
+ lua_pushinteger(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static void setboolfield(lua_State *L, const char *key, int value)
+{
+ if (value < 0) /* undefined? */
+ return; /* does not set field */
+ lua_pushboolean(L, value);
+ lua_setfield(L, -2, key);
+}
+
+static int getboolfield(lua_State *L, const char *key)
+{
+ int res;
+ lua_getfield(L, -1, key);
+ res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+}
+
+static int getfield(lua_State *L, const char *key, int d)
+{
+ int res;
+ lua_getfield(L, -1, key);
+ if (lua_isnumber(L, -1)) {
+ res = (int)lua_tointeger(L, -1);
+ } else {
+ if (d < 0)
+ lj_err_callerv(L, LJ_ERR_OSDATEF, key);
+ res = d;
+ }
+ lua_pop(L, 1);
+ return res;
+}
+
+LJLIB_CF(os_date)
+{
+ const char *s = luaL_optstring(L, 1, "%c");
+ time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL));
+ struct tm *stm;
+#if LJ_TARGET_POSIX
+ struct tm rtm;
+#endif
+ if (*s == '!') { /* UTC? */
+ s++; /* Skip '!' */
+#if LJ_TARGET_POSIX
+ stm = gmtime_r(&t, &rtm);
+#else
+ stm = gmtime(&t);
+#endif
+ } else {
+#if LJ_TARGET_POSIX
+ stm = localtime_r(&t, &rtm);
+#else
+ stm = localtime(&t);
+#endif
+ }
+ if (stm == NULL) { /* Invalid date? */
+ setnilV(L->top++);
+ } else if (strcmp(s, "*t") == 0) {
+ lua_createtable(L, 0, 9); /* 9 = number of fields */
+ setfield(L, "sec", stm->tm_sec);
+ setfield(L, "min", stm->tm_min);
+ setfield(L, "hour", stm->tm_hour);
+ setfield(L, "day", stm->tm_mday);
+ setfield(L, "month", stm->tm_mon+1);
+ setfield(L, "year", stm->tm_year+1900);
+ setfield(L, "wday", stm->tm_wday+1);
+ setfield(L, "yday", stm->tm_yday+1);
+ setboolfield(L, "isdst", stm->tm_isdst);
+ } else if (*s) {
+ SBuf *sb = &G(L)->tmpbuf;
+ MSize sz = 0, retry = 4;
+ const char *q;
+ for (q = s; *q; q++)
+ sz += (*q == '%') ? 30 : 1; /* Overflow doesn't matter. */
+ setsbufL(sb, L);
+ while (retry--) { /* Limit growth for invalid format or empty result. */
+ char *buf = lj_buf_need(sb, sz);
+ size_t len = strftime(buf, sbufsz(sb), s, stm);
+ if (len) {
+ setstrV(L, L->top++, lj_str_new(L, buf, len));
+ lj_gc_check(L);
+ break;
+ }
+ sz += (sz|1);
+ }
+ } else {
+ setstrV(L, L->top++, &G(L)->strempty);
+ }
+ return 1;
+}
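+
+/* Usage sketch (illustrative, not part of the original source):
+**   os.date("*t").year   --> numeric year from the table form
+**   os.date("!%Y-%m-%d") --> UTC date formatted via strftime()
+*/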
+
+LJLIB_CF(os_time)
+{
+ time_t t;
+ if (lua_isnoneornil(L, 1)) { /* called without args? */
+ t = time(NULL); /* get current time */
+ } else {
+ struct tm ts;
+ luaL_checktype(L, 1, LUA_TTABLE);
+ lua_settop(L, 1); /* make sure table is at the top */
+ ts.tm_sec = getfield(L, "sec", 0);
+ ts.tm_min = getfield(L, "min", 0);
+ ts.tm_hour = getfield(L, "hour", 12);
+ ts.tm_mday = getfield(L, "day", -1);
+ ts.tm_mon = getfield(L, "month", -1) - 1;
+ ts.tm_year = getfield(L, "year", -1) - 1900;
+ ts.tm_isdst = getboolfield(L, "isdst");
+ t = mktime(&ts);
+ }
+ if (t == (time_t)(-1))
+ lua_pushnil(L);
+ else
+ lua_pushnumber(L, (lua_Number)t);
+ return 1;
+}
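+
+/* Usage sketch (illustrative): missing fields take the defaults passed to
+** getfield() above, so the hour defaults to 12 (noon):
+**   os.time{year=2000, month=1, day=1}  --> 2000-01-01 12:00, local time
+*/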
+
+LJLIB_CF(os_difftime)
+{
+ lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)),
+ (time_t)(luaL_optnumber(L, 2, (lua_Number)0))));
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(os_setlocale)
+{
+#if LJ_TARGET_PSVITA
+ lua_pushliteral(L, "C");
+#else
+ GCstr *s = lj_lib_optstr(L, 1);
+ const char *str = s ? strdata(s) : NULL;
+ int opt = lj_lib_checkopt(L, 2, 6,
+ "\5ctype\7numeric\4time\7collate\10monetary\1\377\3all");
+ if (opt == 0) opt = LC_CTYPE;
+ else if (opt == 1) opt = LC_NUMERIC;
+ else if (opt == 2) opt = LC_TIME;
+ else if (opt == 3) opt = LC_COLLATE;
+ else if (opt == 4) opt = LC_MONETARY;
+ else if (opt == 6) opt = LC_ALL;
+ lua_pushstring(L, setlocale(opt, str));
+#endif
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_os(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_OSLIBNAME, os);
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_package.c b/libs/luajit-cmake/luajit/src/lib_package.c
new file mode 100644
index 0000000..63a9121
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_package.c
@@ -0,0 +1,628 @@
+/*
+** Package library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2012 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_package_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Error codes for ll_loadfunc. */
+#define PACKAGE_ERR_LIB 1
+#define PACKAGE_ERR_FUNC 2
+#define PACKAGE_ERR_LOAD 3
+
+/* Redefined in platform specific part. */
+#define PACKAGE_LIB_FAIL "open"
+#define setprogdir(L) ((void)0)
+
+/* Symbol name prefixes. */
+#define SYMPREFIX_CF "luaopen_%s"
+#define SYMPREFIX_BC "luaJIT_BC_%s"
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+
+static void ll_unloadlib(void *lib)
+{
+ dlclose(lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ void *lib = dlopen(path, RTLD_NOW | (gl ? RTLD_GLOBAL : RTLD_LOCAL));
+ if (lib == NULL) lua_pushstring(L, dlerror());
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)dlsym(lib, sym);
+ if (f == NULL) lua_pushstring(L, dlerror());
+ return f;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+#if defined(RTLD_DEFAULT) && !defined(NO_RTLD_DEFAULT)
+ if (lib == NULL) lib = RTLD_DEFAULT;
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+ if (lib == NULL) lib = (void *)(intptr_t)-2;
+#endif
+ return (const char *)dlsym(lib, sym);
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#if LJ_TARGET_UWP
+void *LJ_WIN_LOADLIBA(const char *path)
+{
+ DWORD err = GetLastError();
+ wchar_t wpath[256];
+ HANDLE lib = NULL;
+ if (MultiByteToWideChar(CP_ACP, 0, path, -1, wpath, 256) > 0) {
+ lib = LoadPackagedLibrary(wpath, 0);
+ }
+ SetLastError(err);
+ return lib;
+}
+#endif
+
+#undef setprogdir
+
+static void setprogdir(lua_State *L)
+{
+ char buff[MAX_PATH + 1];
+ char *lb;
+ DWORD nsize = sizeof(buff);
+ DWORD n = GetModuleFileNameA(NULL, buff, nsize);
+ if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) {
+ luaL_error(L, "unable to get ModuleFileName");
+ } else {
+ *lb = '\0';
+ luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff);
+ lua_remove(L, -2); /* remove original string */
+ }
+}
+
+static void pusherror(lua_State *L)
+{
+ DWORD error = GetLastError();
+#if LJ_TARGET_XBOXONE
+ wchar_t wbuffer[128];
+ char buffer[128*2];
+ if (FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, error, 0, wbuffer, sizeof(wbuffer)/sizeof(wchar_t), NULL) &&
+ WideCharToMultiByte(CP_ACP, 0, wbuffer, 128, buffer, 128*2, NULL, NULL))
+#else
+ char buffer[128];
+ if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, error, 0, buffer, sizeof(buffer), NULL))
+#endif
+ lua_pushstring(L, buffer);
+ else
+ lua_pushfstring(L, "system error %d\n", error);
+}
+
+static void ll_unloadlib(void *lib)
+{
+ FreeLibrary((HINSTANCE)lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ HINSTANCE lib = LJ_WIN_LOADLIBA(path);
+ if (lib == NULL) pusherror(L);
+ UNUSED(gl);
+ return lib;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym);
+ if (f == NULL) pusherror(L);
+ return f;
+}
+
+#if LJ_TARGET_UWP
+EXTERN_C IMAGE_DOS_HEADER __ImageBase;
+#endif
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ if (lib) {
+ return (const char *)GetProcAddress((HINSTANCE)lib, sym);
+ } else {
+#if LJ_TARGET_UWP
+ return (const char *)GetProcAddress((HINSTANCE)&__ImageBase, sym);
+#else
+ HINSTANCE h = GetModuleHandleA(NULL);
+ const char *p = (const char *)GetProcAddress(h, sym);
+ if (p == NULL && GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)ll_bcsym, &h))
+ p = (const char *)GetProcAddress(h, sym);
+ return p;
+#endif
+ }
+}
+
+#else
+
+#undef PACKAGE_LIB_FAIL
+#define PACKAGE_LIB_FAIL "absent"
+
+#define DLMSG "dynamic libraries not enabled; no support for target OS"
+
+static void ll_unloadlib(void *lib)
+{
+ UNUSED(lib);
+}
+
+static void *ll_load(lua_State *L, const char *path, int gl)
+{
+ UNUSED(path); UNUSED(gl);
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static lua_CFunction ll_sym(lua_State *L, void *lib, const char *sym)
+{
+ UNUSED(lib); UNUSED(sym);
+ lua_pushliteral(L, DLMSG);
+ return NULL;
+}
+
+static const char *ll_bcsym(void *lib, const char *sym)
+{
+ UNUSED(lib); UNUSED(sym);
+ return NULL;
+}
+
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static void **ll_register(lua_State *L, const char *path)
+{
+ void **plib;
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */
+ if (!lua_isnil(L, -1)) { /* is there an entry? */
+ plib = (void **)lua_touserdata(L, -1);
+ } else { /* no entry yet; create one */
+ lua_pop(L, 1);
+ plib = (void **)lua_newuserdata(L, sizeof(void *));
+ *plib = NULL;
+ luaL_setmetatable(L, "_LOADLIB");
+ lua_pushfstring(L, "LOADLIB: %s", path);
+ lua_pushvalue(L, -2);
+ lua_settable(L, LUA_REGISTRYINDEX);
+ }
+ return plib;
+}
+
+static const char *mksymname(lua_State *L, const char *modname,
+ const char *prefix)
+{
+ const char *funcname;
+ const char *mark = strchr(modname, *LUA_IGMARK);
+ if (mark) modname = mark + 1;
+ funcname = luaL_gsub(L, modname, ".", "_");
+ funcname = lua_pushfstring(L, prefix, funcname);
+ lua_remove(L, -2); /* remove 'gsub' result */
+ return funcname;
+}
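+
+/* Mangling sketch (illustrative): dots become underscores and anything up
+** to an ignore mark (LUA_IGMARK, '-' by default) is dropped:
+**   mksymname(L, "foo.bar", SYMPREFIX_CF)    --> "luaopen_foo_bar"
+**   mksymname(L, "v2-foo.bar", SYMPREFIX_CF) --> "luaopen_foo_bar"
+*/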
+
+static int ll_loadfunc(lua_State *L, const char *path, const char *name, int r)
+{
+ void **reg;
+ if (strlen(path) >= 4096) {
+ lua_pushliteral(L, "path too long");
+ return PACKAGE_ERR_LIB;
+ }
+ reg = ll_register(L, path);
+ if (*reg == NULL) *reg = ll_load(L, path, (*name == '*'));
+ if (*reg == NULL) {
+ return PACKAGE_ERR_LIB; /* Unable to load library. */
+ } else if (*name == '*') { /* Only load library into global namespace. */
+ lua_pushboolean(L, 1);
+ return 0;
+ } else {
+ const char *sym = r ? name : mksymname(L, name, SYMPREFIX_CF);
+ lua_CFunction f = ll_sym(L, *reg, sym);
+ if (f) {
+ lua_pushcfunction(L, f);
+ return 0;
+ }
+ if (!r) {
+ const char *bcdata = ll_bcsym(*reg, mksymname(L, name, SYMPREFIX_BC));
+ lua_pop(L, 1);
+ if (bcdata) {
+ if (luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ return PACKAGE_ERR_LOAD;
+ return 0;
+ }
+ }
+ return PACKAGE_ERR_FUNC; /* Unable to find function. */
+ }
+}
+
+static int lj_cf_package_loadlib(lua_State *L)
+{
+ const char *path = luaL_checkstring(L, 1);
+ const char *init = luaL_checkstring(L, 2);
+ int st = ll_loadfunc(L, path, init, 1);
+ if (st == 0) { /* no errors? */
+ return 1; /* return the loaded function */
+ } else { /* error; error message is on stack top */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ lua_pushstring(L, (st == PACKAGE_ERR_LIB) ? PACKAGE_LIB_FAIL : "init");
+ return 3; /* return nil, error message, and where */
+ }
+}
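+
+/* Usage sketch (illustrative; the library name is hypothetical):
+**   local init = package.loadlib("./mylib.so", "luaopen_mylib")
+** On success the loaded function is returned; on failure it returns
+** nil, the error message and either "open"/"absent" or "init".
+*/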
+
+static int lj_cf_package_unloadlib(lua_State *L)
+{
+ void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB");
+ if (*lib) ll_unloadlib(*lib);
+ *lib = NULL; /* mark library as closed */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int readable(const char *filename)
+{
+ FILE *f = fopen(filename, "r"); /* try to open file */
+ if (f == NULL) return 0; /* open failed */
+ fclose(f);
+ return 1;
+}
+
+static const char *pushnexttemplate(lua_State *L, const char *path)
+{
+ const char *l;
+ while (*path == *LUA_PATHSEP) path++; /* skip separators */
+ if (*path == '\0') return NULL; /* no more templates */
+ l = strchr(path, *LUA_PATHSEP); /* find next separator */
+ if (l == NULL) l = path + strlen(path);
+ lua_pushlstring(L, path, (size_t)(l - path)); /* template */
+ return l;
+}
+
+static const char *searchpath (lua_State *L, const char *name,
+ const char *path, const char *sep,
+ const char *dirsep)
+{
+ luaL_Buffer msg; /* to build error message */
+ luaL_buffinit(L, &msg);
+ if (*sep != '\0') /* non-empty separator? */
+ name = luaL_gsub(L, name, sep, dirsep); /* replace it by 'dirsep' */
+ while ((path = pushnexttemplate(L, path)) != NULL) {
+ const char *filename = luaL_gsub(L, lua_tostring(L, -1),
+ LUA_PATH_MARK, name);
+ lua_remove(L, -2); /* remove path template */
+ if (readable(filename)) /* does the file exist and is it readable? */
+ return filename; /* return that file name */
+ lua_pushfstring(L, "\n\tno file " LUA_QS, filename);
+ lua_remove(L, -2); /* remove file name */
+ luaL_addvalue(&msg); /* concatenate error msg. entry */
+ }
+ luaL_pushresult(&msg); /* create error message */
+ return NULL; /* not found */
+}
+
+static int lj_cf_package_searchpath(lua_State *L)
+{
+ const char *f = searchpath(L, luaL_checkstring(L, 1),
+ luaL_checkstring(L, 2),
+ luaL_optstring(L, 3, "."),
+ luaL_optstring(L, 4, LUA_DIRSEP));
+ if (f != NULL) {
+ return 1;
+ } else { /* error message is on top of the stack */
+ lua_pushnil(L);
+ lua_insert(L, -2);
+ return 2; /* return nil + error message */
+ }
+}
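+
+/* Usage sketch (illustrative; paths are hypothetical):
+**   package.searchpath("foo.bar", "./?.lua;./?/init.lua")
+** probes "./foo/bar.lua", then "./foo/bar/init.lua", returning the first
+** readable file, or nil plus the accumulated "no file ..." message.
+*/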
+
+static const char *findfile(lua_State *L, const char *name,
+ const char *pname)
+{
+ const char *path;
+ lua_getfield(L, LUA_ENVIRONINDEX, pname);
+ path = lua_tostring(L, -1);
+ if (path == NULL)
+ luaL_error(L, LUA_QL("package.%s") " must be a string", pname);
+ return searchpath(L, name, path, ".", LUA_DIRSEP);
+}
+
+static void loaderror(lua_State *L, const char *filename)
+{
+ luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s",
+ lua_tostring(L, 1), filename, lua_tostring(L, -1));
+}
+
+static int lj_cf_package_loader_lua(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ filename = findfile(L, name, "path");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (luaL_loadfile(L, filename) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_c(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ const char *filename = findfile(L, name, "cpath");
+ if (filename == NULL) return 1; /* library not found in this path */
+ if (ll_loadfunc(L, filename, name, 0) != 0)
+ loaderror(L, filename);
+ return 1; /* library loaded successfully */
+}
+
+static int lj_cf_package_loader_croot(lua_State *L)
+{
+ const char *filename;
+ const char *name = luaL_checkstring(L, 1);
+ const char *p = strchr(name, '.');
+ int st;
+ if (p == NULL) return 0; /* is root */
+ lua_pushlstring(L, name, (size_t)(p - name));
+ filename = findfile(L, lua_tostring(L, -1), "cpath");
+ if (filename == NULL) return 1; /* root not found */
+ if ((st = ll_loadfunc(L, filename, name, 0)) != 0) {
+ if (st != PACKAGE_ERR_FUNC) loaderror(L, filename); /* real error */
+ lua_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS,
+ name, filename);
+ return 1; /* function not found */
+ }
+ return 1;
+}
+
+static int lj_cf_package_loader_preload(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ lua_getfield(L, LUA_ENVIRONINDEX, "preload");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.preload") " must be a table");
+ lua_getfield(L, -1, name);
+ if (lua_isnil(L, -1)) { /* Not found? */
+ const char *bcname = mksymname(L, name, SYMPREFIX_BC);
+ const char *bcdata = ll_bcsym(NULL, bcname);
+ if (bcdata == NULL || luaL_loadbuffer(L, bcdata, ~(size_t)0, name) != 0)
+ lua_pushfstring(L, "\n\tno field package.preload['%s']", name);
+ }
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define KEY_SENTINEL (U64x(80000000,00000000)|'s')
+
+static int lj_cf_package_require(lua_State *L)
+{
+ const char *name = luaL_checkstring(L, 1);
+ int i;
+ lua_settop(L, 1); /* _LOADED table will be at index 2 */
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, 2, name);
+ if (lua_toboolean(L, -1)) { /* is it there? */
+ if ((L->top-1)->u64 == KEY_SENTINEL) /* check loops */
+ luaL_error(L, "loop or previous error loading module " LUA_QS, name);
+ return 1; /* package is already loaded */
+ }
+ /* else must load it; iterate over available loaders */
+ lua_getfield(L, LUA_ENVIRONINDEX, "loaders");
+ if (!lua_istable(L, -1))
+ luaL_error(L, LUA_QL("package.loaders") " must be a table");
+ lua_pushliteral(L, ""); /* error message accumulator */
+ for (i = 1; ; i++) {
+ lua_rawgeti(L, -2, i); /* get a loader */
+ if (lua_isnil(L, -1))
+ luaL_error(L, "module " LUA_QS " not found:%s",
+ name, lua_tostring(L, -2));
+ lua_pushstring(L, name);
+ lua_call(L, 1, 1); /* call it */
+ if (lua_isfunction(L, -1)) /* did it find module? */
+ break; /* module loaded successfully */
+ else if (lua_isstring(L, -1)) /* loader returned error message? */
+ lua_concat(L, 2); /* accumulate it */
+ else
+ lua_pop(L, 1);
+ }
+ (L->top++)->u64 = KEY_SENTINEL;
+ lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */
+ lua_pushstring(L, name); /* pass name as argument to module */
+ lua_call(L, 1, 1); /* run loaded module */
+ if (!lua_isnil(L, -1)) /* non-nil return? */
+ lua_setfield(L, 2, name); /* _LOADED[name] = returned value */
+ lua_getfield(L, 2, name);
+ if ((L->top-1)->u64 == KEY_SENTINEL) { /* module did not set a value? */
+ lua_pushboolean(L, 1); /* use true as result */
+ lua_pushvalue(L, -1); /* extra copy to be returned */
+ lua_setfield(L, 2, name); /* _LOADED[name] = true */
+ }
+ lj_lib_checkfpu(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void setfenv(lua_State *L)
+{
+ lua_Debug ar;
+ if (lua_getstack(L, 1, &ar) == 0 ||
+ lua_getinfo(L, "f", &ar) == 0 || /* get calling function */
+ lua_iscfunction(L, -1))
+ luaL_error(L, LUA_QL("module") " not called from a Lua function");
+ lua_pushvalue(L, -2);
+ lua_setfenv(L, -2);
+ lua_pop(L, 1);
+}
+
+static void dooptions(lua_State *L, int n)
+{
+ int i;
+ for (i = 2; i <= n; i++) {
+ lua_pushvalue(L, i); /* get option (a function) */
+ lua_pushvalue(L, -2); /* module */
+ lua_call(L, 1, 0);
+ }
+}
+
+static void modinit(lua_State *L, const char *modname)
+{
+ const char *dot;
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -2, "_M"); /* module._M = module */
+ lua_pushstring(L, modname);
+ lua_setfield(L, -2, "_NAME");
+ dot = strrchr(modname, '.'); /* look for last dot in module name */
+ if (dot == NULL) dot = modname; else dot++;
+ /* set _PACKAGE as package name (full module name minus last part) */
+ lua_pushlstring(L, modname, (size_t)(dot - modname));
+ lua_setfield(L, -2, "_PACKAGE");
+}
+
+static int lj_cf_package_module(lua_State *L)
+{
+ const char *modname = luaL_checkstring(L, 1);
+ int lastarg = (int)(L->top - L->base);
+ luaL_pushmodule(L, modname, 1);
+ lua_getfield(L, -1, "_NAME");
+ if (!lua_isnil(L, -1)) { /* Module already initialized? */
+ lua_pop(L, 1);
+ } else {
+ lua_pop(L, 1);
+ modinit(L, modname);
+ }
+ lua_pushvalue(L, -1);
+ setfenv(L);
+ dooptions(L, lastarg);
+ return LJ_52;
+}
+
+static int lj_cf_package_seeall(lua_State *L)
+{
+ luaL_checktype(L, 1, LUA_TTABLE);
+ if (!lua_getmetatable(L, 1)) {
+ lua_createtable(L, 0, 1); /* create new metatable */
+ lua_pushvalue(L, -1);
+ lua_setmetatable(L, 1);
+ }
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ lua_setfield(L, -2, "__index"); /* mt.__index = _G */
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#define AUXMARK "\1"
+
+static void setpath(lua_State *L, const char *fieldname, const char *envname,
+ const char *def, int noenv)
+{
+#if LJ_TARGET_CONSOLE
+ const char *path = NULL;
+ UNUSED(envname);
+#else
+ const char *path = getenv(envname);
+#endif
+ if (path == NULL || noenv) {
+ lua_pushstring(L, def);
+ } else {
+ path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP,
+ LUA_PATHSEP AUXMARK LUA_PATHSEP);
+ luaL_gsub(L, path, AUXMARK, def);
+ lua_remove(L, -2);
+ }
+ setprogdir(L);
+ lua_setfield(L, -2, fieldname);
+}
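+
+/* Substitution sketch (illustrative): an empty template (";;") in the
+** environment value is replaced by the compiled-in default, e.g.
+**   LUA_PATH="./my/?.lua;;"  -->  "./my/?.lua;" .. LUA_PATH_DEFAULT .. ";"
+*/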
+
+static const luaL_Reg package_lib[] = {
+ { "loadlib", lj_cf_package_loadlib },
+ { "searchpath", lj_cf_package_searchpath },
+ { "seeall", lj_cf_package_seeall },
+ { NULL, NULL }
+};
+
+static const luaL_Reg package_global[] = {
+ { "module", lj_cf_package_module },
+ { "require", lj_cf_package_require },
+ { NULL, NULL }
+};
+
+static const lua_CFunction package_loaders[] =
+{
+ lj_cf_package_loader_preload,
+ lj_cf_package_loader_lua,
+ lj_cf_package_loader_c,
+ lj_cf_package_loader_croot,
+ NULL
+};
+
+LUALIB_API int luaopen_package(lua_State *L)
+{
+ int i;
+ int noenv;
+ luaL_newmetatable(L, "_LOADLIB");
+ lj_lib_pushcf(L, lj_cf_package_unloadlib, 1);
+ lua_setfield(L, -2, "__gc");
+ luaL_register(L, LUA_LOADLIBNAME, package_lib);
+ lua_copy(L, -1, LUA_ENVIRONINDEX);
+ lua_createtable(L, sizeof(package_loaders)/sizeof(package_loaders[0])-1, 0);
+ for (i = 0; package_loaders[i] != NULL; i++) {
+ lj_lib_pushcf(L, package_loaders[i], 1);
+ lua_rawseti(L, -2, i+1);
+ }
+#if LJ_52
+ lua_pushvalue(L, -1);
+ lua_setfield(L, -3, "searchers");
+#endif
+ lua_setfield(L, -2, "loaders");
+ lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
+ noenv = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT, noenv);
+ setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT, noenv);
+ lua_pushliteral(L, LUA_PATH_CONFIG);
+ lua_setfield(L, -2, "config");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_setfield(L, -2, "loaded");
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
+ lua_setfield(L, -2, "preload");
+ lua_pushvalue(L, LUA_GLOBALSINDEX);
+ luaL_register(L, NULL, package_global);
+ lua_pop(L, 1);
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_string.c b/libs/luajit-cmake/luajit/src/lib_string.c
new file mode 100644
index 0000000..79aeddf
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_string.c
@@ -0,0 +1,676 @@
+/*
+** String library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_string_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_ff.h"
+#include "lj_bcdump.h"
+#include "lj_char.h"
+#include "lj_strfmt.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_string
+
+LJLIB_LUA(string_len) /*
+ function(s)
+ CHECK_str(s)
+ return #s
+ end
+*/
+
+LJLIB_ASM(string_byte) LJLIB_REC(string_range 0)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t len = (int32_t)s->len;
+ int32_t start = lj_lib_optint(L, 2, 1);
+ int32_t stop = lj_lib_optint(L, 3, start);
+ int32_t n, i;
+ const unsigned char *p;
+ if (stop < 0) stop += len+1;
+ if (start < 0) start += len+1;
+ if (start <= 0) start = 1;
+ if (stop > len) stop = len;
+ if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */
+ start--;
+ n = stop - start;
+ if ((uint32_t)n > LUAI_MAXCSTACK)
+ lj_err_caller(L, LJ_ERR_STRSLC);
+ lj_state_checkstack(L, (MSize)n);
+ p = (const unsigned char *)strdata(s) + start;
+ for (i = 0; i < n; i++)
+ setintV(L->base + i-1-LJ_FR2, p[i]);
+ return FFH_RES(n);
+}
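+
+/* Usage sketch (illustrative, not part of the original source):
+**   string.byte("ABC")       --> 65
+**   string.byte("ABC", 1, 3) --> 65  66  67
+**   string.byte("ABC", -1)   --> 67
+*/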
+
+LJLIB_ASM(string_char) LJLIB_REC(.)
+{
+ int i, nargs = (int)(L->top - L->base);
+ char *buf = lj_buf_tmp(L, (MSize)nargs);
+ for (i = 1; i <= nargs; i++) {
+ int32_t k = lj_lib_checkint(L, i);
+ if (!checku8(k))
+ lj_err_arg(L, i, LJ_ERR_BADVAL);
+ buf[i-1] = (char)k;
+ }
+ setstrV(L, L->base-1-LJ_FR2, lj_str_new(L, buf, (size_t)nargs));
+ return FFH_RES(1);
+}
+
+LJLIB_ASM(string_sub) LJLIB_REC(string_range 1)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkint(L, 2);
+ setintV(L->base+2, lj_lib_optint(L, 3, -1));
+ return FFH_RETRY;
+}
+
+LJLIB_CF(string_rep) LJLIB_REC(.)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ int32_t rep = lj_lib_checkint(L, 2);
+ GCstr *sep = lj_lib_optstr(L, 3);
+ SBuf *sb = lj_buf_tmp_(L);
+ if (sep && rep > 1) {
+ GCstr *s2 = lj_buf_cat2str(L, sep, s);
+ lj_buf_reset(sb);
+ lj_buf_putstr(sb, s);
+ s = s2;
+ rep--;
+ }
+ sb = lj_buf_putstr_rep(sb, s, rep);
+ setstrV(L, L->top-1, lj_buf_str(L, sb));
+ lj_gc_check(L);
+ return 1;
+}
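+
+/* Usage sketch (illustrative; the separator is a LuaJIT/5.2 extension):
+**   string.rep("ab", 3)      --> "ababab"
+**   string.rep("ab", 3, "-") --> "ab-ab-ab"
+*/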
+
+LJLIB_ASM(string_reverse) LJLIB_REC(string_op IRCALL_lj_buf_putstr_reverse)
+{
+ lj_lib_checkstr(L, 1);
+ return FFH_RETRY;
+}
+LJLIB_ASM_(string_lower) LJLIB_REC(string_op IRCALL_lj_buf_putstr_lower)
+LJLIB_ASM_(string_upper) LJLIB_REC(string_op IRCALL_lj_buf_putstr_upper)
+
+/* ------------------------------------------------------------------------ */
+
+static int writer_buf(lua_State *L, const void *p, size_t size, void *sb)
+{
+ lj_buf_putmem((SBuf *)sb, p, (MSize)size);
+ UNUSED(L);
+ return 0;
+}
+
+LJLIB_CF(string_dump)
+{
+ GCfunc *fn = lj_lib_checkfunc(L, 1);
+ int strip = L->base+1 < L->top && tvistruecond(L->base+1);
+ SBuf *sb = lj_buf_tmp_(L); /* Assumes lj_bcwrite() doesn't use tmpbuf. */
+ L->top = L->base+1;
+ if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, sb, strip))
+ lj_err_caller(L, LJ_ERR_STRDUMP);
+ setstrV(L, L->top-1, lj_buf_str(L, sb));
+ lj_gc_check(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* macro to `unsign' a character */
+#define uchar(c) ((unsigned char)(c))
+
+#define CAP_UNFINISHED (-1)
+#define CAP_POSITION (-2)
+
+typedef struct MatchState {
+ const char *src_init; /* init of source string */
+ const char *src_end; /* end (`\0') of source string */
+ lua_State *L;
+ int level; /* total number of captures (finished or unfinished) */
+ int depth;
+ struct {
+ const char *init;
+ ptrdiff_t len;
+ } capture[LUA_MAXCAPTURES];
+} MatchState;
+
+#define L_ESC '%'
+
+static int check_capture(MatchState *ms, int l)
+{
+ l -= '1';
+ if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ return l;
+}
+
+static int capture_to_close(MatchState *ms)
+{
+ int level = ms->level;
+ for (level--; level>=0; level--)
+ if (ms->capture[level].len == CAP_UNFINISHED) return level;
+ lj_err_caller(ms->L, LJ_ERR_STRPATC);
+ return 0; /* unreachable */
+}
+
+static const char *classend(MatchState *ms, const char *p)
+{
+ switch (*p++) {
+ case L_ESC:
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATE);
+ return p+1;
+ case '[':
+ if (*p == '^') p++;
+ do { /* look for a `]' */
+ if (*p == '\0')
+ lj_err_caller(ms->L, LJ_ERR_STRPATM);
+ if (*(p++) == L_ESC && *p != '\0')
+ p++; /* skip escapes (e.g. `%]') */
+ } while (*p != ']');
+ return p+1;
+ default:
+ return p;
+ }
+}
+
+static const unsigned char match_class_map[32] = {
+ 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0,
+ LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0,
+ LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0
+};
+
+static int match_class(int c, int cl)
+{
+ if ((cl & 0xc0) == 0x40) {
+ int t = match_class_map[(cl&0x1f)];
+ if (t) {
+ t = lj_char_isa(c, t);
+ return (cl & 0x20) ? t : !t;
+ }
+ if (cl == 'z') return c == 0;
+ if (cl == 'Z') return c != 0;
+ }
+ return (cl == c);
+}
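+
+/* Dispatch sketch (illustrative): class letters are ASCII, so bit 0x20
+** distinguishes the positive lower-case class from its upper-case
+** negation, e.g. '%d' matches '5' while '%D' rejects it.
+*/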
+
+static int matchbracketclass(int c, const char *p, const char *ec)
+{
+ int sig = 1;
+ if (*(p+1) == '^') {
+ sig = 0;
+ p++; /* skip the `^' */
+ }
+ while (++p < ec) {
+ if (*p == L_ESC) {
+ p++;
+ if (match_class(c, uchar(*p)))
+ return sig;
+ }
+ else if ((*(p+1) == '-') && (p+2 < ec)) {
+ p+=2;
+ if (uchar(*(p-2)) <= c && c <= uchar(*p))
+ return sig;
+ }
+ else if (uchar(*p) == c) return sig;
+ }
+ return !sig;
+}
+
+static int singlematch(int c, const char *p, const char *ep)
+{
+ switch (*p) {
+ case '.': return 1; /* matches any char */
+ case L_ESC: return match_class(c, uchar(*(p+1)));
+ case '[': return matchbracketclass(c, p, ep-1);
+ default: return (uchar(*p) == c);
+ }
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p);
+
+static const char *matchbalance(MatchState *ms, const char *s, const char *p)
+{
+ if (*p == 0 || *(p+1) == 0)
+ lj_err_caller(ms->L, LJ_ERR_STRPATU);
+ if (*s != *p) {
+ return NULL;
+ } else {
+ int b = *p;
+ int e = *(p+1);
+ int cont = 1;
+ while (++s < ms->src_end) {
+ if (*s == e) {
+ if (--cont == 0) return s+1;
+ } else if (*s == b) {
+ cont++;
+ }
+ }
+ }
+ return NULL; /* string ends out of balance */
+}
+
+static const char *max_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ ptrdiff_t i = 0; /* counts maximum expand for item */
+ while ((s+i)<ms->src_end && singlematch(uchar(*(s+i)), p, ep))
+ i++;
+ /* keeps trying to match with the maximum repetitions */
+ while (i>=0) {
+ const char *res = match(ms, (s+i), ep+1);
+ if (res) return res;
+ i--; /* else didn't match; reduce 1 repetition to try again */
+ }
+ return NULL;
+}
+
+static const char *min_expand(MatchState *ms, const char *s,
+ const char *p, const char *ep)
+{
+ for (;;) {
+ const char *res = match(ms, s, ep+1);
+ if (res != NULL)
+ return res;
+ else if (s<ms->src_end && singlematch(uchar(*s), p, ep))
+ s++; /* try with one more repetition */
+ else
+ return NULL;
+ }
+}
+
+static const char *start_capture(MatchState *ms, const char *s,
+ const char *p, int what)
+{
+ const char *res;
+ int level = ms->level;
+ if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN);
+ ms->capture[level].init = s;
+ ms->capture[level].len = what;
+ ms->level = level+1;
+ if ((res=match(ms, s, p)) == NULL) /* match failed? */
+ ms->level--; /* undo capture */
+ return res;
+}
+
+static const char *end_capture(MatchState *ms, const char *s,
+ const char *p)
+{
+ int l = capture_to_close(ms);
+ const char *res;
+ ms->capture[l].len = s - ms->capture[l].init; /* close capture */
+ if ((res = match(ms, s, p)) == NULL) /* match failed? */
+ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
+ return res;
+}
+
+static const char *match_capture(MatchState *ms, const char *s, int l)
+{
+ size_t len;
+ l = check_capture(ms, l);
+ len = (size_t)ms->capture[l].len;
+ if ((size_t)(ms->src_end-s) >= len &&
+ memcmp(ms->capture[l].init, s, len) == 0)
+ return s+len;
+ else
+ return NULL;
+}
+
+static const char *match(MatchState *ms, const char *s, const char *p)
+{
+ if (++ms->depth > LJ_MAX_XLEVEL)
+ lj_err_caller(ms->L, LJ_ERR_STRPATX);
+ init: /* using goto's to optimize tail recursion */
+ switch (*p) {
+ case '(': /* start capture */
+ if (*(p+1) == ')') /* position capture? */
+ s = start_capture(ms, s, p+2, CAP_POSITION);
+ else
+ s = start_capture(ms, s, p+1, CAP_UNFINISHED);
+ break;
+ case ')': /* end capture */
+ s = end_capture(ms, s, p+1);
+ break;
+ case L_ESC:
+ switch (*(p+1)) {
+ case 'b': /* balanced string? */
+ s = matchbalance(ms, s, p+2);
+ if (s == NULL) break;
+ p+=4;
+ goto init; /* else s = match(ms, s, p+4); */
+ case 'f': { /* frontier? */
+ const char *ep; char previous;
+ p += 2;
+ if (*p != '[')
+ lj_err_caller(ms->L, LJ_ERR_STRPATB);
+ ep = classend(ms, p); /* points to what is next */
+ previous = (s == ms->src_init) ? '\0' : *(s-1);
+ if (matchbracketclass(uchar(previous), p, ep-1) ||
+ !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; }
+ p=ep;
+ goto init; /* else s = match(ms, s, ep); */
+ }
+ default:
+ if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */
+ s = match_capture(ms, s, uchar(*(p+1)));
+ if (s == NULL) break;
+ p+=2;
+ goto init; /* else s = match(ms, s, p+2) */
+ }
+ goto dflt; /* case default */
+ }
+ break;
+ case '\0': /* end of pattern */
+ break; /* match succeeded */
+ case '$':
+ /* is the `$' the last char in pattern? */
+ if (*(p+1) != '\0') goto dflt;
+ if (s != ms->src_end) s = NULL; /* check end of string */
+ break;
+ default: dflt: { /* it is a pattern item */
+ const char *ep = classend(ms, p); /* points to what is next */
+ int m = s<ms->src_end && singlematch(uchar(*s), p, ep);
+ switch (*ep) {
+ case '?': { /* optional */
+ const char *res;
+ if (m && ((res=match(ms, s+1, ep+1)) != NULL)) {
+ s = res;
+ break;
+ }
+ p=ep+1;
+ goto init; /* else s = match(ms, s, ep+1); */
+ }
+ case '*': /* 0 or more repetitions */
+ s = max_expand(ms, s, p, ep);
+ break;
+ case '+': /* 1 or more repetitions */
+ s = (m ? max_expand(ms, s+1, p, ep) : NULL);
+ break;
+ case '-': /* 0 or more repetitions (minimum) */
+ s = min_expand(ms, s, p, ep);
+ break;
+ default:
+ if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */
+ s = NULL;
+ break;
+ }
+ break;
+ }
+ }
+ ms->depth--;
+ return s;
+}
+
+static void push_onecapture(MatchState *ms, int i, const char *s, const char *e)
+{
+ if (i >= ms->level) {
+ if (i == 0) /* ms->level == 0, too */
+ lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */
+ else
+ lj_err_caller(ms->L, LJ_ERR_STRCAPI);
+ } else {
+ ptrdiff_t l = ms->capture[i].len;
+ if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU);
+ if (l == CAP_POSITION)
+ lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
+ else
+ lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l);
+ }
+}
+
+static int push_captures(MatchState *ms, const char *s, const char *e)
+{
+ int i;
+ int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
+ luaL_checkstack(ms->L, nlevels, "too many captures");
+ for (i = 0; i < nlevels; i++)
+ push_onecapture(ms, i, s, e);
+ return nlevels; /* number of strings pushed */
+}
+
+static int str_find_aux(lua_State *L, int find)
+{
+ GCstr *s = lj_lib_checkstr(L, 1);
+ GCstr *p = lj_lib_checkstr(L, 2);
+ int32_t start = lj_lib_optint(L, 3, 1);
+ MSize st;
+ if (start < 0) start += (int32_t)s->len; else start--;
+ if (start < 0) start = 0;
+ st = (MSize)start;
+ if (st > s->len) {
+#if LJ_52
+ setnilV(L->top-1);
+ return 1;
+#else
+ st = s->len;
+#endif
+ }
+ if (find && ((L->base+3 < L->top && tvistruecond(L->base+3)) ||
+ !lj_str_haspattern(p))) { /* Search for fixed string. */
+ const char *q = lj_str_find(strdata(s)+st, strdata(p), s->len-st, p->len);
+ if (q) {
+ setintV(L->top-2, (int32_t)(q-strdata(s)) + 1);
+ setintV(L->top-1, (int32_t)(q-strdata(s)) + (int32_t)p->len);
+ return 2;
+ }
+ } else { /* Search for pattern. */
+ MatchState ms;
+ const char *pstr = strdata(p);
+ const char *sstr = strdata(s) + st;
+ int anchor = 0;
+ if (*pstr == '^') { pstr++; anchor = 1; }
+ ms.L = L;
+ ms.src_init = strdata(s);
+ ms.src_end = strdata(s) + s->len;
+ do { /* Loop through string and try to match the pattern. */
+ const char *q;
+ ms.level = ms.depth = 0;
+ q = match(&ms, sstr, pstr);
+ if (q) {
+ if (find) {
+ setintV(L->top++, (int32_t)(sstr-(strdata(s)-1)));
+ setintV(L->top++, (int32_t)(q-strdata(s)));
+ return push_captures(&ms, NULL, NULL) + 2;
+ } else {
+ return push_captures(&ms, sstr, q);
+ }
+ }
+ } while (sstr++ < ms.src_end && !anchor);
+ }
+ setnilV(L->top-1); /* Not found. */
+ return 1;
+}
+
+LJLIB_CF(string_find) LJLIB_REC(.)
+{
+ return str_find_aux(L, 1);
+}
+
+LJLIB_CF(string_match)
+{
+ return str_find_aux(L, 0);
+}
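+
+/* Usage sketch (illustrative, not part of the original source):
+**   string.find("hello", "l+")             --> 3  4
+**   string.match("key=val", "(%w+)=(%w+)") --> "key"  "val"
+*/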
+
+LJLIB_NOREG LJLIB_CF(string_gmatch_aux)
+{
+ const char *p = strVdata(lj_lib_upvalue(L, 2));
+ GCstr *str = strV(lj_lib_upvalue(L, 1));
+ const char *s = strdata(str);
+ TValue *tvpos = lj_lib_upvalue(L, 3);
+ const char *src = s + tvpos->u32.lo;
+ MatchState ms;
+ ms.L = L;
+ ms.src_init = s;
+ ms.src_end = s + str->len;
+ for (; src <= ms.src_end; src++) {
+ const char *e;
+ ms.level = ms.depth = 0;
+ if ((e = match(&ms, src, p)) != NULL) {
+ int32_t pos = (int32_t)(e - s);
+ if (e == src) pos++; /* Ensure progress for empty match. */
+ tvpos->u32.lo = (uint32_t)pos;
+ return push_captures(&ms, src, e);
+ }
+ }
+ return 0; /* not found */
+}
+
+LJLIB_CF(string_gmatch)
+{
+ lj_lib_checkstr(L, 1);
+ lj_lib_checkstr(L, 2);
+ L->top = L->base+3;
+ (L->top-1)->u64 = 0;
+ lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3);
+ return 1;
+}
+
+static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e)
+{
+ size_t l, i;
+ const char *news = lua_tolstring(ms->L, 3, &l);
+ for (i = 0; i < l; i++) {
+ if (news[i] != L_ESC) {
+ luaL_addchar(b, news[i]);
+ } else {
+ i++; /* skip ESC */
+ if (!lj_char_isdigit(uchar(news[i]))) {
+ luaL_addchar(b, news[i]);
+ } else if (news[i] == '0') {
+ luaL_addlstring(b, s, (size_t)(e - s));
+ } else {
+ push_onecapture(ms, news[i] - '1', s, e);
+ luaL_addvalue(b); /* add capture to accumulated result */
+ }
+ }
+ }
+}
+
+static void add_value(MatchState *ms, luaL_Buffer *b,
+ const char *s, const char *e)
+{
+ lua_State *L = ms->L;
+ switch (lua_type(L, 3)) {
+ case LUA_TNUMBER:
+ case LUA_TSTRING: {
+ add_s(ms, b, s, e);
+ return;
+ }
+ case LUA_TFUNCTION: {
+ int n;
+ lua_pushvalue(L, 3);
+ n = push_captures(ms, s, e);
+ lua_call(L, n, 1);
+ break;
+ }
+ case LUA_TTABLE: {
+ push_onecapture(ms, 0, s, e);
+ lua_gettable(L, 3);
+ break;
+ }
+ }
+ if (!lua_toboolean(L, -1)) { /* nil or false? */
+ lua_pop(L, 1);
+ lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */
+ } else if (!lua_isstring(L, -1)) {
+ lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1));
+ }
+ luaL_addvalue(b); /* add result to accumulator */
+}
+
+LJLIB_CF(string_gsub)
+{
+ size_t srcl;
+ const char *src = luaL_checklstring(L, 1, &srcl);
+ const char *p = luaL_checkstring(L, 2);
+ int tr = lua_type(L, 3);
+ int max_s = luaL_optint(L, 4, (int)(srcl+1));
+ int anchor = (*p == '^') ? (p++, 1) : 0;
+ int n = 0;
+ MatchState ms;
+ luaL_Buffer b;
+ if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING ||
+ tr == LUA_TFUNCTION || tr == LUA_TTABLE))
+ lj_err_arg(L, 3, LJ_ERR_NOSFT);
+ luaL_buffinit(L, &b);
+ ms.L = L;
+ ms.src_init = src;
+ ms.src_end = src+srcl;
+ while (n < max_s) {
+ const char *e;
+ ms.level = ms.depth = 0;
+ e = match(&ms, src, p);
+ if (e) {
+ n++;
+ add_value(&ms, &b, src, e);
+ }
+ if (e && e>src) /* non-empty match? */
+ src = e; /* skip it */
+ else if (src < ms.src_end)
+ luaL_addchar(&b, *src++);
+ else
+ break;
+ if (anchor)
+ break;
+ }
+ luaL_addlstring(&b, src, (size_t)(ms.src_end-src));
+ luaL_pushresult(&b);
+ lua_pushinteger(L, n); /* number of substitutions */
+ return 2;
+}
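+
+/* Usage sketch (illustrative, not part of the original source):
+**   string.gsub("hello world", "o", "0") --> "hell0 w0rld"  2
+**   string.gsub("hello", "l", "L", 1)    --> "heLlo"  1
+*/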
+
+/* ------------------------------------------------------------------------ */
+
+LJLIB_CF(string_format) LJLIB_REC(.)
+{
+ int retry = 0;
+ SBuf *sb;
+ do {
+ sb = lj_buf_tmp_(L);
+ retry = lj_strfmt_putarg(L, sb, 1, -retry);
+ } while (retry > 0);
+ setstrV(L, L->top-1, lj_buf_str(L, sb));
+ lj_gc_check(L);
+ return 1;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_string(lua_State *L)
+{
+ GCtab *mt;
+ global_State *g;
+ LJ_LIB_REG(L, LUA_STRLIBNAME, string);
+ mt = lj_tab_new(L, 0, 1);
+ /* NOBARRIER: basemt is a GC root. */
+ g = G(L);
+ setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt));
+ settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1));
+ mt->nomm = (uint8_t)(~(1u<<MM_index));
+#if LJ_HASBUFFER
+ lj_lib_prereg(L, LUA_STRLIBNAME ".buffer", luaopen_string_buffer, tabV(L->top-1));
+#endif
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lib_table.c b/libs/luajit-cmake/luajit/src/lib_table.c
new file mode 100644
index 0000000..a723326
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lib_table.c
@@ -0,0 +1,327 @@
+/*
+** Table library.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lib_table_c
+#define LUA_LIB
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_tab.h"
+#include "lj_ff.h"
+#include "lj_lib.h"
+
+/* ------------------------------------------------------------------------ */
+
+#define LJLIB_MODULE_table
+
+LJLIB_LUA(table_foreachi) /*
+ function(t, f)
+ CHECK_tab(t)
+ CHECK_func(f)
+ for i=1,#t do
+ local r = f(i, t[i])
+ if r ~= nil then return r end
+ end
+ end
+*/
+
+LJLIB_LUA(table_foreach) /*
+ function(t, f)
+ CHECK_tab(t)
+ CHECK_func(f)
+ for k, v in PAIRS(t) do
+ local r = f(k, v)
+ if r ~= nil then return r end
+ end
+ end
+*/
+
+LJLIB_LUA(table_getn) /*
+ function(t)
+ CHECK_tab(t)
+ return #t
+ end
+*/
+
+LJLIB_CF(table_maxn)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ TValue *array = tvref(t->array);
+ Node *node;
+ lua_Number m = 0;
+ ptrdiff_t i;
+ for (i = (ptrdiff_t)t->asize - 1; i >= 0; i--)
+ if (!tvisnil(&array[i])) {
+ m = (lua_Number)(int32_t)i;
+ break;
+ }
+ node = noderef(t->node);
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tvisnumber(&node[i].key)) {
+ lua_Number n = numberVnum(&node[i].key);
+ if (n > m) m = n;
+ }
+ setnumV(L->top-1, m);
+ return 1;
+}
+
+LJLIB_CF(table_insert) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n, i = (int32_t)lj_tab_len(t) + 1;
+ int nargs = (int)((char *)L->top - (char *)L->base);
+ if (nargs != 2*sizeof(TValue)) {
+ if (nargs != 3*sizeof(TValue))
+ lj_err_caller(L, LJ_ERR_TABINS);
+ /* NOBARRIER: This just moves existing elements around. */
+ for (n = lj_lib_checkint(L, 2); i > n; i--) {
+ /* The set may invalidate the get pointer, so need to do it first! */
+ TValue *dst = lj_tab_setint(L, t, i);
+ cTValue *src = lj_tab_getint(t, i-1);
+ if (src) {
+ copyTV(L, dst, src);
+ } else {
+ setnilV(dst);
+ }
+ }
+ i = n;
+ }
+ {
+ TValue *dst = lj_tab_setint(L, t, i);
+ copyTV(L, dst, L->top-1); /* Set new value. */
+ lj_gc_barriert(L, t, dst);
+ }
+ return 0;
+}
+
+LJLIB_LUA(table_remove) /*
+ function(t, pos)
+ CHECK_tab(t)
+ local len = #t
+ if pos == nil then
+ if len ~= 0 then
+ local old = t[len]
+ t[len] = nil
+ return old
+ end
+ else
+ CHECK_int(pos)
+ if pos >= 1 and pos <= len then
+ local old = t[pos]
+ for i=pos+1,len do
+ t[i-1] = t[i]
+ end
+ t[len] = nil
+ return old
+ end
+ end
+ end
+*/
+
+LJLIB_LUA(table_move) /*
+ function(a1, f, e, t, a2)
+ CHECK_tab(a1)
+ CHECK_int(f)
+ CHECK_int(e)
+ CHECK_int(t)
+ if a2 == nil then a2 = a1 end
+ CHECK_tab(a2)
+ if e >= f then
+ local d = t - f
+ if t > e or t <= f or a2 ~= a1 then
+ for i=f,e do a2[i+d] = a1[i] end
+ else
+ for i=e,f,-1 do a2[i+d] = a1[i] end
+ end
+ end
+ return a2
+ end
+*/
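+
+/* Usage sketch (illustrative): overlapping moves copy in the safe
+** direction, so a right shift runs backwards:
+**   table.move({1,2,3,4}, 1, 3, 2)  --> {1,1,2,3}
+*/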
+
+LJLIB_CF(table_concat) LJLIB_REC(.)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ GCstr *sep = lj_lib_optstr(L, 2);
+ int32_t i = lj_lib_optint(L, 3, 1);
+ int32_t e = (L->base+3 < L->top && !tvisnil(L->base+3)) ?
+ lj_lib_checkint(L, 4) : (int32_t)lj_tab_len(t);
+ SBuf *sb = lj_buf_tmp_(L);
+ SBuf *sbx = lj_buf_puttab(sb, t, sep, i, e);
+ if (LJ_UNLIKELY(!sbx)) { /* Error: bad element type. */
+ int32_t idx = (int32_t)(intptr_t)sb->w;
+ cTValue *o = lj_tab_getint(t, idx);
+ lj_err_callerv(L, LJ_ERR_TABCAT,
+ lj_obj_itypename[o ? itypemap(o) : ~LJ_TNIL], idx);
+ }
+ setstrV(L, L->top-1, lj_buf_str(L, sbx));
+ lj_gc_check(L);
+ return 1;
+}
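+
+/* Usage sketch (illustrative, not part of the original source):
+**   table.concat({1,2,3}, "+")             --> "1+2+3"
+**   table.concat({"a","b","c"}, ",", 2, 3) --> "b,c"
+*/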
+
+/* ------------------------------------------------------------------------ */
+
+static void set2(lua_State *L, int i, int j)
+{
+ lua_rawseti(L, 1, i);
+ lua_rawseti(L, 1, j);
+}
+
+static int sort_comp(lua_State *L, int a, int b)
+{
+ if (!lua_isnil(L, 2)) { /* function? */
+ int res;
+ lua_pushvalue(L, 2);
+ lua_pushvalue(L, a-1); /* -1 to compensate function */
+ lua_pushvalue(L, b-2); /* -2 to compensate function and `a' */
+ lua_call(L, 2, 1);
+ res = lua_toboolean(L, -1);
+ lua_pop(L, 1);
+ return res;
+ } else { /* a < b? */
+ return lua_lessthan(L, a, b);
+ }
+}
+
+static void auxsort(lua_State *L, int l, int u)
+{
+ while (l < u) { /* for tail recursion */
+ int i, j;
+ /* sort elements a[l], a[(l+u)/2] and a[u] */
+ lua_rawgeti(L, 1, l);
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u] < a[l]? */
+ set2(L, l, u); /* swap a[l] - a[u] */
+ else
+ lua_pop(L, 2);
+ if (u-l == 1) break; /* only 2 elements */
+ i = (l+u)/2;
+ lua_rawgeti(L, 1, i);
+ lua_rawgeti(L, 1, l);
+ if (sort_comp(L, -2, -1)) { /* a[i]<a[l]? */
+ set2(L, i, l);
+ } else {
+ lua_pop(L, 1); /* remove a[l] */
+ lua_rawgeti(L, 1, u);
+ if (sort_comp(L, -1, -2)) /* a[u]<a[i]? */
+ set2(L, i, u);
+ else
+ lua_pop(L, 2);
+ }
+ if (u-l == 2) break; /* only 3 elements */
+ lua_rawgeti(L, 1, i); /* Pivot */
+ lua_pushvalue(L, -1);
+ lua_rawgeti(L, 1, u-1);
+ set2(L, i, u-1);
+ /* a[l] <= P == a[u-1] <= a[u], only need to sort from l+1 to u-2 */
+ i = l; j = u-1;
+ for (;;) { /* invariant: a[l..i] <= P <= a[j..u] */
+ /* repeat ++i until a[i] >= P */
+ while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) {
+ if (i>=u) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[i] */
+ }
+ /* repeat --j until a[j] <= P */
+ while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) {
+ if (j<=l) lj_err_caller(L, LJ_ERR_TABSORT);
+ lua_pop(L, 1); /* remove a[j] */
+ }
+ if (j<i) {
+ lua_pop(L, 3); /* pop pivot, a[i], a[j] */
+ break;
+ }
+ set2(L, i, j);
+ }
+ lua_rawgeti(L, 1, u-1);
+ lua_rawgeti(L, 1, i);
+ set2(L, u-1, i); /* swap pivot (a[u-1]) with a[i] */
+ /* a[l..i-1] <= a[i] == P <= a[i+1..u] */
+ /* adjust so that smaller half is in [j..i] and larger one in [l..u] */
+ if (i-l < u-i) {
+ j=l; i=i-1; l=i+2;
+ } else {
+ j=i+1; i=u; u=j-2;
+ }
+ auxsort(L, j, i); /* call recursively the smaller one */
+ } /* repeat the routine for the larger one */
+}
+
+LJLIB_CF(table_sort)
+{
+ GCtab *t = lj_lib_checktab(L, 1);
+ int32_t n = (int32_t)lj_tab_len(t);
+ lua_settop(L, 2);
+ if (!tvisnil(L->base+1))
+ lj_lib_checkfunc(L, 2);
+ auxsort(L, 1, n);
+ return 0;
+}
+
+#if LJ_52
+LJLIB_PUSH("n")
+LJLIB_CF(table_pack)
+{
+ TValue *array, *base = L->base;
+ MSize i, n = (uint32_t)(L->top - base);
+ GCtab *t = lj_tab_new(L, n ? n+1 : 0, 1);
+ /* NOBARRIER: The table is new (marked white). */
+ setintV(lj_tab_setstr(L, t, strV(lj_lib_upvalue(L, 1))), (int32_t)n);
+ for (array = tvref(t->array) + 1, i = 0; i < n; i++)
+ copyTV(L, &array[i], &base[i]);
+ settabV(L, base, t);
+ L->top = base+1;
+ lj_gc_check(L);
+ return 1;
+}
+#endif
+
+LJLIB_NOREG LJLIB_CF(table_new) LJLIB_REC(.)
+{
+ int32_t a = lj_lib_checkint(L, 1);
+ int32_t h = lj_lib_checkint(L, 2);
+ lua_createtable(L, a, h);
+ return 1;
+}
+
+LJLIB_NOREG LJLIB_CF(table_clear) LJLIB_REC(.)
+{
+ lj_tab_clear(lj_lib_checktab(L, 1));
+ return 0;
+}
+
+static int luaopen_table_new(lua_State *L)
+{
+ return lj_lib_postreg(L, lj_cf_table_new, FF_table_new, "new");
+}
+
+static int luaopen_table_clear(lua_State *L)
+{
+ return lj_lib_postreg(L, lj_cf_table_clear, FF_table_clear, "clear");
+}
+
+/* ------------------------------------------------------------------------ */
+
+#include "lj_libdef.h"
+
+LUALIB_API int luaopen_table(lua_State *L)
+{
+ LJ_LIB_REG(L, LUA_TABLIBNAME, table);
+#if LJ_52
+ lua_getglobal(L, "unpack");
+ lua_setfield(L, -2, "unpack");
+#endif
+ lj_lib_prereg(L, LUA_TABLIBNAME ".new", luaopen_table_new, tabV(L->top-1));
+ lj_lib_prereg(L, LUA_TABLIBNAME ".clear", luaopen_table_clear, tabV(L->top-1));
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_alloc.c b/libs/luajit-cmake/luajit/src/lj_alloc.c
new file mode 100644
index 0000000..20e6049
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_alloc.c
@@ -0,0 +1,1485 @@
+/*
+** Bundled memory allocator.
+**
+** Beware: this is a HEAVILY CUSTOMIZED version of dlmalloc.
+** The original bears the following remark:
+**
+** This is a version (aka dlmalloc) of malloc/free/realloc written by
+** Doug Lea and released to the public domain, as explained at
+** https://creativecommons.org/licenses/publicdomain.
+**
+** * Version pre-2.8.4 Wed Mar 29 19:46:29 2006 (dl at gee)
+**
+** No additional copyright is claimed over the customizations.
+** Please do NOT bother the original author about this version here!
+**
+** If you want to use dlmalloc in another project, you should get
+** the original from: ftp://gee.cs.oswego.edu/pub/misc/
+** For thread-safe derivatives, take a look at:
+** - ptmalloc: https://www.malloc.de/
+** - nedmalloc: https://www.nedprod.com/programs/portable/nedmalloc/
+*/
+
+#define lj_alloc_c
+#define LUA_CORE
+
+/* To get the mremap prototype. Must be defined before any system includes. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#include "lj_def.h"
+#include "lj_arch.h"
+#include "lj_alloc.h"
+#include "lj_prng.h"
+
+#ifndef LUAJIT_USE_SYSMALLOC
+
+#define MAX_SIZE_T (~(size_t)0)
+#define MALLOC_ALIGNMENT ((size_t)8U)
+
+#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#define DEFAULT_MMAP_THRESHOLD ((size_t)128U * (size_t)1024U)
+#define MAX_RELEASE_CHECK_RATE 255
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
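+
+/* Worked example (illustrative): with MALLOC_ALIGNMENT == 8,
+** align_offset(0x1000) == 0 and align_offset(0x1003) == 5, since
+** (8 - (0x1003 & 7)) & 7 == 5 pads the address up to 0x1008.
+*/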
+
+/* -------------------------- MMAP support ------------------------------- */
+
+#define MFAIL ((void *)(MAX_SIZE_T))
+#define CMFAIL ((char *)(MFAIL)) /* defined for convenience */
+
+#define IS_DIRECT_BIT (SIZE_T_ONE)
+
+
+/* Determine system-specific block allocation method. */
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#define LJ_ALLOC_VIRTUALALLOC 1
+
+#if LJ_64 && !LJ_GC64
+#define LJ_ALLOC_NTAVM 1
+#endif
+
+#else
+
+#include <errno.h>
+/* If this include fails, then rebuild with: -DLUAJIT_USE_SYSMALLOC */
+#include <sys/mman.h>
+
+#define LJ_ALLOC_MMAP 1
+
+#if LJ_64
+
+#define LJ_ALLOC_MMAP_PROBE 1
+
+#if LJ_GC64
+#define LJ_ALLOC_MBITS 47 /* 128 TB in LJ_GC64 mode. */
+#elif LJ_TARGET_X64 && LJ_HASJIT
+/* Due to limitations in the x64 compiler backend. */
+#define LJ_ALLOC_MBITS 31 /* 2 GB on x64 with !LJ_GC64. */
+#else
+#define LJ_ALLOC_MBITS 32 /* 4 GB on other archs with !LJ_GC64. */
+#endif
+
+#endif
+
+#if LJ_64 && !LJ_GC64 && defined(MAP_32BIT)
+#define LJ_ALLOC_MMAP32 1
+#endif
+
+#if LJ_TARGET_LINUX
+#define LJ_ALLOC_MREMAP 1
+#endif
+
+#endif
+
+
+#if LJ_ALLOC_VIRTUALALLOC
+
+#if LJ_ALLOC_NTAVM
+/* Undocumented, but hey, that's what we all love so much about Windows. */
+typedef long (*PNTAVM)(HANDLE handle, void **addr, ULONG_PTR zbits,
+ size_t *size, ULONG alloctype, ULONG prot);
+static PNTAVM ntavm;
+
+/* Number of top bits of the lower 32 bits of an address that must be zero.
+** Apparently 0 gives us full 64 bit addresses and 1 gives us the lower 2GB.
+*/
+#define NTAVM_ZEROBITS 1
+
+static void init_mmap(void)
+{
+ ntavm = (PNTAVM)GetProcAddress(GetModuleHandleA("ntdll.dll"),
+ "NtAllocateVirtualMemory");
+}
+#define INIT_MMAP() init_mmap()
+
+/* Win64 32 bit MMAP via NtAllocateVirtualMemory. */
+static void *mmap_plain(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = NULL;
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
+ MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ SetLastError(olderr);
+ return st == 0 ? ptr : MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static void *direct_mmap(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = NULL;
+ long st = ntavm(INVALID_HANDLE_VALUE, &ptr, NTAVM_ZEROBITS, &size,
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, PAGE_READWRITE);
+ SetLastError(olderr);
+ return st == 0 ? ptr : MFAIL;
+}
+
+#else
+
+/* Win32 MMAP via VirtualAlloc */
+static void *mmap_plain(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ SetLastError(olderr);
+ return ptr ? ptr : MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static void *direct_mmap(size_t size)
+{
+ DWORD olderr = GetLastError();
+ void *ptr = LJ_WIN_VALLOC(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+ PAGE_READWRITE);
+ SetLastError(olderr);
+ return ptr ? ptr : MFAIL;
+}
+
+#endif
+
+#define CALL_MMAP(prng, size) mmap_plain(size)
+#define DIRECT_MMAP(prng, size) direct_mmap(size)
+
+/* This function supports releasing coalesced segments */
+static int CALL_MUNMAP(void *ptr, size_t size)
+{
+ DWORD olderr = GetLastError();
+ MEMORY_BASIC_INFORMATION minfo;
+ char *cptr = (char *)ptr;
+ while (size) {
+ if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+ return -1;
+ if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+ minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+ return -1;
+ if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+ return -1;
+ cptr += minfo.RegionSize;
+ size -= minfo.RegionSize;
+ }
+ SetLastError(olderr);
+ return 0;
+}
+
+#elif LJ_ALLOC_MMAP
+
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+
+#if LJ_ALLOC_MMAP_PROBE
+
+#ifdef MAP_TRYFIXED
+#define MMAP_FLAGS_PROBE (MMAP_FLAGS|MAP_TRYFIXED)
+#else
+#define MMAP_FLAGS_PROBE MMAP_FLAGS
+#endif
+
+#define LJ_ALLOC_MMAP_PROBE_MAX 30
+#define LJ_ALLOC_MMAP_PROBE_LINEAR 5
+
+#define LJ_ALLOC_MMAP_PROBE_LOWER ((uintptr_t)0x4000)
+
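+/* Probing strategy: up to PROBE_MAX attempts. While a hint from a
+** previous success exists, bump it linearly first, then make one
+** hint-less attempt to pick up a fresh ASLR address; after that, try
+** pseudo-random addresses. Accepted results must lie below
+** 1<<LJ_ALLOC_MBITS and at or above PROBE_LOWER.
+*/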
+static void *mmap_probe(PRNGState *rs, size_t size)
+{
+ /* Hint for next allocation. Doesn't need to be thread-safe. */
+ static uintptr_t hint_addr = 0;
+ int olderr = errno;
+ int retry;
+ for (retry = 0; retry < LJ_ALLOC_MMAP_PROBE_MAX; retry++) {
+ void *p = mmap((void *)hint_addr, size, MMAP_PROT, MMAP_FLAGS_PROBE, -1, 0);
+ uintptr_t addr = (uintptr_t)p;
+ if ((addr >> LJ_ALLOC_MBITS) == 0 && addr >= LJ_ALLOC_MMAP_PROBE_LOWER &&
+ ((addr + size) >> LJ_ALLOC_MBITS) == 0) {
+ /* We got a suitable address. Bump the hint address. */
+ hint_addr = addr + size;
+ errno = olderr;
+ return p;
+ }
+ if (p != MFAIL) {
+ munmap(p, size);
+ } else if (errno == ENOMEM) {
+ return MFAIL;
+ }
+ if (hint_addr) {
+ /* First, try linear probing. */
+ if (retry < LJ_ALLOC_MMAP_PROBE_LINEAR) {
+ hint_addr += 0x1000000;
+ if (((hint_addr + size) >> LJ_ALLOC_MBITS) != 0)
+ hint_addr = 0;
+ continue;
+ } else if (retry == LJ_ALLOC_MMAP_PROBE_LINEAR) {
+ /* Next, try a no-hint probe to get back an ASLR address. */
+ hint_addr = 0;
+ continue;
+ }
+ }
+ /* Finally, try pseudo-random probing. */
+ do {
+ hint_addr = lj_prng_u64(rs) & (((uintptr_t)1<<LJ_ALLOC_MBITS)-LJ_PAGESIZE);
+ } while (hint_addr < LJ_ALLOC_MMAP_PROBE_LOWER);
+ }
+ errno = olderr;
+ return MFAIL;
+}
+
+#endif
+
+#if LJ_ALLOC_MMAP32
+
+#if LJ_TARGET_SOLARIS
+#define LJ_ALLOC_MMAP32_START ((uintptr_t)0x1000)
+#else
+#define LJ_ALLOC_MMAP32_START ((uintptr_t)0)
+#endif
+
+#if LJ_ALLOC_MMAP_PROBE
+static void *mmap_map32(PRNGState *rs, size_t size)
+#else
+static void *mmap_map32(size_t size)
+#endif
+{
+#if LJ_ALLOC_MMAP_PROBE
+ static int fallback = 0;
+ if (fallback)
+ return mmap_probe(rs, size);
+#endif
+ {
+ int olderr = errno;
+ void *ptr = mmap((void *)LJ_ALLOC_MMAP32_START, size, MMAP_PROT, MAP_32BIT|MMAP_FLAGS, -1, 0);
+ errno = olderr;
+    /* This only allows 1GB on Linux. So fall back to probing to get 2GB. */
+#if LJ_ALLOC_MMAP_PROBE
+ if (ptr == MFAIL) {
+ fallback = 1;
+ return mmap_probe(rs, size);
+ }
+#endif
+ return ptr;
+ }
+}
+
+#endif
+
+#if LJ_ALLOC_MMAP32
+#if LJ_ALLOC_MMAP_PROBE
+#define CALL_MMAP(prng, size) mmap_map32(prng, size)
+#else
+#define CALL_MMAP(prng, size) mmap_map32(size)
+#endif
+#elif LJ_ALLOC_MMAP_PROBE
+#define CALL_MMAP(prng, size) mmap_probe(prng, size)
+#else
+static void *mmap_plain(size_t size)
+{
+ int olderr = errno;
+ void *ptr = mmap(NULL, size, MMAP_PROT, MMAP_FLAGS, -1, 0);
+ errno = olderr;
+ return ptr;
+}
+#define CALL_MMAP(prng, size) mmap_plain(size)
+#endif
+
+#if LJ_64 && !LJ_GC64 && ((defined(__FreeBSD__) && __FreeBSD__ < 10) || defined(__FreeBSD_kernel__)) && !LJ_TARGET_PS4 && !LJ_TARGET_PS5
+
+#include <sys/resource.h>
+
+static void init_mmap(void)
+{
+ struct rlimit rlim;
+ rlim.rlim_cur = rlim.rlim_max = 0x10000;
+ setrlimit(RLIMIT_DATA, &rlim); /* Ignore result. May fail later. */
+}
+#define INIT_MMAP() init_mmap()
+
+#endif
+
+static int CALL_MUNMAP(void *ptr, size_t size)
+{
+ int olderr = errno;
+ int ret = munmap(ptr, size);
+ errno = olderr;
+ return ret;
+}
+
+#if LJ_ALLOC_MREMAP
+/* Need to define _GNU_SOURCE to get the mremap prototype. */
+static void *CALL_MREMAP_(void *ptr, size_t osz, size_t nsz, int flags)
+{
+ int olderr = errno;
+ ptr = mremap(ptr, osz, nsz, flags);
+ errno = olderr;
+ return ptr;
+}
+
+#define CALL_MREMAP(addr, osz, nsz, mv) CALL_MREMAP_((addr), (osz), (nsz), (mv))
+#define CALL_MREMAP_NOMOVE 0
+#define CALL_MREMAP_MAYMOVE 1
+#if LJ_64 && (!LJ_GC64 || LJ_TARGET_ARM64)
+#define CALL_MREMAP_MV CALL_MREMAP_NOMOVE
+#else
+#define CALL_MREMAP_MV CALL_MREMAP_MAYMOVE
+#endif
+#endif
+
+#endif
+
+
+#ifndef INIT_MMAP
+#define INIT_MMAP() ((void)0)
+#endif
+
+#ifndef DIRECT_MMAP
+#define DIRECT_MMAP(prng, s) CALL_MMAP(prng, s)
+#endif
+
+#ifndef CALL_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) ((void)osz, MFAIL)
+#endif
+
+/* ----------------------- Chunk representations ------------------------ */
+
+struct malloc_chunk {
+ size_t prev_foot; /* Size of previous chunk (if free). */
+ size_t head; /* Size and inuse bits. */
+ struct malloc_chunk *fd; /* double links -- used only if free. */
+ struct malloc_chunk *bk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk *mchunkptr;
+typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
+typedef size_t bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+#define CHUNK_OVERHEAD (SIZE_T_SIZE)
+
+/* Direct chunks need a second word of overhead ... */
+#define DIRECT_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define DIRECT_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+ ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p) ((void *)((char *)(p) + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem) ((mchunkptr)((char *)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
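+
+/* Illustration (64-bit sizes assumed): TWO_SIZE_T_SIZES is 16, so the
+** user pointer sits 16 bytes past the chunk base, skipping the
+** prev_foot and head words; mem2chunk() simply undoes that offset.
+*/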
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((~MIN_CHUNK_SIZE+1) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+ (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+ (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
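+
+/* Worked example (64-bit sizes assumed): request2size(100) ==
+** (100 + 8 + 7) & ~7 == 112, i.e. the request plus one size_t of
+** overhead, rounded up to 8-byte alignment; requests below MIN_REQUEST
+** are bumped to MIN_CHUNK_SIZE.
+*/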
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p) ((p)->head & CINUSE_BIT)
+#define pinuse(p) ((p)->head & PINUSE_BIT)
+#define chunksize(p) ((p)->head & ~(INUSE_BITS))
+
+#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
+#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
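+
+/* Example: a 48-byte in-use chunk whose predecessor is also in use
+** stores head == 48|PINUSE_BIT|CINUSE_BIT == 51; chunksize() masks the
+** two low bits back off, since sizes are always 8-byte aligned.
+*/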
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~INUSE_BITS)))
+#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot)
+#define set_foot(p, s) (((mchunkptr)((char *)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+ ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+ (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+#define is_direct(p)\
+ (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_DIRECT_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_direct(p)? DIRECT_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t prev_foot;
+ size_t head;
+ struct malloc_tree_chunk *fd;
+ struct malloc_tree_chunk *bk;
+
+ struct malloc_tree_chunk *child[2];
+ struct malloc_tree_chunk *parent;
+ bindex_t index;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk *tchunkptr;
+typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
+
+/* ----------------------------- Segments -------------------------------- */
+
+struct malloc_segment {
+ char *base; /* base address */
+ size_t size; /* allocated size */
+ struct malloc_segment *next; /* ptr to next segment */
+};
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment *msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
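+/* The allocator state, dlmalloc-style: 'dv' is the "designated victim"
+** chunk, preferentially split to serve small requests; 'top' borders
+** the end of the current top segment; the two bitmaps record which
+** small/tree bins are currently non-empty.
+*/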
+struct malloc_state {
+ binmap_t smallmap;
+ binmap_t treemap;
+ size_t dvsize;
+ size_t topsize;
+ mchunkptr dv;
+ mchunkptr top;
+ size_t trim_check;
+ size_t release_checks;
+ mchunkptr smallbins[(NSMALLBINS+1)*2];
+ tbinptr treebins[NTREEBINS];
+ msegment seg;
+ PRNGState *prng;
+};
+
+typedef struct malloc_state *mstate;
+
+#define is_initialized(M) ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (LJ_PAGESIZE - SIZE_T_ONE)) & ~(LJ_PAGESIZE - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+ (((S) + (DEFAULT_GRANULARITY - SIZE_T_ONE))\
+ & ~(DEFAULT_GRANULARITY - SIZE_T_ONE))
+
+#if LJ_TARGET_WINDOWS
+#define mmap_align(S) granularity_align(S)
+#else
+#define mmap_align(S) page_align(S)
+#endif
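+
+/* Worked example: granularity_align(200000) rounds up to 262144
+** (2 * 128K), whereas page_align() rounds to the (typically 4K)
+** LJ_PAGESIZE instead.
+*/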
+
+/* True if segment S holds address A */
+#define segment_holds(S, A)\
+ ((char *)(A) >= S->base && (char *)(A) < S->base + S->size)
+
+/* Return segment holding given address */
+static msegmentptr segment_holding(mstate m, char *addr)
+{
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if (addr >= sp->base && addr < sp->base + sp->size)
+ return sp;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/* Return true if segment contains a segment link */
+static int has_segment_link(mstate m, msegmentptr ss)
+{
+ msegmentptr sp = &m->seg;
+ for (;;) {
+ if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size)
+ return 1;
+ if ((sp = sp->next) == 0)
+ return 0;
+ }
+}
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+ (align_offset(TWO_SIZE_T_SIZES)+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s) ((s) >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
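+
+/* Example: each smallbin holds chunks of exactly one size, 8*i bytes
+** for bin i, so a 32-byte chunk lives in bin 4; sizes of 256 bytes
+** and up go to the treebins.
+*/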
+
+/* Addressing by index. Bin headers overlap in the smallbins array, so
+** only their fd/bk fields have real storage; hence the (i)<<1 scaling
+** and the (NSMALLBINS+1)*2 array size above. */
+#define smallbin_at(M, i) ((sbinptr)((char *)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i) (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I */
+#define compute_tree_index(S, I)\
+{\
+ unsigned int X = (unsigned int)(S >> TREEBIN_SHIFT);\
+ if (X == 0) {\
+ I = 0;\
+ } else if (X > 0xFFFF) {\
+ I = NTREEBINS-1;\
+ } else {\
+ unsigned int K = lj_fls(X);\
+ I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+ }\
+}
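+
+/* Worked example: sizes 256..383 map to treebin 0, 384..511 to
+** treebin 1, 512..767 to treebin 2, and so on -- each power-of-two
+** size range is split into two bins at its midpoint.
+*/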
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+ (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | (~(x<<1)+1))
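+
+/* Example: left_bits(0x04) == 0xFFFFFFF8, i.e. every bit strictly
+** above bit 2 is set -- used to locate the next non-empty bin at a
+** higher index than the current one.
+*/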
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char *)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/* Link a free chunk into a smallbin */
+#define insert_small_chunk(M, P, S) {\
+ bindex_t I = small_index(S);\
+ mchunkptr B = smallbin_at(M, I);\
+ mchunkptr F = B;\
+ if (!smallmap_is_marked(M, I))\
+ mark_smallmap(M, I);\
+ else\
+ F = B->fd;\
+ B->fd = P;\
+ F->bk = P;\
+ P->fd = F;\
+ P->bk = B;\
+}
+
+/* Unlink a chunk from a smallbin */
+#define unlink_small_chunk(M, P, S) {\
+ mchunkptr F = P->fd;\
+ mchunkptr B = P->bk;\
+ bindex_t I = small_index(S);\
+ if (F == B) {\
+ clear_smallmap(M, I);\
+ } else {\
+ F->bk = B;\
+ B->fd = F;\
+ }\
+}
+
+/* Unlink the first chunk from a smallbin */
+#define unlink_first_small_chunk(M, B, P, I) {\
+ mchunkptr F = P->fd;\
+ if (B == F) {\
+ clear_smallmap(M, I);\
+ } else {\
+ B->fd = F;\
+ F->bk = B;\
+ }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+#define replace_dv(M, P, S) {\
+ size_t DVS = M->dvsize;\
+ if (DVS != 0) {\
+ mchunkptr DV = M->dv;\
+ insert_small_chunk(M, DV, DVS);\
+ }\
+ M->dvsize = S;\
+ M->dv = P;\
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+#define insert_large_chunk(M, X, S) {\
+ tbinptr *H;\
+ bindex_t I;\
+ compute_tree_index(S, I);\
+ H = treebin_at(M, I);\
+ X->index = I;\
+ X->child[0] = X->child[1] = 0;\
+ if (!treemap_is_marked(M, I)) {\
+ mark_treemap(M, I);\
+ *H = X;\
+ X->parent = (tchunkptr)H;\
+ X->fd = X->bk = X;\
+ } else {\
+ tchunkptr T = *H;\
+ size_t K = S << leftshift_for_tree_index(I);\
+ for (;;) {\
+ if (chunksize(T) != S) {\
+ tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+ K <<= 1;\
+ if (*C != 0) {\
+ T = *C;\
+ } else {\
+ *C = X;\
+ X->parent = T;\
+ X->fd = X->bk = X;\
+ break;\
+ }\
+ } else {\
+ tchunkptr F = T->fd;\
+ T->fd = F->bk = X;\
+ X->fd = F;\
+ X->bk = T;\
+ X->parent = 0;\
+ break;\
+ }\
+ }\
+ }\
+}
+
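+/* Unlink chunk X from its tree: replace it with a same-size ring
+** sibling if one exists, otherwise promote the rightmost leaf of its
+** subtree into its place.
+*/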
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ F->bk = R;\
+ R->fd = F;\
+ } else {\
+ tchunkptr *RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr *CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ *RP = 0;\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr *H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ } else {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ if (R != 0) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) { insert_small_chunk(M, P, S)\
+ } else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) { unlink_small_chunk(M, P, S)\
+ } else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
+static void *direct_alloc(mstate m, size_t nb)
+{
+ size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ if (LJ_LIKELY(mmsize > nb)) { /* Check for wrap around 0 */
+ char *mm = (char *)(DIRECT_MMAP(m->prng, mmsize));
+ if (mm != CMFAIL) {
+ size_t offset = align_offset(chunk2mem(mm));
+ size_t psize = mmsize - offset - DIRECT_FOOT_PAD;
+ mchunkptr p = (mchunkptr)(mm + offset);
+ p->prev_foot = offset | IS_DIRECT_BIT;
+ p->head = psize|CINUSE_BIT;
+ chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+ return chunk2mem(p);
+ }
+ }
+ UNUSED(m);
+ return NULL;
+}
+
+static mchunkptr direct_resize(mchunkptr oldp, size_t nb)
+{
+ size_t oldsize = chunksize(oldp);
+ if (is_small(nb)) /* Can't shrink direct regions below small size */
+ return NULL;
+ /* Keep old chunk if big enough but not too big */
+ if (oldsize >= nb + SIZE_T_SIZE &&
+ (oldsize - nb) <= (DEFAULT_GRANULARITY >> 1)) {
+ return oldp;
+ } else {
+ size_t offset = oldp->prev_foot & ~IS_DIRECT_BIT;
+ size_t oldmmsize = oldsize + offset + DIRECT_FOOT_PAD;
+ size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ char *cp = (char *)CALL_MREMAP((char *)oldp - offset,
+ oldmmsize, newmmsize, CALL_MREMAP_MV);
+ if (cp != CMFAIL) {
+ mchunkptr newp = (mchunkptr)(cp + offset);
+ size_t psize = newmmsize - offset - DIRECT_FOOT_PAD;
+ newp->head = psize|CINUSE_BIT;
+ chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+ chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+ return newp;
+ }
+ }
+ return NULL;
+}
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize)
+{
+ /* Ensure alignment */
+ size_t offset = align_offset(chunk2mem(p));
+ p = (mchunkptr)((char *)p + offset);
+ psize -= offset;
+
+ m->top = p;
+ m->topsize = psize;
+ p->head = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+ m->trim_check = DEFAULT_TRIM_THRESHOLD; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m)
+{
+ /* Establish circular links for smallbins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; i++) {
+ sbinptr bin = smallbin_at(m,i);
+ bin->fd = bin->bk = bin;
+ }
+}
+
+/* Allocate chunk and prepend remainder with chunk in successor base. */
+static void *prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
+{
+ mchunkptr p = align_as_chunk(newbase);
+ mchunkptr oldfirst = align_as_chunk(oldbase);
+ size_t psize = (size_t)((char *)oldfirst - (char *)p);
+ mchunkptr q = chunk_plus_offset(p, nb);
+ size_t qsize = psize - nb;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+ /* consolidate remainder with first chunk of old base */
+ if (oldfirst == m->top) {
+ size_t tsize = m->topsize += qsize;
+ m->top = q;
+ q->head = tsize | PINUSE_BIT;
+ } else if (oldfirst == m->dv) {
+ size_t dsize = m->dvsize += qsize;
+ m->dv = q;
+ set_size_and_pinuse_of_free_chunk(q, dsize);
+ } else {
+ if (!cinuse(oldfirst)) {
+ size_t nsize = chunksize(oldfirst);
+ unlink_chunk(m, oldfirst, nsize);
+ oldfirst = chunk_plus_offset(oldfirst, nsize);
+ qsize += nsize;
+ }
+ set_free_with_pinuse(q, qsize, oldfirst);
+ insert_chunk(m, q, qsize);
+ }
+
+ return chunk2mem(p);
+}
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char *tbase, size_t tsize)
+{
+ /* Determine locations and sizes of segment, fenceposts, old top */
+ char *old_top = (char *)m->top;
+ msegmentptr oldsp = segment_holding(m, old_top);
+ char *old_end = oldsp->base + oldsp->size;
+ size_t ssize = pad_request(sizeof(struct malloc_segment));
+ char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+ size_t offset = align_offset(chunk2mem(rawsp));
+ char *asp = rawsp + offset;
+ char *csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+ mchunkptr sp = (mchunkptr)csp;
+ msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+ mchunkptr tnext = chunk_plus_offset(sp, ssize);
+ mchunkptr p = tnext;
+
+ /* reset top to new space */
+ init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+ /* Set up segment record */
+ set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+ *ss = m->seg; /* Push current record */
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->seg.next = ss;
+
+ /* Insert trailing fenceposts */
+ for (;;) {
+ mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+ p->head = FENCEPOST_HEAD;
+ if ((char *)(&(nextp->head)) < old_end)
+ p = nextp;
+ else
+ break;
+ }
+
+ /* Insert the rest of old top into a bin as an ordinary free chunk */
+ if (csp != old_top) {
+ mchunkptr q = (mchunkptr)old_top;
+ size_t psize = (size_t)(csp - old_top);
+ mchunkptr tn = chunk_plus_offset(q, psize);
+ set_free_with_pinuse(q, psize, tn);
+ insert_chunk(m, q, psize);
+ }
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+static void *alloc_sys(mstate m, size_t nb)
+{
+ char *tbase = CMFAIL;
+ size_t tsize = 0;
+
+ /* Directly map large chunks */
+ if (LJ_UNLIKELY(nb >= DEFAULT_MMAP_THRESHOLD)) {
+ void *mem = direct_alloc(m, nb);
+ if (mem != 0)
+ return mem;
+ }
+
+ {
+ size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
+ size_t rsize = granularity_align(req);
+ if (LJ_LIKELY(rsize > nb)) { /* Fail if wraps around zero */
+ char *mp = (char *)(CALL_MMAP(m->prng, rsize));
+ if (mp != CMFAIL) {
+ tbase = mp;
+ tsize = rsize;
+ }
+ }
+ }
+
+ if (tbase != CMFAIL) {
+ msegmentptr sp = &m->seg;
+ /* Try to merge with an existing segment */
+ while (sp != 0 && tbase != sp->base + sp->size)
+ sp = sp->next;
+ if (sp != 0 && segment_holds(sp, m->top)) { /* append */
+ sp->size += tsize;
+ init_top(m, m->top, m->topsize + tsize);
+ } else {
+ sp = &m->seg;
+ while (sp != 0 && sp->base != tbase + tsize)
+ sp = sp->next;
+ if (sp != 0) {
+ char *oldbase = sp->base;
+ sp->base = tbase;
+ sp->size += tsize;
+ return prepend_alloc(m, tbase, oldbase, nb);
+ } else {
+ add_segment(m, tbase, tsize);
+ }
+ }
+
+ if (nb < m->topsize) { /* Allocate from new or extended top space */
+ size_t rsize = m->topsize -= nb;
+ mchunkptr p = m->top;
+ mchunkptr r = m->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+ return chunk2mem(p);
+ }
+ }
+
+ return NULL;
+}
+
+/* ----------------------- system deallocation -------------------------- */
+
+/* Unmap and unlink any mmapped segments that don't contain used chunks */
+static size_t release_unused_segments(mstate m)
+{
+ size_t released = 0;
+ size_t nsegs = 0;
+ msegmentptr pred = &m->seg;
+ msegmentptr sp = pred->next;
+ while (sp != 0) {
+ char *base = sp->base;
+ size_t size = sp->size;
+ msegmentptr next = sp->next;
+ nsegs++;
+ {
+ mchunkptr p = align_as_chunk(base);
+ size_t psize = chunksize(p);
+ /* Can unmap if first chunk holds entire segment and not pinned */
+ if (!cinuse(p) && (char *)p + psize >= base + size - TOP_FOOT_SIZE) {
+ tchunkptr tp = (tchunkptr)p;
+ if (p == m->dv) {
+ m->dv = 0;
+ m->dvsize = 0;
+ } else {
+ unlink_large_chunk(m, tp);
+ }
+ if (CALL_MUNMAP(base, size) == 0) {
+ released += size;
+ /* unlink obsoleted record */
+ sp = pred;
+ sp->next = next;
+ } else { /* back out if cannot unmap */
+ insert_large_chunk(m, tp, psize);
+ }
+ }
+ }
+ pred = sp;
+ sp = next;
+ }
+ /* Reset check counter */
+ m->release_checks = nsegs > MAX_RELEASE_CHECK_RATE ?
+ nsegs : MAX_RELEASE_CHECK_RATE;
+ return released;
+}
+
+static int alloc_trim(mstate m, size_t pad)
+{
+ size_t released = 0;
+ if (pad < MAX_REQUEST && is_initialized(m)) {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->topsize > pad) {
+ /* Shrink top space in granularity-size units, keeping at least one */
+ size_t unit = DEFAULT_GRANULARITY;
+ size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+ SIZE_T_ONE) * unit;
+ msegmentptr sp = segment_holding(m, (char *)m->top);
+
+ if (sp->size >= extra &&
+ !has_segment_link(m, sp)) { /* can't shrink if pinned */
+ size_t newsize = sp->size - extra;
+ /* Prefer mremap, fall back to munmap */
+ if ((CALL_MREMAP(sp->base, sp->size, newsize, CALL_MREMAP_NOMOVE) != MFAIL) ||
+ (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
+ released = extra;
+ }
+ }
+
+ if (released != 0) {
+ sp->size -= released;
+ init_top(m, m->top, m->topsize - released);
+ }
+ }
+
+ /* Unmap any unused mmapped segments */
+ released += release_unused_segments(m);
+
+ /* On failure, disable autotrim to avoid repeated failed future calls */
+ if (released == 0 && m->topsize > m->trim_check)
+ m->trim_check = MAX_SIZE_T;
+ }
+
+ return (released != 0)? 1 : 0;
+}
+
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+static void *tmalloc_large(mstate m, size_t nb)
+{
+ tchunkptr v = 0;
+ size_t rsize = ~nb+1; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ compute_tree_index(nb, idx);
+
+ if ((t = *treebin_at(m, idx)) != 0) {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << leftshift_for_tree_index(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;) {
+ tchunkptr rt;
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->child[1];
+ t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0) {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+
+ if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
+ if (leftbits != 0)
+ t = *treebin_at(m, lj_ffs(leftbits));
+ }
+
+ while (t != 0) { /* find smallest of tree or subtree */
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = leftmost_child(t);
+ }
+
+ /* If dv is a better fit, return NULL so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
+ mchunkptr r = chunk_plus_offset(v, nb);
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ insert_chunk(m, r, rsize);
+ }
+ return chunk2mem(v);
+ }
+ return NULL;
+}
+
+/* allocate a small request from the best fitting chunk in a treebin */
+static void *tmalloc_small(mstate m, size_t nb)
+{
+ tchunkptr t, v;
+ mchunkptr r;
+ size_t rsize;
+ bindex_t i = lj_ffs(m->treemap);
+
+ v = t = *treebin_at(m, i);
+ rsize = chunksize(t) - nb;
+
+ while ((t = leftmost_child(t)) != 0) {
+ size_t trem = chunksize(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ r = chunk_plus_offset(v, nb);
+ unlink_large_chunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(m, v, (rsize + nb));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(m, r, rsize);
+ }
+ return chunk2mem(v);
+}
+
+/* ----------------------------------------------------------------------- */
+
+void *lj_alloc_create(PRNGState *rs)
+{
+ size_t tsize = DEFAULT_GRANULARITY;
+ char *tbase;
+ INIT_MMAP();
+ UNUSED(rs);
+ tbase = (char *)(CALL_MMAP(rs, tsize));
+ if (tbase != CMFAIL) {
+ size_t msize = pad_request(sizeof(struct malloc_state));
+ mchunkptr mn;
+ mchunkptr msp = align_as_chunk(tbase);
+ mstate m = (mstate)(chunk2mem(msp));
+ memset(m, 0, msize);
+ msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
+ m->seg.base = tbase;
+ m->seg.size = tsize;
+ m->release_checks = MAX_RELEASE_CHECK_RATE;
+ init_bins(m);
+ mn = next_chunk(mem2chunk(m));
+ init_top(m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
+ return m;
+ }
+ return NULL;
+}
+
+void lj_alloc_setprng(void *msp, PRNGState *rs)
+{
+ mstate ms = (mstate)msp;
+ ms->prng = rs;
+}
+
+void lj_alloc_destroy(void *msp)
+{
+ mstate ms = (mstate)msp;
+ msegmentptr sp = &ms->seg;
+ while (sp != 0) {
+ char *base = sp->base;
+ size_t size = sp->size;
+ sp = sp->next;
+ CALL_MUNMAP(base, size);
+ }
+}
+
+static LJ_NOINLINE void *lj_alloc_malloc(void *msp, size_t nsize)
+{
+ mstate ms = (mstate)msp;
+ void *mem;
+ size_t nb;
+ if (nsize <= MAX_SMALL_REQUEST) {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (nsize < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(nsize);
+ idx = small_index(nb);
+ smallbits = ms->smallmap >> idx;
+
+ if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(ms, idx);
+ p = b->fd;
+ unlink_first_small_chunk(ms, b, p, idx);
+ set_inuse_and_pinuse(ms, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ return mem;
+ } else if (nb > ms->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ bindex_t i = lj_ffs(leftbits);
+ b = smallbin_at(ms, i);
+ p = b->fd;
+ unlink_first_small_chunk(ms, b, p, i);
+ rsize = small_index2size(i) - nb;
+      /* A fit here cannot be remainderless if sizes are 4 bytes. */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) {
+ set_inuse_and_pinuse(ms, p, small_index2size(i));
+ } else {
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(ms, r, rsize);
+ }
+ mem = chunk2mem(p);
+ return mem;
+ } else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
+ return mem;
+ }
+ }
+ } else if (nsize >= MAX_REQUEST) {
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ } else {
+ nb = pad_request(nsize);
+ if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
+ return mem;
+ }
+ }
+
+ if (nb <= ms->dvsize) {
+ size_t rsize = ms->dvsize - nb;
+ mchunkptr p = ms->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
+ ms->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ } else { /* exhaust dv */
+ size_t dvs = ms->dvsize;
+ ms->dvsize = 0;
+ ms->dv = 0;
+ set_inuse_and_pinuse(ms, p, dvs);
+ }
+ mem = chunk2mem(p);
+ return mem;
+ } else if (nb < ms->topsize) { /* Split top */
+ size_t rsize = ms->topsize -= nb;
+ mchunkptr p = ms->top;
+ mchunkptr r = ms->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+ mem = chunk2mem(p);
+ return mem;
+ }
+ return alloc_sys(ms, nb);
+}
+
+static LJ_NOINLINE void *lj_alloc_free(void *msp, void *ptr)
+{
+ if (ptr != 0) {
+ mchunkptr p = mem2chunk(ptr);
+ mstate fm = (mstate)msp;
+ size_t psize = chunksize(p);
+ mchunkptr next = chunk_plus_offset(p, psize);
+ if (!pinuse(p)) {
+ size_t prevsize = p->prev_foot;
+ if ((prevsize & IS_DIRECT_BIT) != 0) {
+ prevsize &= ~IS_DIRECT_BIT;
+ psize += prevsize + DIRECT_FOOT_PAD;
+ CALL_MUNMAP((char *)p - prevsize, psize);
+ return NULL;
+ } else {
+ mchunkptr prev = chunk_minus_offset(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ /* consolidate backward */
+ if (p != fm->dv) {
+ unlink_chunk(fm, p, prevsize);
+ } else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+ fm->dvsize = psize;
+ set_free_with_pinuse(p, psize, next);
+ return NULL;
+ }
+ }
+ }
+ if (!cinuse(next)) { /* consolidate forward */
+ if (next == fm->top) {
+ size_t tsize = fm->topsize += psize;
+ fm->top = p;
+ p->head = tsize | PINUSE_BIT;
+ if (p == fm->dv) {
+ fm->dv = 0;
+ fm->dvsize = 0;
+ }
+ if (tsize > fm->trim_check)
+ alloc_trim(fm, 0);
+ return NULL;
+ } else if (next == fm->dv) {
+ size_t dsize = fm->dvsize += psize;
+ fm->dv = p;
+ set_size_and_pinuse_of_free_chunk(p, dsize);
+ return NULL;
+ } else {
+ size_t nsize = chunksize(next);
+ psize += nsize;
+ unlink_chunk(fm, next, nsize);
+ set_size_and_pinuse_of_free_chunk(p, psize);
+ if (p == fm->dv) {
+ fm->dvsize = psize;
+ return NULL;
+ }
+ }
+ } else {
+ set_free_with_pinuse(p, psize, next);
+ }
+
+ if (is_small(psize)) {
+ insert_small_chunk(fm, p, psize);
+ } else {
+ tchunkptr tp = (tchunkptr)p;
+ insert_large_chunk(fm, tp, psize);
+ if (--fm->release_checks == 0)
+ release_unused_segments(fm);
+ }
+ }
+ return NULL;
+}
+
+static LJ_NOINLINE void *lj_alloc_realloc(void *msp, void *ptr, size_t nsize)
+{
+ if (nsize >= MAX_REQUEST) {
+ return NULL;
+ } else {
+ mstate m = (mstate)msp;
+ mchunkptr oldp = mem2chunk(ptr);
+ size_t oldsize = chunksize(oldp);
+ mchunkptr next = chunk_plus_offset(oldp, oldsize);
+ mchunkptr newp = 0;
+ size_t nb = request2size(nsize);
+
+ /* Try to either shrink or extend into top. Else malloc-copy-free */
+ if (is_direct(oldp)) {
+ newp = direct_resize(oldp, nb); /* this may return NULL. */
+ } else if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ newp = oldp;
+ if (rsize >= MIN_CHUNK_SIZE) {
+ mchunkptr rem = chunk_plus_offset(newp, nb);
+ set_inuse(m, newp, nb);
+ set_inuse(m, rem, rsize);
+ lj_alloc_free(m, chunk2mem(rem));
+ }
+ } else if (next == m->top && oldsize + m->topsize > nb) {
+ /* Expand into top */
+ size_t newsize = oldsize + m->topsize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = chunk_plus_offset(oldp, nb);
+ set_inuse(m, oldp, nb);
+      newtop->head = newtopsize|PINUSE_BIT;
+ m->top = newtop;
+ m->topsize = newtopsize;
+ newp = oldp;
+ }
+
+ if (newp != 0) {
+ return chunk2mem(newp);
+ } else {
+ void *newmem = lj_alloc_malloc(m, nsize);
+ if (newmem != 0) {
+ size_t oc = oldsize - overhead_for(oldp);
+ memcpy(newmem, ptr, oc < nsize ? oc : nsize);
+ lj_alloc_free(m, ptr);
+ }
+ return newmem;
+ }
+ }
+}
+
+void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize)
+{
+ (void)osize;
+ if (nsize == 0) {
+ return lj_alloc_free(msp, ptr);
+ } else if (ptr == NULL) {
+ return lj_alloc_malloc(msp, nsize);
+ } else {
+ return lj_alloc_realloc(msp, ptr, nsize);
+ }
+}
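+
+/* lj_alloc_f follows the lua_Alloc contract: nsize == 0 frees, a NULL
+** ptr allocates, anything else reallocates. osize can be ignored here,
+** since the chunk header already records the old size.
+*/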
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_alloc.h b/libs/luajit-cmake/luajit/src/lj_alloc.h
new file mode 100644
index 0000000..669f50b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_alloc.h
@@ -0,0 +1,18 @@
+/*
+** Bundled memory allocator.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_ALLOC_H
+#define _LJ_ALLOC_H
+
+#include "lj_def.h"
+
+#ifndef LUAJIT_USE_SYSMALLOC
+LJ_FUNC void *lj_alloc_create(PRNGState *rs);
+LJ_FUNC void lj_alloc_setprng(void *msp, PRNGState *rs);
+LJ_FUNC void lj_alloc_destroy(void *msp);
+LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_api.c b/libs/luajit-cmake/luajit/src/lj_api.c
new file mode 100644
index 0000000..e6b6747
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_api.c
@@ -0,0 +1,1313 @@
+/*
+** Public Lua/C API.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_api_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#include "lj_frame.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+
+/* -- Common helper functions --------------------------------------------- */
+
+#define lj_checkapi_slot(idx) \
+ lj_checkapi((idx) <= (L->top - L->base), "stack slot %d out of range", (idx))
+
+static TValue *index2adr(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ return o < L->top ? o : niltv(L);
+ } else if (idx > LUA_REGISTRYINDEX) {
+ lj_checkapi(idx != 0 && -idx <= L->top - L->base,
+ "bad stack slot %d", idx);
+ return L->top + idx;
+ } else if (idx == LUA_GLOBALSINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(L->env));
+ return o;
+ } else if (idx == LUA_REGISTRYINDEX) {
+ return registry(L);
+ } else {
+ GCfunc *fn = curr_func(L);
+ lj_checkapi(fn->c.gct == ~LJ_TFUNC && !isluafunc(fn),
+ "calling frame is not a C function");
+ if (idx == LUA_ENVIRONINDEX) {
+ TValue *o = &G(L)->tmptv;
+ settabV(L, o, tabref(fn->c.env));
+ return o;
+ } else {
+ idx = LUA_GLOBALSINDEX - idx;
+ return idx <= fn->c.nupvalues ? &fn->c.upvalue[idx-1] : niltv(L);
+ }
+ }
+}
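+
+/* Index mapping: idx 1 is the first slot at L->base, idx -1 the slot
+** below L->top; the pseudo-indices (registry, globals, environment,
+** upvalues) resolve to addresses outside the stack proper, and
+** niltv(L) marks an out-of-range slot.
+*/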
+
+static LJ_AINLINE TValue *index2adr_check(lua_State *L, int idx)
+{
+ TValue *o = index2adr(L, idx);
+ lj_checkapi(o != niltv(L), "invalid stack slot %d", idx);
+ return o;
+}
+
+static TValue *index2adr_stack(lua_State *L, int idx)
+{
+ if (idx > 0) {
+ TValue *o = L->base + (idx - 1);
+ if (o < L->top) {
+ return o;
+ } else {
+ lj_checkapi(0, "invalid stack slot %d", idx);
+ return niltv(L);
+ }
+ } else {
+ lj_checkapi(idx != 0 && -idx <= L->top - L->base,
+ "invalid stack slot %d", idx);
+ return L->top + idx;
+ }
+}
+
+static GCtab *getcurrenv(lua_State *L)
+{
+ GCfunc *fn = curr_func(L);
+ return fn->c.gct == ~LJ_TFUNC ? tabref(fn->c.env) : tabref(L->env);
+}
+
+/* -- Miscellaneous API functions ----------------------------------------- */
+
+LUA_API int lua_status(lua_State *L)
+{
+ return L->status;
+}
+
+LUA_API int lua_checkstack(lua_State *L, int size)
+{
+ if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) {
+ return 0; /* Stack overflow. */
+ } else if (size > 0) {
+ lj_state_checkstack(L, (MSize)size);
+ }
+ return 1;
+}
+
+LUALIB_API void luaL_checkstack(lua_State *L, int size, const char *msg)
+{
+ if (!lua_checkstack(L, size))
+ lj_err_callerv(L, LJ_ERR_STKOVM, msg);
+}
+
+LUA_API void lua_xmove(lua_State *L, lua_State *to, int n)
+{
+ TValue *f, *t;
+ if (L == to) return;
+ lj_checkapi_slot(n);
+ lj_checkapi(G(L) == G(to), "move across global states");
+ lj_state_checkstack(to, (MSize)n);
+ f = L->top;
+ t = to->top = to->top + n;
+ while (--n >= 0) copyTV(to, --t, --f);
+ L->top = f;
+}
+
+LUA_API const lua_Number *lua_version(lua_State *L)
+{
+ static const lua_Number version = LUA_VERSION_NUM;
+ UNUSED(L);
+ return &version;
+}
+
+/* -- Stack manipulation -------------------------------------------------- */
+
+LUA_API int lua_gettop(lua_State *L)
+{
+ return (int)(L->top - L->base);
+}
+
+LUA_API void lua_settop(lua_State *L, int idx)
+{
+ if (idx >= 0) {
+ lj_checkapi(idx <= tvref(L->maxstack) - L->base, "bad stack slot %d", idx);
+ if (L->base + idx > L->top) {
+ if (L->base + idx >= tvref(L->maxstack))
+ lj_state_growstack(L, (MSize)idx - (MSize)(L->top - L->base));
+ do { setnilV(L->top++); } while (L->top < L->base + idx);
+ } else {
+ L->top = L->base + idx;
+ }
+ } else {
+ lj_checkapi(-(idx+1) <= (L->top - L->base), "bad stack slot %d", idx);
+ L->top += idx+1; /* Shrinks top (idx < 0). */
+ }
+}
+
+LUA_API void lua_remove(lua_State *L, int idx)
+{
+ TValue *p = index2adr_stack(L, idx);
+ while (++p < L->top) copyTV(L, p-1, p);
+ L->top--;
+}
+
+LUA_API void lua_insert(lua_State *L, int idx)
+{
+ TValue *q, *p = index2adr_stack(L, idx);
+ for (q = L->top; q > p; q--) copyTV(L, q, q-1);
+ copyTV(L, p, L->top);
+}
+
+static void copy_slot(lua_State *L, TValue *f, int idx)
+{
+ if (idx == LUA_GLOBALSINDEX) {
+ lj_checkapi(tvistab(f), "stack slot %d is not a table", idx);
+ /* NOBARRIER: A thread (i.e. L) is never black. */
+ setgcref(L->env, obj2gco(tabV(f)));
+ } else if (idx == LUA_ENVIRONINDEX) {
+ GCfunc *fn = curr_func(L);
+ if (fn->c.gct != ~LJ_TFUNC)
+ lj_err_msg(L, LJ_ERR_NOENV);
+ lj_checkapi(tvistab(f), "stack slot %d is not a table", idx);
+ setgcref(fn->c.env, obj2gco(tabV(f)));
+ lj_gc_barrier(L, fn, f);
+ } else {
+ TValue *o = index2adr_check(L, idx);
+ copyTV(L, o, f);
+ if (idx < LUA_GLOBALSINDEX) /* Need a barrier for upvalues. */
+ lj_gc_barrier(L, curr_func(L), f);
+ }
+}
+
+LUA_API void lua_replace(lua_State *L, int idx)
+{
+ lj_checkapi_slot(1);
+ copy_slot(L, L->top - 1, idx);
+ L->top--;
+}
+
+LUA_API void lua_copy(lua_State *L, int fromidx, int toidx)
+{
+ copy_slot(L, index2adr(L, fromidx), toidx);
+}
+
+LUA_API void lua_pushvalue(lua_State *L, int idx)
+{
+ copyTV(L, L->top, index2adr(L, idx));
+ incr_top(L);
+}
+
+/* -- Stack getters ------------------------------------------------------- */
+
+LUA_API int lua_type(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisnumber(o)) {
+ return LUA_TNUMBER;
+#if LJ_64 && !LJ_GC64
+ } else if (tvislightud(o)) {
+ return LUA_TLIGHTUSERDATA;
+#endif
+ } else if (o == niltv(L)) {
+ return LUA_TNONE;
+ } else { /* Magic internal/external tag conversion. ORDER LJ_T */
+ uint32_t t = ~itype(o);
+#if LJ_64
+ int tt = (int)((U64x(75a06,98042110) >> 4*t) & 15u);
+#else
+ int tt = (int)(((t < 8 ? 0x98042110u : 0x75a06u) >> 4*(t&7)) & 15u);
+#endif
+ lj_assertL(tt != LUA_TNIL || tvisnil(o), "bad tag conversion");
+ return tt;
+ }
+}
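+
+/* The magic constants above pack one external LUA_T* tag per nibble,
+** indexed by the internal ~itype(o) value (ORDER LJ_T), turning the
+** tag conversion into a single shift and mask.
+*/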
+
+LUALIB_API void luaL_checktype(lua_State *L, int idx, int tt)
+{
+ if (lua_type(L, idx) != tt)
+ lj_err_argt(L, idx, tt);
+}
+
+LUALIB_API void luaL_checkany(lua_State *L, int idx)
+{
+ if (index2adr(L, idx) == niltv(L))
+ lj_err_arg(L, idx, LJ_ERR_NOVAL);
+}
+
+LUA_API const char *lua_typename(lua_State *L, int t)
+{
+ UNUSED(L);
+ return lj_obj_typename[t+1];
+}
+
+LUA_API int lua_iscfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvisfunc(o) && !isluafunc(funcV(o));
+}
+
+LUA_API int lua_isnumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ return (tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), &tmp)));
+}
+
+LUA_API int lua_isstring(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisstr(o) || tvisnumber(o));
+}
+
+LUA_API int lua_isuserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (tvisudata(o) || tvislightud(o));
+}
+
+LUA_API int lua_rawequal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ return (o1 == niltv(L) || o2 == niltv(L)) ? 0 : lj_obj_equal(o1, o2);
+}
+
+LUA_API int lua_equal(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) == intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) == numberVnum(o2);
+ } else if (itype(o1) != itype(o2)) {
+ return 0;
+ } else if (tvispri(o1)) {
+ return o1 != niltv(L) && o2 != niltv(L);
+#if LJ_64 && !LJ_GC64
+ } else if (tvislightud(o1)) {
+ return o1->u64 == o2->u64;
+#endif
+ } else if (gcrefeq(o1->gcr, o2->gcr)) {
+ return 1;
+ } else if (!tvistabud(o1)) {
+ return 0;
+ } else {
+ TValue *base = lj_meta_equal(L, gcV(o1), gcV(o2), 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2+LJ_FR2;
+ return tvistruecond(L->top+1+LJ_FR2);
+ }
+ }
+}
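+
+/* Protocol note for the above and for lua_lessthan() below:
+** lj_meta_equal() and lj_meta_comp() either return the result
+** directly, encoded as a pointer value of 0 or 1, or return a frame
+** base on which the metamethod is run via lj_vm_call().
+*/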
+
+LUA_API int lua_lessthan(lua_State *L, int idx1, int idx2)
+{
+ cTValue *o1 = index2adr(L, idx1);
+ cTValue *o2 = index2adr(L, idx2);
+ if (o1 == niltv(L) || o2 == niltv(L)) {
+ return 0;
+ } else if (tvisint(o1) && tvisint(o2)) {
+ return intV(o1) < intV(o2);
+ } else if (tvisnumber(o1) && tvisnumber(o2)) {
+ return numberVnum(o1) < numberVnum(o2);
+ } else {
+ TValue *base = lj_meta_comp(L, o1, o2, 0);
+ if ((uintptr_t)base <= 1) {
+ return (int)(uintptr_t)base;
+ } else {
+ L->top = base+2;
+ lj_vm_call(L, base, 1+1);
+ L->top -= 2+LJ_FR2;
+ return tvistruecond(L->top+1+LJ_FR2);
+ }
+ }
+}
+
+LUA_API lua_Number lua_tonumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp))
+ return numV(&tmp);
+ else
+ return 0;
+}
+
+LUA_API lua_Number lua_tonumberx(lua_State *L, int idx, int *ok)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o))) {
+ if (ok) *ok = 1;
+ return numberVnum(o);
+ } else if (tvisstr(o) && lj_strscan_num(strV(o), &tmp)) {
+ if (ok) *ok = 1;
+ return numV(&tmp);
+ } else {
+ if (ok) *ok = 0;
+ return 0;
+ }
+}
+
+LUALIB_API lua_Number luaL_checknumber(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUALIB_API lua_Number luaL_optnumber(lua_State *L, int idx, lua_Number def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ if (LJ_LIKELY(tvisnumber(o)))
+ return numberVnum(o);
+ else if (tvisnil(o))
+ return def;
+ else if (!(tvisstr(o) && lj_strscan_num(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ return numV(&tmp);
+}
+
+LUA_API lua_Integer lua_tointeger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ return 0;
+ if (tvisint(&tmp))
+ return intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUA_API lua_Integer lua_tointegerx(lua_State *L, int idx, int *ok)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ if (ok) *ok = 1;
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp))) {
+ if (ok) *ok = 0;
+ return 0;
+ }
+ if (tvisint(&tmp)) {
+ if (ok) *ok = 1;
+ return intV(&tmp);
+ }
+ n = numV(&tmp);
+ }
+ if (ok) *ok = 1;
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_checkinteger(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUALIB_API lua_Integer luaL_optinteger(lua_State *L, int idx, lua_Integer def)
+{
+ cTValue *o = index2adr(L, idx);
+ TValue tmp;
+ lua_Number n;
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ n = numV(o);
+ } else if (tvisnil(o)) {
+ return def;
+ } else {
+ if (!(tvisstr(o) && lj_strscan_number(strV(o), &tmp)))
+ lj_err_argt(L, idx, LUA_TNUMBER);
+ if (tvisint(&tmp))
+ return (lua_Integer)intV(&tmp);
+ n = numV(&tmp);
+ }
+#if LJ_64
+ return (lua_Integer)n;
+#else
+ return lj_num2int(n);
+#endif
+}
+
+LUA_API int lua_toboolean(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return tvistruecond(o);
+}
+
+LUA_API const char *lua_tolstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_strfmt_number(L, o);
+ setstrV(L, o, s);
+ } else {
+ if (len != NULL) *len = 0;
+ return NULL;
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_checklstring(lua_State *L, int idx, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_strfmt_number(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API const char *luaL_optlstring(lua_State *L, int idx,
+ const char *def, size_t *len)
+{
+ TValue *o = index2adr(L, idx);
+ GCstr *s;
+ if (LJ_LIKELY(tvisstr(o))) {
+ s = strV(o);
+ } else if (tvisnil(o)) {
+ if (len != NULL) *len = def ? strlen(def) : 0;
+ return def;
+ } else if (tvisnumber(o)) {
+ lj_gc_check(L);
+ o = index2adr(L, idx); /* GC may move the stack. */
+ s = lj_strfmt_number(L, o);
+ setstrV(L, o, s);
+ } else {
+ lj_err_argt(L, idx, LUA_TSTRING);
+ }
+ if (len != NULL) *len = s->len;
+ return strdata(s);
+}
+
+LUALIB_API int luaL_checkoption(lua_State *L, int idx, const char *def,
+ const char *const lst[])
+{
+ ptrdiff_t i;
+ const char *s = lua_tolstring(L, idx, NULL);
+ if (s == NULL && (s = def) == NULL)
+ lj_err_argt(L, idx, LUA_TSTRING);
+ for (i = 0; lst[i]; i++)
+ if (strcmp(lst[i], s) == 0)
+ return (int)i;
+ lj_err_argv(L, idx, LJ_ERR_INVOPTM, s);
+}
+
+LUA_API size_t lua_objlen(lua_State *L, int idx)
+{
+ TValue *o = index2adr(L, idx);
+ if (tvisstr(o)) {
+ return strV(o)->len;
+ } else if (tvistab(o)) {
+ return (size_t)lj_tab_len(tabV(o));
+ } else if (tvisudata(o)) {
+ return udataV(o)->len;
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_strfmt_number(L, o);
+ setstrV(L, o, s);
+ return s->len;
+ } else {
+ return 0;
+ }
+}
+
+LUA_API lua_CFunction lua_tocfunction(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisfunc(o)) {
+ BCOp op = bc_op(*mref(funcV(o)->c.pc, BCIns));
+ if (op == BC_FUNCC || op == BC_FUNCCW)
+ return funcV(o)->c.f;
+ }
+ return NULL;
+}
+
+LUA_API void *lua_touserdata(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(G(L), o);
+ else
+ return NULL;
+}
+
+LUA_API lua_State *lua_tothread(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ return (!tvisthread(o)) ? NULL : threadV(o);
+}
+
+LUA_API const void *lua_topointer(lua_State *L, int idx)
+{
+ return lj_obj_ptr(G(L), index2adr(L, idx));
+}
+
+/* -- Stack setters (object creation) ------------------------------------- */
+
+LUA_API void lua_pushnil(lua_State *L)
+{
+ setnilV(L->top);
+ incr_top(L);
+}
+
+LUA_API void lua_pushnumber(lua_State *L, lua_Number n)
+{
+ setnumV(L->top, n);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top); /* Canonicalize injected NaNs. */
+ incr_top(L);
+}
+
+LUA_API void lua_pushinteger(lua_State *L, lua_Integer n)
+{
+ setintptrV(L->top, n);
+ incr_top(L);
+}
+
+LUA_API void lua_pushlstring(lua_State *L, const char *str, size_t len)
+{
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_new(L, str, len);
+ setstrV(L, L->top, s);
+ incr_top(L);
+}
+
+LUA_API void lua_pushstring(lua_State *L, const char *str)
+{
+ if (str == NULL) {
+ setnilV(L->top);
+ } else {
+ GCstr *s;
+ lj_gc_check(L);
+ s = lj_str_newz(L, str);
+ setstrV(L, L->top, s);
+ }
+ incr_top(L);
+}
+
+LUA_API const char *lua_pushvfstring(lua_State *L, const char *fmt,
+ va_list argp)
+{
+ lj_gc_check(L);
+ return lj_strfmt_pushvf(L, fmt, argp);
+}
+
+LUA_API const char *lua_pushfstring(lua_State *L, const char *fmt, ...)
+{
+ const char *ret;
+ va_list argp;
+ lj_gc_check(L);
+ va_start(argp, fmt);
+ ret = lj_strfmt_pushvf(L, fmt, argp);
+ va_end(argp);
+ return ret;
+}
+
+LUA_API void lua_pushcclosure(lua_State *L, lua_CFunction f, int n)
+{
+ GCfunc *fn;
+ lj_gc_check(L);
+ lj_checkapi_slot(n);
+ fn = lj_func_newC(L, (MSize)n, getcurrenv(L));
+ fn->c.f = f;
+ L->top -= n;
+ while (n--)
+ copyTV(L, &fn->c.upvalue[n], L->top+n);
+ setfuncV(L, L->top, fn);
+ lj_assertL(iswhite(obj2gco(fn)), "new GC object is not white");
+ incr_top(L);
+}
+
+LUA_API void lua_pushboolean(lua_State *L, int b)
+{
+ setboolV(L->top, (b != 0));
+ incr_top(L);
+}
+
+LUA_API void lua_pushlightuserdata(lua_State *L, void *p)
+{
+#if LJ_64
+ p = lj_lightud_intern(L, p);
+#endif
+ setrawlightudV(L->top, p);
+ incr_top(L);
+}
+
+LUA_API void lua_createtable(lua_State *L, int narray, int nrec)
+{
+ lj_gc_check(L);
+ settabV(L, L->top, lj_tab_new_ah(L, narray, nrec));
+ incr_top(L);
+}
+
+LUALIB_API int luaL_newmetatable(lua_State *L, const char *tname)
+{
+ GCtab *regt = tabV(registry(L));
+ TValue *tv = lj_tab_setstr(L, regt, lj_str_newz(L, tname));
+ if (tvisnil(tv)) {
+ GCtab *mt = lj_tab_new(L, 0, 1);
+ settabV(L, tv, mt);
+ settabV(L, L->top++, mt);
+ lj_gc_anybarriert(L, regt);
+ return 1;
+ } else {
+ copyTV(L, L->top++, tv);
+ return 0;
+ }
+}
+
+LUA_API int lua_pushthread(lua_State *L)
+{
+ setthreadV(L, L->top, L);
+ incr_top(L);
+ return (mainthread(G(L)) == L);
+}
+
+LUA_API lua_State *lua_newthread(lua_State *L)
+{
+ lua_State *L1;
+ lj_gc_check(L);
+ L1 = lj_state_new(L);
+ setthreadV(L, L->top, L1);
+ incr_top(L);
+ return L1;
+}
+
+LUA_API void *lua_newuserdata(lua_State *L, size_t size)
+{
+ GCudata *ud;
+ lj_gc_check(L);
+ if (size > LJ_MAX_UDATA)
+ lj_err_msg(L, LJ_ERR_UDATAOV);
+ ud = lj_udata_new(L, (MSize)size, getcurrenv(L));
+ setudataV(L, L->top, ud);
+ incr_top(L);
+ return uddata(ud);
+}
+
+LUA_API void lua_concat(lua_State *L, int n)
+{
+ lj_checkapi_slot(n);
+ if (n >= 2) {
+ n--;
+ do {
+ TValue *top = lj_meta_cat(L, L->top-1, -n);
+ if (top == NULL) {
+ L->top -= n;
+ break;
+ }
+ n -= (int)(L->top - (top - 2*LJ_FR2));
+ L->top = top+2;
+ lj_vm_call(L, top, 1+1);
+ L->top -= 1+LJ_FR2;
+ copyTV(L, L->top-1, L->top+LJ_FR2);
+ } while (--n > 0);
+ } else if (n == 0) { /* Push empty string. */
+ setstrV(L, L->top, &G(L)->strempty);
+ incr_top(L);
+ }
+ /* else n == 1: nothing to do. */
+}
+
+/* -- Object getters ------------------------------------------------------ */
+
+LUA_API void lua_gettable(lua_State *L, int idx)
+{
+ cTValue *t = index2adr_check(L, idx);
+ cTValue *v = lj_meta_tget(L, t, L->top-1);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2+LJ_FR2;
+ v = L->top+1+LJ_FR2;
+ }
+ copyTV(L, L->top-1, v);
+}
+
+LUA_API void lua_getfield(lua_State *L, int idx, const char *k)
+{
+ cTValue *v, *t = index2adr_check(L, idx);
+ TValue key;
+ setstrV(L, &key, lj_str_newz(L, k));
+ v = lj_meta_tget(L, t, &key);
+ if (v == NULL) {
+ L->top += 2;
+ lj_vm_call(L, L->top-2, 1+1);
+ L->top -= 2+LJ_FR2;
+ v = L->top+1+LJ_FR2;
+ }
+ copyTV(L, L->top, v);
+ incr_top(L);
+}
+
+LUA_API void lua_rawget(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
+ copyTV(L, L->top-1, lj_tab_get(L, tabV(t), L->top-1));
+}
+
+LUA_API void lua_rawgeti(lua_State *L, int idx, int n)
+{
+ cTValue *v, *t = index2adr(L, idx);
+ lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
+ v = lj_tab_getint(tabV(t), n);
+ if (v) {
+ copyTV(L, L->top, v);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_getmetatable(lua_State *L, int idx)
+{
+ cTValue *o = index2adr(L, idx);
+ GCtab *mt = NULL;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt == NULL)
+ return 0;
+ settabV(L, L->top, mt);
+ incr_top(L);
+ return 1;
+}
+
+LUALIB_API int luaL_getmetafield(lua_State *L, int idx, const char *field)
+{
+ if (lua_getmetatable(L, idx)) {
+ cTValue *tv = lj_tab_getstr(tabV(L->top-1), lj_str_newz(L, field));
+ if (tv && !tvisnil(tv)) {
+ copyTV(L, L->top-1, tv);
+ return 1;
+ }
+ L->top--;
+ }
+ return 0;
+}
+
+LUA_API void lua_getfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr_check(L, idx);
+ if (tvisfunc(o)) {
+ settabV(L, L->top, tabref(funcV(o)->c.env));
+ } else if (tvisudata(o)) {
+ settabV(L, L->top, tabref(udataV(o)->env));
+ } else if (tvisthread(o)) {
+ settabV(L, L->top, tabref(threadV(o)->env));
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+}
+
+LUA_API int lua_next(lua_State *L, int idx)
+{
+ cTValue *t = index2adr(L, idx);
+ int more;
+ lj_checkapi(tvistab(t), "stack slot %d is not a table", idx);
+ more = lj_tab_next(tabV(t), L->top-1, L->top-1);
+ if (more > 0) {
+ incr_top(L); /* Return new key and value slot. */
+ } else if (!more) { /* End of traversal. */
+ L->top--; /* Remove key slot. */
+ } else {
+ lj_err_msg(L, LJ_ERR_NEXTIDX);
+ }
+ return more;
+}
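+
+/* Standard traversal idiom over a table at stack index t (shown for
+** context; this is the documented Lua C API pattern):
+**
+**   lua_pushnil(L);  (first key)
+**   while (lua_next(L, t) != 0) {
+**     (key at index -2, value at index -1)
+**     lua_pop(L, 1);  (drop value, keep key for the next iteration)
+**   }
+*/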
+
+LUA_API const char *lua_getupvalue(lua_State *L, int idx, int n)
+{
+ TValue *val;
+ GCobj *o;
+ const char *name = lj_debug_uvnamev(index2adr(L, idx), (uint32_t)(n-1), &val, &o);
+ if (name) {
+ copyTV(L, L->top, val);
+ incr_top(L);
+ }
+ return name;
+}
+
+LUA_API void *lua_upvalueid(lua_State *L, int idx, int n)
+{
+ GCfunc *fn = funcV(index2adr(L, idx));
+ n--;
+ lj_checkapi((uint32_t)n < fn->l.nupvalues, "bad upvalue %d", n);
+ return isluafunc(fn) ? (void *)gcref(fn->l.uvptr[n]) :
+ (void *)&fn->c.upvalue[n];
+}
+
+LUA_API void lua_upvaluejoin(lua_State *L, int idx1, int n1, int idx2, int n2)
+{
+ GCfunc *fn1 = funcV(index2adr(L, idx1));
+ GCfunc *fn2 = funcV(index2adr(L, idx2));
+ n1--; n2--;
+ lj_checkapi(isluafunc(fn1), "stack slot %d is not a Lua function", idx1);
+ lj_checkapi(isluafunc(fn2), "stack slot %d is not a Lua function", idx2);
+ lj_checkapi((uint32_t)n1 < fn1->l.nupvalues, "bad upvalue %d", n1+1);
+ lj_checkapi((uint32_t)n2 < fn2->l.nupvalues, "bad upvalue %d", n2+1);
+ setgcrefr(fn1->l.uvptr[n1], fn2->l.uvptr[n2]);
+ lj_gc_objbarrier(L, fn1, gcref(fn1->l.uvptr[n1]));
+}
+
+LUALIB_API void *luaL_testudata(lua_State *L, int idx, const char *tname)
+{
+ cTValue *o = index2adr(L, idx);
+ if (tvisudata(o)) {
+ GCudata *ud = udataV(o);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), lj_str_newz(L, tname));
+ if (tv && tvistab(tv) && tabV(tv) == tabref(ud->metatable))
+ return uddata(ud);
+ }
+ return NULL; /* value is not a userdata with a metatable */
+}
+
+LUALIB_API void *luaL_checkudata(lua_State *L, int idx, const char *tname)
+{
+ void *p = luaL_testudata(L, idx, tname);
+ if (!p) lj_err_argtype(L, idx, tname);
+ return p;
+}
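+
+/* Hedged sketch of a typed accessor built on luaL_checkudata() (the Handle
+** type and registry name are hypothetical):
+**
+**   typedef struct Handle { int fd; } Handle;
+**
+**   static Handle *check_handle(lua_State *L, int idx)
+**   {
+**     return (Handle *)luaL_checkudata(L, idx, "mylib.handle");
+**   }
+*/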
+
+/* -- Object setters ------------------------------------------------------ */
+
+LUA_API void lua_settable(lua_State *L, int idx)
+{
+ TValue *o;
+ cTValue *t = index2adr_check(L, idx);
+ lj_checkapi_slot(2);
+ o = lj_meta_tset(L, t, L->top-2);
+ if (o) {
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ L->top -= 2;
+ copyTV(L, o, L->top+1);
+ } else {
+ TValue *base = L->top;
+ copyTV(L, base+2, base-3-2*LJ_FR2);
+ L->top = base+3;
+ lj_vm_call(L, base, 0+1);
+ L->top -= 3+LJ_FR2;
+ }
+}
+
+LUA_API void lua_setfield(lua_State *L, int idx, const char *k)
+{
+ TValue *o;
+ TValue key;
+ cTValue *t = index2adr_check(L, idx);
+ lj_checkapi_slot(1);
+ setstrV(L, &key, lj_str_newz(L, k));
+ o = lj_meta_tset(L, t, &key);
+ if (o) {
+ /* NOBARRIER: lj_meta_tset ensures the table is not black. */
+ copyTV(L, o, --L->top);
+ } else {
+ TValue *base = L->top;
+ copyTV(L, base+2, base-3-2*LJ_FR2);
+ L->top = base+3;
+ lj_vm_call(L, base, 0+1);
+ L->top -= 2+LJ_FR2;
+ }
+}
+
+LUA_API void lua_rawset(lua_State *L, int idx)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *key;
+ lj_checkapi_slot(2);
+ key = L->top-2;
+ dst = lj_tab_set(L, t, key);
+ copyTV(L, dst, key+1);
+ lj_gc_anybarriert(L, t);
+ L->top = key;
+}
+
+LUA_API void lua_rawseti(lua_State *L, int idx, int n)
+{
+ GCtab *t = tabV(index2adr(L, idx));
+ TValue *dst, *src;
+ lj_checkapi_slot(1);
+ dst = lj_tab_setint(L, t, n);
+ src = L->top-1;
+ copyTV(L, dst, src);
+ lj_gc_barriert(L, t, dst);
+ L->top = src;
+}
+
+LUA_API int lua_setmetatable(lua_State *L, int idx)
+{
+ global_State *g;
+ GCtab *mt;
+ cTValue *o = index2adr_check(L, idx);
+ lj_checkapi_slot(1);
+ if (tvisnil(L->top-1)) {
+ mt = NULL;
+ } else {
+ lj_checkapi(tvistab(L->top-1), "top stack slot is not a table");
+ mt = tabV(L->top-1);
+ }
+ g = G(L);
+ if (tvistab(o)) {
+ setgcref(tabV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarriert(L, tabV(o), mt);
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->metatable, obj2gco(mt));
+ if (mt)
+ lj_gc_objbarrier(L, udataV(o), mt);
+ } else {
+ /* Flush cache, since traces specialize to basemt. But not during __gc. */
+ if (lj_trace_flushall(L))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ if (tvisbool(o)) {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_it(g, LJ_TTRUE), obj2gco(mt));
+ setgcref(basemt_it(g, LJ_TFALSE), obj2gco(mt));
+ } else {
+ /* NOBARRIER: basemt is a GC root. */
+ setgcref(basemt_obj(g, o), obj2gco(mt));
+ }
+ }
+ L->top--;
+ return 1;
+}
+
+LUALIB_API void luaL_setmetatable(lua_State *L, const char *tname)
+{
+ lua_getfield(L, LUA_REGISTRYINDEX, tname);
+ lua_setmetatable(L, -2);
+}
+
+LUA_API int lua_setfenv(lua_State *L, int idx)
+{
+ cTValue *o = index2adr_check(L, idx);
+ GCtab *t;
+ lj_checkapi_slot(1);
+ lj_checkapi(tvistab(L->top-1), "top stack slot is not a table");
+ t = tabV(L->top-1);
+ if (tvisfunc(o)) {
+ setgcref(funcV(o)->c.env, obj2gco(t));
+ } else if (tvisudata(o)) {
+ setgcref(udataV(o)->env, obj2gco(t));
+ } else if (tvisthread(o)) {
+ setgcref(threadV(o)->env, obj2gco(t));
+ } else {
+ L->top--;
+ return 0;
+ }
+ lj_gc_objbarrier(L, gcV(o), t);
+ L->top--;
+ return 1;
+}
+
+LUA_API const char *lua_setupvalue(lua_State *L, int idx, int n)
+{
+ cTValue *f = index2adr(L, idx);
+ TValue *val;
+ GCobj *o;
+ const char *name;
+ lj_checkapi_slot(1);
+ name = lj_debug_uvnamev(f, (uint32_t)(n-1), &val, &o);
+ if (name) {
+ L->top--;
+ copyTV(L, val, L->top);
+ lj_gc_barrier(L, o, L->top);
+ }
+ return name;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+#if LJ_FR2
+static TValue *api_call_base(lua_State *L, int nargs)
+{
+ TValue *o = L->top, *base = o - nargs;
+ L->top = o+1;
+ for (; o > base; o--) copyTV(L, o, o-1);
+ setnilV(o);
+ return o+1;
+}
+#else
+#define api_call_base(L, nargs) (L->top - (nargs))
+#endif
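+
+/* Illustrative stack shape under LJ_FR2: before a call with nargs == 2 the
+** slots are [func|arg1|arg2]; api_call_base() shifts the arguments up by one
+** and inserts a nil, yielding the two-slot frame [func|nil|arg1|arg2] that
+** the GC64 VM expects. The returned base points at arg1's new slot.
+*/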
+
+LUA_API void lua_call(lua_State *L, int nargs, int nresults)
+{
+ lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
+ "thread called in wrong state %d", L->status);
+ lj_checkapi_slot(nargs+1);
+ lj_vm_call(L, api_call_base(L, nargs), nresults+1);
+}
+
+LUA_API int lua_pcall(lua_State *L, int nargs, int nresults, int errfunc)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ ptrdiff_t ef;
+ int status;
+ lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
+ "thread called in wrong state %d", L->status);
+ lj_checkapi_slot(nargs+1);
+ if (errfunc == 0) {
+ ef = 0;
+ } else {
+ cTValue *o = index2adr_stack(L, errfunc);
+ ef = savestack(L, o);
+ }
+ status = lj_vm_pcall(L, api_call_base(L, nargs), nresults+1, ef);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
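+
+/* Illustrative call sequence (standard API usage, shown for context only):
+**
+**   lua_getglobal(L, "f");
+**   lua_pushnumber(L, 1.0);
+**   if (lua_pcall(L, 1, 1, 0) != 0)  (no error handler: errfunc == 0)
+**     fprintf(stderr, "%s\n", lua_tostring(L, -1));
+*/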
+
+static TValue *cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ GCfunc *fn = lj_func_newC(L, 0, getcurrenv(L));
+ TValue *top = L->top;
+ fn->c.f = func;
+ setfuncV(L, top++, fn);
+ if (LJ_FR2) setnilV(top++);
+#if LJ_64
+ ud = lj_lightud_intern(L, ud);
+#endif
+ setrawlightudV(top++, ud);
+ cframe_nres(L->cframe) = 1+0; /* Zero results. */
+ L->top = top;
+ return top-1; /* Now call the newly allocated C function. */
+}
+
+LUA_API int lua_cpcall(lua_State *L, lua_CFunction func, void *ud)
+{
+ global_State *g = G(L);
+ uint8_t oldh = hook_save(g);
+ int status;
+ lj_checkapi(L->status == LUA_OK || L->status == LUA_ERRERR,
+ "thread called in wrong state %d", L->status);
+ status = lj_vm_cpcall(L, func, ud, cpcall);
+ if (status) hook_restore(g, oldh);
+ return status;
+}
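+
+/* Illustrative use of lua_cpcall() (pmain is hypothetical): run setup code
+** under error protection without pushing anything on the Lua stack first.
+** The ud pointer arrives as a light userdata at stack index 1.
+**
+**   static int pmain(lua_State *L)
+**   {
+**     void *ud = lua_touserdata(L, 1);
+**     ...  (code here may raise Lua errors safely)
+**     return 0;  (results are discarded: nres is fixed at 0)
+**   }
+**
+**   int status = lua_cpcall(L, pmain, NULL);
+*/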
+
+LUALIB_API int luaL_callmeta(lua_State *L, int idx, const char *field)
+{
+ if (luaL_getmetafield(L, idx, field)) {
+ TValue *top = L->top--;
+ if (LJ_FR2) setnilV(top++);
+ copyTV(L, top++, index2adr(L, idx));
+ L->top = top;
+ lj_vm_call(L, top-1, 1+1);
+ return 1;
+ }
+ return 0;
+}
+
+/* -- Coroutine yield and resume ------------------------------------------ */
+
+LUA_API int lua_isyieldable(lua_State *L)
+{
+ return cframe_canyield(L->cframe);
+}
+
+LUA_API int lua_yield(lua_State *L, int nresults)
+{
+ void *cf = L->cframe;
+ global_State *g = G(L);
+ if (cframe_canyield(cf)) {
+ cf = cframe_raw(cf);
+ if (!hook_active(g)) { /* Regular yield: move results down if needed. */
+ cTValue *f = L->top - nresults;
+ if (f > L->base) {
+ TValue *t = L->base;
+ while (--nresults >= 0) copyTV(L, t++, f++);
+ L->top = t;
+ }
+ L->cframe = NULL;
+ L->status = LUA_YIELD;
+ return -1;
+ } else { /* Yield from hook: add a pseudo-frame. */
+ TValue *top = L->top;
+ hook_leave(g);
+ (top++)->u64 = cframe_multres(cf);
+ setcont(top, lj_cont_hook);
+ if (LJ_FR2) top++;
+ setframe_pc(top, cframe_pc(cf)-1);
+ top++;
+ setframe_gc(top, obj2gco(L), LJ_TTHREAD);
+ if (LJ_FR2) top++;
+ setframe_ftsz(top, ((char *)(top+1)-(char *)L->base)+FRAME_CONT);
+ L->top = L->base = top+1;
+#if ((defined(__GNUC__) || defined(__clang__)) && (LJ_TARGET_X64 || defined(LUAJIT_UNWIND_EXTERNAL)) && !LJ_NO_UNWIND) || LJ_TARGET_WINDOWS
+ lj_err_throw(L, LUA_YIELD);
+#else
+ L->cframe = NULL;
+ L->status = LUA_YIELD;
+ lj_vm_unwind_c(cf, LUA_YIELD);
+#endif
+ }
+ }
+ lj_err_msg(L, LJ_ERR_CYIELD);
+ return 0; /* unreachable */
+}
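+
+/* From C, lua_yield() must be a tail call of the C function, per the Lua 5.1
+** convention (sketch; l_wait is hypothetical):
+**
+**   static int l_wait(lua_State *L)
+**   {
+**     lua_pushinteger(L, 42);
+**     return lua_yield(L, 1);  (yield one result to the resumer)
+**   }
+*/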
+
+LUA_API int lua_resume(lua_State *L, int nargs)
+{
+ if (L->cframe == NULL && L->status <= LUA_YIELD)
+ return lj_vm_resume(L,
+ L->status == LUA_OK ? api_call_base(L, nargs) : L->top - nargs,
+ 0, 0);
+ L->top = L->base;
+ setstrV(L, L->top, lj_err_str(L, LJ_ERR_COSUSP));
+ incr_top(L);
+ return LUA_ERRRUN;
+}
+
+/* -- GC and memory management -------------------------------------------- */
+
+LUA_API int lua_gc(lua_State *L, int what, int data)
+{
+ global_State *g = G(L);
+ int res = 0;
+ switch (what) {
+ case LUA_GCSTOP:
+ g->gc.threshold = LJ_MAX_MEM;
+ break;
+ case LUA_GCRESTART:
+ g->gc.threshold = data == -1 ? (g->gc.total/100)*g->gc.pause : g->gc.total;
+ break;
+ case LUA_GCCOLLECT:
+ lj_gc_fullgc(L);
+ break;
+ case LUA_GCCOUNT:
+ res = (int)(g->gc.total >> 10);
+ break;
+ case LUA_GCCOUNTB:
+ res = (int)(g->gc.total & 0x3ff);
+ break;
+ case LUA_GCSTEP: {
+ GCSize a = (GCSize)data << 10;
+ g->gc.threshold = (a <= g->gc.total) ? (g->gc.total - a) : 0;
+ while (g->gc.total >= g->gc.threshold)
+ if (lj_gc_step(L) > 0) {
+ res = 1;
+ break;
+ }
+ break;
+ }
+ case LUA_GCSETPAUSE:
+ res = (int)(g->gc.pause);
+ g->gc.pause = (MSize)data;
+ break;
+ case LUA_GCSETSTEPMUL:
+ res = (int)(g->gc.stepmul);
+ g->gc.stepmul = (MSize)data;
+ break;
+ case LUA_GCISRUNNING:
+ res = (g->gc.threshold != LJ_MAX_MEM);
+ break;
+ default:
+ res = -1; /* Invalid option. */
+ }
+ return res;
+}
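+
+/* Worked example of the LUA_GCSTEP math above (illustrative): lua_gc(L,
+** LUA_GCSTEP, 100) lowers the threshold to 100 KB (100 << 10 bytes) below
+** gc.total, then single-steps until total drops under the threshold; the
+** result is 1 if a collection cycle completed along the way. A common
+** embedder idiom for manual pacing:
+**
+**   lua_gc(L, LUA_GCSTOP, 0);   (take over GC scheduling)
+**   ...
+**   lua_gc(L, LUA_GCSTEP, 10);  (retire ~10 KB of GC debt per frame)
+*/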
+
+LUA_API lua_Alloc lua_getallocf(lua_State *L, void **ud)
+{
+ global_State *g = G(L);
+ if (ud) *ud = g->allocd;
+ return g->allocf;
+}
+
+LUA_API void lua_setallocf(lua_State *L, lua_Alloc f, void *ud)
+{
+ global_State *g = G(L);
+ g->allocd = ud;
+ g->allocf = f;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_arch.h b/libs/luajit-cmake/luajit/src/lj_arch.h
new file mode 100644
index 0000000..5fb798d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_arch.h
@@ -0,0 +1,719 @@
+/*
+** Target architecture selection.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ARCH_H
+#define _LJ_ARCH_H
+
+#include "lua.h"
+
+/* -- Target definitions -------------------------------------------------- */
+
+/* Target endianness. */
+#define LUAJIT_LE 0
+#define LUAJIT_BE 1
+
+/* Target architectures. */
+#define LUAJIT_ARCH_X86 1
+#define LUAJIT_ARCH_x86 1
+#define LUAJIT_ARCH_X64 2
+#define LUAJIT_ARCH_x64 2
+#define LUAJIT_ARCH_ARM 3
+#define LUAJIT_ARCH_arm 3
+#define LUAJIT_ARCH_ARM64 4
+#define LUAJIT_ARCH_arm64 4
+#define LUAJIT_ARCH_PPC 5
+#define LUAJIT_ARCH_ppc 5
+#define LUAJIT_ARCH_MIPS 6
+#define LUAJIT_ARCH_mips 6
+#define LUAJIT_ARCH_MIPS32 6
+#define LUAJIT_ARCH_mips32 6
+#define LUAJIT_ARCH_MIPS64 7
+#define LUAJIT_ARCH_mips64 7
+
+/* Target OS. */
+#define LUAJIT_OS_OTHER 0
+#define LUAJIT_OS_WINDOWS 1
+#define LUAJIT_OS_LINUX 2
+#define LUAJIT_OS_OSX 3
+#define LUAJIT_OS_BSD 4
+#define LUAJIT_OS_POSIX 5
+
+/* Number mode. */
+#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */
+#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */
+#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */
+#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */
+
+/* -- Target detection ---------------------------------------------------- */
+
+/* Select native target if no target defined. */
+#ifndef LUAJIT_TARGET
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+#define LUAJIT_TARGET LUAJIT_ARCH_X86
+#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
+#define LUAJIT_TARGET LUAJIT_ARCH_X64
+#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM)
+#define LUAJIT_TARGET LUAJIT_ARCH_ARM
+#elif defined(__aarch64__)
+#define LUAJIT_TARGET LUAJIT_ARCH_ARM64
+#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC)
+#define LUAJIT_TARGET LUAJIT_ARCH_PPC
+#elif defined(__mips64__) || defined(__mips64) || defined(__MIPS64__) || defined(__MIPS64)
+#define LUAJIT_TARGET LUAJIT_ARCH_MIPS64
+#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS)
+#define LUAJIT_TARGET LUAJIT_ARCH_MIPS32
+#else
+#error "No support for this architecture (yet)"
+#endif
+
+#endif
+
+/* Select native OS if no target OS defined. */
+#ifndef LUAJIT_OS
+
+#if defined(_WIN32) && !defined(_XBOX_VER)
+#define LUAJIT_OS LUAJIT_OS_WINDOWS
+#elif defined(__linux__)
+#define LUAJIT_OS LUAJIT_OS_LINUX
+#elif defined(__MACH__) && defined(__APPLE__)
+#include "TargetConditionals.h"
+#define LUAJIT_OS LUAJIT_OS_OSX
+#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__DragonFly__)) && !defined(__ORBIS__) && !defined(__PROSPERO__)
+#define LUAJIT_OS LUAJIT_OS_BSD
+#elif (defined(__sun__) && defined(__svr4__))
+#define LJ_TARGET_SOLARIS 1
+#define LUAJIT_OS LUAJIT_OS_POSIX
+#elif defined(__HAIKU__)
+#define LUAJIT_OS LUAJIT_OS_POSIX
+#elif defined(__CYGWIN__)
+#define LJ_TARGET_CYGWIN 1
+#define LUAJIT_OS LUAJIT_OS_POSIX
+#elif defined(__QNX__)
+#define LJ_TARGET_QNX 1
+#define LUAJIT_OS LUAJIT_OS_POSIX
+#else
+#define LUAJIT_OS LUAJIT_OS_OTHER
+#endif
+
+#endif
+
+/* Set target OS properties. */
+#if LUAJIT_OS == LUAJIT_OS_WINDOWS
+#define LJ_OS_NAME "Windows"
+#elif LUAJIT_OS == LUAJIT_OS_LINUX
+#define LJ_OS_NAME "Linux"
+#elif LUAJIT_OS == LUAJIT_OS_OSX
+#define LJ_OS_NAME "OSX"
+#elif LUAJIT_OS == LUAJIT_OS_BSD
+#define LJ_OS_NAME "BSD"
+#elif LUAJIT_OS == LUAJIT_OS_POSIX
+#define LJ_OS_NAME "POSIX"
+#else
+#define LJ_OS_NAME "Other"
+#endif
+
+#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS)
+#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX)
+#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX)
+#define LJ_TARGET_BSD (LUAJIT_OS == LUAJIT_OS_BSD)
+#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS)
+#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX
+
+#if TARGET_OS_IPHONE
+#define LJ_TARGET_IOS 1
+#else
+#define LJ_TARGET_IOS 0
+#endif
+
+#ifdef __CELLOS_LV2__
+#define LJ_TARGET_PS3 1
+#define LJ_TARGET_CONSOLE 1
+#endif
+
+#ifdef __ORBIS__
+#define LJ_TARGET_PS4 1
+#define LJ_TARGET_CONSOLE 1
+#undef NULL
+#define NULL ((void*)0)
+#endif
+
+#ifdef __PROSPERO__
+#define LJ_TARGET_PS5 1
+#define LJ_TARGET_CONSOLE 1
+#undef NULL
+#define NULL ((void*)0)
+#endif
+
+#ifdef __psp2__
+#define LJ_TARGET_PSVITA 1
+#define LJ_TARGET_CONSOLE 1
+#endif
+
+#if _XBOX_VER >= 200
+#define LJ_TARGET_XBOX360 1
+#define LJ_TARGET_CONSOLE 1
+#endif
+
+#ifdef _DURANGO
+#define LJ_TARGET_XBOXONE 1
+#define LJ_TARGET_CONSOLE 1
+#define LJ_TARGET_GC64 1
+#endif
+
+#ifdef __NX__
+#define LJ_TARGET_NX 1
+#define LJ_TARGET_CONSOLE 1
+#undef NULL
+#define NULL ((void*)0)
+#endif
+
+#ifdef _UWP
+#define LJ_TARGET_UWP 1
+#if LUAJIT_TARGET == LUAJIT_ARCH_X64
+#define LJ_TARGET_GC64 1
+#endif
+#endif
+
+/* -- Arch-specific settings ---------------------------------------------- */
+
+/* Set target architecture properties. */
+#if LUAJIT_TARGET == LUAJIT_ARCH_X86
+
+#define LJ_ARCH_NAME "x86"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#define LJ_TARGET_X86 1
+#define LJ_TARGET_X86ORX64 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_EHRAREG 8
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNALIGNED 1
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_X64
+
+#define LJ_ARCH_NAME "x64"
+#define LJ_ARCH_BITS 64
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#define LJ_TARGET_X64 1
+#define LJ_TARGET_X86ORX64 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_EHRAREG 16
+#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNALIGNED 1
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
+#ifndef LUAJIT_DISABLE_GC64
+#define LJ_TARGET_GC64 1
+#elif LJ_TARGET_OSX
+#error "macOS requires GC64 -- don't disable it"
+#endif
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM
+
+#define LJ_ARCH_NAME "arm"
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#if !defined(LJ_ARCH_HASFPU) && __SOFTFP__
+#define LJ_ARCH_HASFPU 0
+#endif
+#if !defined(LJ_ABI_SOFTFP) && !__ARM_PCS_VFP
+#define LJ_ABI_SOFTFP 1
+#endif
+#define LJ_ABI_EABI 1
+#define LJ_TARGET_ARM 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_EHRAREG 14
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
+#define LJ_TARGET_MASKSHIFT 0
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
+
+#if __ARM_ARCH == 8 || __ARM_ARCH_8__ || __ARM_ARCH_8A__
+#define LJ_ARCH_VERSION 80
+#elif __ARM_ARCH == 7 || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__
+#define LJ_ARCH_VERSION 70
+#elif __ARM_ARCH_6T2__
+#define LJ_ARCH_VERSION 61
+#elif __ARM_ARCH == 6 || __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__
+#define LJ_ARCH_VERSION 60
+#else
+#define LJ_ARCH_VERSION 50
+#endif
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM64
+
+#define LJ_ARCH_BITS 64
+#if defined(__AARCH64EB__)
+#define LJ_ARCH_NAME "arm64be"
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#else
+#define LJ_ARCH_NAME "arm64"
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#endif
+#define LJ_TARGET_ARM64 1
+#define LJ_TARGET_EHRETREG 0
+#define LJ_TARGET_EHRAREG 30
+#define LJ_TARGET_JUMPRANGE 27 /* +-2^27 = +-128MB */
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
+#define LJ_TARGET_GC64 1
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
+
+#define LJ_ARCH_VERSION 80
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC
+
+#ifndef LJ_ARCH_ENDIAN
+#if __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#else
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#endif
+#endif
+
+#if _LP64
+#define LJ_ARCH_BITS 64
+#if LJ_ARCH_ENDIAN == LUAJIT_LE
+#define LJ_ARCH_NAME "ppc64le"
+#else
+#define LJ_ARCH_NAME "ppc64"
+#endif
+#else
+#define LJ_ARCH_BITS 32
+#define LJ_ARCH_NAME "ppc"
+
+#if !defined(LJ_ARCH_HASFPU)
+#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
+#define LJ_ARCH_HASFPU 0
+#else
+#define LJ_ARCH_HASFPU 1
+#endif
+#endif
+
+#if !defined(LJ_ABI_SOFTFP)
+#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
+#define LJ_ABI_SOFTFP 1
+#else
+#define LJ_ABI_SOFTFP 0
+#endif
+#endif
+#endif
+
+#if LJ_ABI_SOFTFP
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
+#else
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE
+#endif
+
+#define LJ_TARGET_PPC 1
+#define LJ_TARGET_EHRETREG 3
+#define LJ_TARGET_EHRAREG 65
+#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
+#define LJ_TARGET_MASKSHIFT 0
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
+
+#if LJ_TARGET_CONSOLE
+#define LJ_ARCH_PPC32ON64 1
+#define LJ_ARCH_NOFFI 1
+#elif LJ_ARCH_BITS == 64
+#error "No support for PPC64"
+#endif
+
+#if _ARCH_PWR7
+#define LJ_ARCH_VERSION 70
+#elif _ARCH_PWR6
+#define LJ_ARCH_VERSION 60
+#elif _ARCH_PWR5X
+#define LJ_ARCH_VERSION 51
+#elif _ARCH_PWR5
+#define LJ_ARCH_VERSION 50
+#elif _ARCH_PWR4
+#define LJ_ARCH_VERSION 40
+#else
+#define LJ_ARCH_VERSION 0
+#endif
+#if _ARCH_PPCSQ
+#define LJ_ARCH_SQRT 1
+#endif
+#if _ARCH_PWR5X
+#define LJ_ARCH_ROUND 1
+#endif
+#if __PPU__
+#define LJ_ARCH_CELL 1
+#endif
+#if LJ_TARGET_XBOX360
+#define LJ_ARCH_XENON 1
+#endif
+
+#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS32 || LUAJIT_TARGET == LUAJIT_ARCH_MIPS64
+
+#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL)
+#if __mips_isa_rev >= 6
+#define LJ_TARGET_MIPSR6 1
+#define LJ_TARGET_UNALIGNED 1
+#endif
+#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
+#if LJ_TARGET_MIPSR6
+#define LJ_ARCH_NAME "mips32r6el"
+#else
+#define LJ_ARCH_NAME "mipsel"
+#endif
+#else
+#if LJ_TARGET_MIPSR6
+#define LJ_ARCH_NAME "mips64r6el"
+#else
+#define LJ_ARCH_NAME "mips64el"
+#endif
+#endif
+#define LJ_ARCH_ENDIAN LUAJIT_LE
+#else
+#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
+#if LJ_TARGET_MIPSR6
+#define LJ_ARCH_NAME "mips32r6"
+#else
+#define LJ_ARCH_NAME "mips"
+#endif
+#else
+#if LJ_TARGET_MIPSR6
+#define LJ_ARCH_NAME "mips64r6"
+#else
+#define LJ_ARCH_NAME "mips64"
+#endif
+#endif
+#define LJ_ARCH_ENDIAN LUAJIT_BE
+#endif
+
+#if !defined(LJ_ARCH_HASFPU)
+#ifdef __mips_soft_float
+#define LJ_ARCH_HASFPU 0
+#else
+#define LJ_ARCH_HASFPU 1
+#endif
+#endif
+
+#if !defined(LJ_ABI_SOFTFP)
+#ifdef __mips_soft_float
+#define LJ_ABI_SOFTFP 1
+#else
+#define LJ_ABI_SOFTFP 0
+#endif
+#endif
+
+#if LUAJIT_TARGET == LUAJIT_ARCH_MIPS32
+#define LJ_ARCH_BITS 32
+#define LJ_TARGET_MIPS32 1
+#else
+#define LJ_ARCH_BITS 64
+#define LJ_TARGET_MIPS64 1
+#define LJ_TARGET_GC64 1
+#endif
+#define LJ_TARGET_MIPS 1
+#define LJ_TARGET_EHRETREG 4
+#define LJ_TARGET_EHRAREG 31
+#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */
+#define LJ_TARGET_MASKSHIFT 1
+#define LJ_TARGET_MASKROT 1
+#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
+#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
+
+#if LJ_TARGET_MIPSR6
+#define LJ_ARCH_VERSION 60
+#elif _MIPS_ARCH_MIPS32R2 || _MIPS_ARCH_MIPS64R2
+#define LJ_ARCH_VERSION 20
+#else
+#define LJ_ARCH_VERSION 10
+#endif
+
+#else
+#error "No target architecture defined"
+#endif
+
+/* -- Checks for requirements --------------------------------------------- */
+
+/* Check for minimum required compiler versions. */
+#if defined(__GNUC__)
+#if LJ_TARGET_X86
+#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4)
+#error "Need at least GCC 3.4 or newer"
+#endif
+#elif LJ_TARGET_X64
+#if __GNUC__ < 4
+#error "Need at least GCC 4.0 or newer"
+#endif
+#elif LJ_TARGET_ARM
+#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2)
+#error "Need at least GCC 4.2 or newer"
+#endif
+#elif LJ_TARGET_ARM64
+#if __clang__
+#if ((__clang_major__ < 3) || ((__clang_major__ == 3) && __clang_minor__ < 5)) && !defined(__NX_TOOLCHAIN_MAJOR__)
+#error "Need at least Clang 3.5 or newer"
+#endif
+#else
+#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 8)
+#error "Need at least GCC 4.8 or newer"
+#endif
+#endif
+#elif !LJ_TARGET_PS3
+#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3)
+#error "Need at least GCC 4.3 or newer"
+#endif
+#endif
+#endif
+
+/* Check target-specific constraints. */
+#ifndef _BUILDVM_H
+#if LJ_TARGET_X64
+#if __USING_SJLJ_EXCEPTIONS__
+#error "Need a C compiler with native exception handling on x64"
+#endif
+#elif LJ_TARGET_ARM
+#if defined(__ARMEB__)
+#error "No support for big-endian ARM"
+#endif
+#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__
+#error "No support for Cortex-M CPUs"
+#endif
+#if !(__ARM_EABI__ || LJ_TARGET_IOS)
+#error "Only ARM EABI or iOS 3.0+ ABI is supported"
+#endif
+#elif LJ_TARGET_ARM64
+#if defined(_ILP32)
+#error "No support for ILP32 model on ARM64"
+#endif
+#elif LJ_TARGET_PPC
+#if defined(_LITTLE_ENDIAN) && (!defined(_BYTE_ORDER) || (_BYTE_ORDER == _LITTLE_ENDIAN))
+#error "No support for little-endian PPC32"
+#endif
+#if defined(__NO_FPRS__) && !defined(_SOFT_FLOAT)
+#error "No support for PPC/e500 anymore (use LuaJIT 2.0)"
+#endif
+#elif LJ_TARGET_MIPS32
+#if !((defined(_MIPS_SIM_ABI32) && _MIPS_SIM == _MIPS_SIM_ABI32) || (defined(_ABIO32) && _MIPS_SIM == _ABIO32))
+#error "Only o32 ABI supported for MIPS32"
+#endif
+#if LJ_TARGET_MIPSR6
+/* Not that useful, since most available r6 CPUs are 64 bit. */
+#error "No support for MIPS32R6"
+#endif
+#elif LJ_TARGET_MIPS64
+#if !((defined(_MIPS_SIM_ABI64) && _MIPS_SIM == _MIPS_SIM_ABI64) || (defined(_ABI64) && _MIPS_SIM == _ABI64))
+/* MIPS32ON64 aka n32 ABI support might be desirable, but difficult. */
+#error "Only n64 ABI supported for MIPS64"
+#endif
+#endif
+#endif
+
+/* -- Derived defines ----------------------------------------------------- */
+
+/* Enable or disable the dual-number mode for the VM. */
+#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL && LUAJIT_NUMMODE == 1)
+#error "No support for this number mode on this architecture"
+#endif
+#if LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL_SINGLE && LUAJIT_NUMMODE != 1) || \
+ (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE_DUAL && LUAJIT_NUMMODE == 2)
+#define LJ_DUALNUM 1
+#else
+#define LJ_DUALNUM 0
+#endif
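+
+/* Example (illustrative): x64 selects LJ_NUMMODE_SINGLE_DUAL, so without an
+** explicit LUAJIT_NUMMODE the VM runs in single-number mode (LJ_DUALNUM 0);
+** building with -DLUAJIT_NUMMODE=2 switches it to dual-number mode. */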
+
+#if LJ_TARGET_IOS || LJ_TARGET_CONSOLE
+/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */
+/* Ditto for the consoles. Complain to Sony or MS, not me. */
+#ifndef LUAJIT_ENABLE_JIT
+#define LJ_OS_NOJIT 1
+#endif
+#endif
+
+/* 64 bit GC references. */
+#if LJ_TARGET_GC64
+#define LJ_GC64 1
+#else
+#define LJ_GC64 0
+#endif
+
+/* 2-slot frame info. */
+#if LJ_GC64
+#define LJ_FR2 1
+#else
+#define LJ_FR2 0
+#endif
+
+/* Disable or enable the JIT compiler. */
+#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT)
+#define LJ_HASJIT 0
+#else
+#define LJ_HASJIT 1
+#endif
+
+/* Disable or enable the FFI extension. */
+#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI)
+#define LJ_HASFFI 0
+#else
+#define LJ_HASFFI 1
+#endif
+
+/* Disable or enable the string buffer extension. */
+#if defined(LUAJIT_DISABLE_BUFFER)
+#define LJ_HASBUFFER 0
+#else
+#define LJ_HASBUFFER 1
+#endif
+
+#if defined(LUAJIT_DISABLE_PROFILE)
+#define LJ_HASPROFILE 0
+#elif LJ_TARGET_POSIX
+#define LJ_HASPROFILE 1
+#define LJ_PROFILE_SIGPROF 1
+#elif LJ_TARGET_PS3
+#define LJ_HASPROFILE 1
+#define LJ_PROFILE_PTHREAD 1
+#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOX360
+#define LJ_HASPROFILE 1
+#define LJ_PROFILE_WTHREAD 1
+#else
+#define LJ_HASPROFILE 0
+#endif
+
+#ifndef LJ_ARCH_HASFPU
+#define LJ_ARCH_HASFPU 1
+#endif
+#ifndef LJ_ABI_SOFTFP
+#define LJ_ABI_SOFTFP 0
+#endif
+#define LJ_SOFTFP (!LJ_ARCH_HASFPU)
+#define LJ_SOFTFP32 (LJ_SOFTFP && LJ_32)
+
+#if LJ_ARCH_ENDIAN == LUAJIT_BE
+#define LJ_LE 0
+#define LJ_BE 1
+#define LJ_ENDIAN_SELECT(le, be) be
+#define LJ_ENDIAN_LOHI(lo, hi) hi lo
+#else
+#define LJ_LE 1
+#define LJ_BE 0
+#define LJ_ENDIAN_SELECT(le, be) le
+#define LJ_ENDIAN_LOHI(lo, hi) lo hi
+#endif
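+
+/* Example expansion (illustrative): LJ_ENDIAN_LOHI() is used elsewhere to
+** order struct fields by address, e.g.
+**
+**   struct { LJ_ENDIAN_LOHI(uint32_t lo;, uint32_t hi;) };
+**
+** which places 'lo' at the lower address on LE targets and 'hi' first on BE.
+*/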
+
+#if LJ_ARCH_BITS == 32
+#define LJ_32 1
+#define LJ_64 0
+#else
+#define LJ_32 0
+#define LJ_64 1
+#endif
+
+#ifndef LJ_TARGET_UNALIGNED
+#define LJ_TARGET_UNALIGNED 0
+#endif
+
+#ifndef LJ_PAGESIZE
+#define LJ_PAGESIZE 4096
+#endif
+
+/* Various workarounds for embedded operating systems or weak C runtimes. */
+#if defined(__ANDROID__) || defined(__symbian__) || LJ_TARGET_XBOX360 || LJ_TARGET_WINDOWS
+#define LUAJIT_NO_LOG2
+#endif
+#if LJ_TARGET_CONSOLE || (LJ_TARGET_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0)
+#define LJ_NO_SYSTEM 1
+#endif
+
+#if LJ_TARGET_WINDOWS || LJ_TARGET_CYGWIN
+#define LJ_ABI_WIN 1
+#else
+#define LJ_ABI_WIN 0
+#endif
+
+#if LJ_TARGET_WINDOWS
+#if LJ_TARGET_UWP
+#define LJ_WIN_VALLOC VirtualAllocFromApp
+#define LJ_WIN_VPROTECT VirtualProtectFromApp
+extern void *LJ_WIN_LOADLIBA(const char *path);
+#else
+#define LJ_WIN_VALLOC VirtualAlloc
+#define LJ_WIN_VPROTECT VirtualProtect
+#define LJ_WIN_LOADLIBA(path) LoadLibraryExA((path), NULL, 0)
+#endif
+#endif
+
+#if defined(LUAJIT_NO_UNWIND) || __GNU_COMPACT_EH__ || defined(__symbian__) || LJ_TARGET_IOS || LJ_TARGET_PS3 || LJ_TARGET_PS4 || LJ_TARGET_PS5
+#define LJ_NO_UNWIND 1
+#endif
+
+#if !LJ_NO_UNWIND && !defined(LUAJIT_UNWIND_INTERNAL) && (LJ_ABI_WIN || (defined(LUAJIT_UNWIND_EXTERNAL) && (defined(__GNUC__) || defined(__clang__))))
+#define LJ_UNWIND_EXT 1
+#else
+#define LJ_UNWIND_EXT 0
+#endif
+
+#if LJ_UNWIND_EXT && LJ_HASJIT && !LJ_TARGET_ARM && !(LJ_ABI_WIN && LJ_TARGET_X86)
+#define LJ_UNWIND_JIT 1
+#else
+#define LJ_UNWIND_JIT 0
+#endif
+
+/* Compatibility with Lua 5.1 vs. 5.2. */
+#ifdef LUAJIT_ENABLE_LUA52COMPAT
+#define LJ_52 1
+#else
+#define LJ_52 0
+#endif
+
+/* -- VM security --------------------------------------------------------- */
+
+/* Don't make any changes here. Instead build with:
+** make "XCFLAGS=-DLUAJIT_SECURITY_flag=value"
+**
+** Important note to distro maintainers: DO NOT change the defaults for a
+** regular distro build -- neither upwards, nor downwards!
+** These build-time configurable security flags are intended for embedders
+** who may have specific needs wrt. security vs. performance.
+*/
+
+/* Security defaults. */
+#ifndef LUAJIT_SECURITY_PRNG
+/* PRNG init: 0 = fixed/insecure, 1 = secure from OS. */
+#define LUAJIT_SECURITY_PRNG 1
+#endif
+
+#ifndef LUAJIT_SECURITY_STRHASH
+/* String hash: 0 = sparse only, 1 = sparse + dense. */
+#define LUAJIT_SECURITY_STRHASH 1
+#endif
+
+#ifndef LUAJIT_SECURITY_STRID
+/* String IDs: 0 = linear, 1 = reseed < 255, 2 = reseed < 15, 3 = random. */
+#define LUAJIT_SECURITY_STRID 1
+#endif
+
+#ifndef LUAJIT_SECURITY_MCODE
+/* Machine code page protection: 0 = insecure RWX, 1 = secure RW^X. */
+#define LUAJIT_SECURITY_MCODE 1
+#endif
+
+#define LJ_SECURITY_MODE \
+ ( 0u \
+ | ((LUAJIT_SECURITY_PRNG & 3) << 0) \
+ | ((LUAJIT_SECURITY_STRHASH & 3) << 2) \
+ | ((LUAJIT_SECURITY_STRID & 3) << 4) \
+ | ((LUAJIT_SECURITY_MCODE & 3) << 6) \
+ )
+#define LJ_SECURITY_MODESTRING \
+ "\004prng\007strhash\005strid\005mcode"
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_asm.c b/libs/luajit-cmake/luajit/src/lj_asm.c
new file mode 100644
index 0000000..6f5e0c4
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm.c
@@ -0,0 +1,2571 @@
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_asm_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_asm.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_target.h"
+
+#ifdef LUA_USE_ASSERT
+#include <stdio.h>
+#endif
+
+/* -- Assembler state and common macros ----------------------------------- */
+
+/* Assembler state. */
+typedef struct ASMState {
+ RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */
+
+ MCode *mcp; /* Current MCode pointer (grows down). */
+ MCode *mclim; /* Lower limit for MCode memory + red zone. */
+#ifdef LUA_USE_ASSERT
+ MCode *mcp_prev; /* Red zone overflow check. */
+#endif
+
+ IRIns *ir; /* Copy of pointer to IR instructions/constants. */
+ jit_State *J; /* JIT compiler state. */
+
+#if LJ_TARGET_X86ORX64
+ x86ModRM mrm; /* Fused x86 address operand. */
+#endif
+
+ RegSet freeset; /* Set of free registers. */
+ RegSet modset; /* Set of registers modified inside the loop. */
+ RegSet weakset; /* Set of weakly referenced registers. */
+ RegSet phiset; /* Set of PHI registers. */
+
+ uint32_t flags; /* Copy of JIT compiler flags. */
+ int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
+
+ int32_t evenspill; /* Next even spill slot. */
+ int32_t oddspill; /* Next odd spill slot (or 0). */
+
+ IRRef curins; /* Reference of current instruction. */
+ IRRef stopins; /* Stop assembly before hitting this instruction. */
+ IRRef orignins; /* Original T->nins. */
+
+ IRRef snapref; /* Current snapshot is active after this reference. */
+ IRRef snaprename; /* Rename highwater mark for snapshot check. */
+ SnapNo snapno; /* Current snapshot number. */
+ SnapNo loopsnapno; /* Loop snapshot number. */
+ int snapalloc; /* Current snapshot needs allocation. */
+ BloomFilter snapfilt1, snapfilt2; /* Filled with snapshot refs. */
+
+ IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
+ IRRef sectref; /* Section base reference (loopref or 0). */
+ IRRef loopref; /* Reference of LOOP instruction (or 0). */
+
+ BCReg topslot; /* Number of slots for stack check (unless 0). */
+ int32_t gcsteps; /* Accumulated number of GC steps (per section). */
+
+ GCtrace *T; /* Trace to assemble. */
+ GCtrace *parent; /* Parent trace (or NULL). */
+
+ MCode *mcbot; /* Bottom of reserved MCode. */
+ MCode *mctop; /* Top of generated MCode. */
+ MCode *mctoporig; /* Original top of generated MCode. */
+ MCode *mcloop; /* Pointer to loop MCode (or NULL). */
+ MCode *invmcp; /* Points to invertible loop branch (or NULL). */
+ MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */
+ MCode *realign; /* Realign loop if not NULL. */
+
+#ifdef RID_NUM_KREF
+ intptr_t krefk[RID_NUM_KREF];
+#endif
+ IRRef1 phireg[RID_MAX]; /* PHI register references. */
+ uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */
+} ASMState;
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertA(c, ...) lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
+#else
+#define lj_assertA(c, ...) ((void)as)
+#endif
+
+#define IR(ref) (&as->ir[(ref)])
+
+#define ASMREF_TMP1 REF_TRUE /* Temp. register. */
+#define ASMREF_TMP2 REF_FALSE /* Temp. register. */
+#define ASMREF_L REF_NIL /* Stores register for L. */
+
+/* Check for variant to invariant references. */
+#define iscrossref(as, ref) ((ref) < as->sectref)
+
+/* Inhibit memory op fusion from variant to invariant references. */
+#define FUSE_DISABLED (~(IRRef)0)
+#define mayfuse(as, ref) ((ref) > as->fuseref)
+#define neverfuse(as) (as->fuseref == FUSE_DISABLED)
+#define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
+#define opisfusableload(o) \
+ ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
+ (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
+
+/* Sparse limit checks using a red zone before the actual limit. */
+#define MCLIM_REDZONE 64
+
+static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
+{
+ lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
+}
+
+static LJ_AINLINE void checkmclim(ASMState *as)
+{
+#ifdef LUA_USE_ASSERT
+ if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
+ IRIns *ir = IR(as->curins+1);
+ lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp,
+ as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
+ }
+#endif
+ if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
+#ifdef LUA_USE_ASSERT
+ as->mcp_prev = as->mcp;
+#endif
+}
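+
+/* Rationale sketch (descriptive, not normative): machine code is emitted
+** top-down (mcp grows downwards) and no single IR instruction emits more
+** than MCLIM_REDZONE bytes, so one mcp < mclim check per instruction
+** suffices; the assert build verifies this invariant via mcp_prev.
+*/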
+
+#ifdef RID_NUM_KREF
+#define ra_iskref(ref) ((ref) < RID_NUM_KREF)
+#define ra_krefreg(ref) ((Reg)(RID_MIN_KREF + (Reg)(ref)))
+#define ra_krefk(as, ref) (as->krefk[(ref)])
+
+static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
+{
+ IRRef ref = (IRRef)(r - RID_MIN_KREF);
+ as->krefk[ref] = k;
+ as->cost[r] = REGCOST(ref, ref);
+}
+
+#else
+#define ra_iskref(ref) 0
+#define ra_krefreg(ref) RID_MIN_GPR
+#define ra_krefk(as, ref) 0
+#endif
+
+/* Arch-specific field offsets. */
+static const uint8_t field_ofs[IRFL__MAX+1] = {
+#define FLOFS(name, ofs) (uint8_t)(ofs),
+IRFLDEF(FLOFS)
+#undef FLOFS
+ 0
+};
+
+/* -- Target-specific instruction emitter --------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_emit_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_emit_arm.h"
+#elif LJ_TARGET_ARM64
+#include "lj_emit_arm64.h"
+#elif LJ_TARGET_PPC
+#include "lj_emit_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_emit_mips.h"
+#else
+#error "Missing instruction emitter for target CPU"
+#endif
+
+/* Generic load/store of register from/to stack slot. */
+#define emit_spload(as, ir, r, ofs) \
+ emit_loadofs(as, ir, (r), RID_SP, (ofs))
+#define emit_spstore(as, ir, r, ofs) \
+ emit_storeofs(as, ir, (r), RID_SP, (ofs))
+
+/* -- Register allocator debugging ---------------------------------------- */
+
+/* #define LUAJIT_DEBUG_RA */
+
+#ifdef LUAJIT_DEBUG_RA
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#define RIDNAME(name) #name,
+static const char *const ra_regname[] = {
+ GPRDEF(RIDNAME)
+ FPRDEF(RIDNAME)
+ VRIDDEF(RIDNAME)
+ NULL
+};
+#undef RIDNAME
+
+static char ra_dbg_buf[65536];
+static char *ra_dbg_p;
+static char *ra_dbg_merge;
+static MCode *ra_dbg_mcp;
+
+static void ra_dstart(void)
+{
+ ra_dbg_p = ra_dbg_buf;
+ ra_dbg_merge = NULL;
+ ra_dbg_mcp = NULL;
+}
+
+static void ra_dflush(void)
+{
+ fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
+ ra_dstart();
+}
+
+static void ra_dprintf(ASMState *as, const char *fmt, ...)
+{
+ char *p;
+ va_list argp;
+ va_start(argp, fmt);
+ p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
+ ra_dbg_mcp = NULL;
+ p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
+ for (;;) {
+ const char *e = strchr(fmt, '$');
+ if (e == NULL) break;
+ memcpy(p, fmt, (size_t)(e-fmt));
+ p += e-fmt;
+ if (e[1] == 'r') {
+ Reg r = va_arg(argp, Reg) & RID_MASK;
+ if (r <= RID_MAX) {
+ const char *q;
+ for (q = ra_regname[r]; *q; q++)
+ *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
+ } else {
+ *p++ = '?';
+ lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
+ }
+ } else if (e[1] == 'f' || e[1] == 'i') {
+ IRRef ref;
+ if (e[1] == 'f')
+ ref = va_arg(argp, IRRef);
+ else
+ ref = va_arg(argp, IRIns *) - as->ir;
+ if (ref >= REF_BIAS)
+ p += sprintf(p, "%04d", ref - REF_BIAS);
+ else
+ p += sprintf(p, "K%03d", REF_BIAS - ref);
+ } else if (e[1] == 's') {
+ uint32_t slot = va_arg(argp, uint32_t);
+ p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
+ } else if (e[1] == 'x') {
+ p += sprintf(p, "%08x", va_arg(argp, int32_t));
+ } else {
+ lj_assertA(0, "bad debug format code");
+ }
+ fmt = e+2;
+ }
+ va_end(argp);
+ while (*fmt)
+ *p++ = *fmt++;
+ *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
+ if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
+ fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
+ p = ra_dbg_buf;
+ }
+ ra_dbg_p = p;
+}
+
+#define RA_DBG_START() ra_dstart()
+#define RA_DBG_FLUSH() ra_dflush()
+#define RA_DBG_REF() \
+ do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
+ ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
+#define RA_DBGX(x) ra_dprintf x
+
+#else
+#define RA_DBG_START() ((void)0)
+#define RA_DBG_FLUSH() ((void)0)
+#define RA_DBG_REF() ((void)0)
+#define RA_DBGX(x) ((void)0)
+#endif
+
+/* -- Register allocator -------------------------------------------------- */
+
+#define ra_free(as, r) rset_set(as->freeset, (r))
+#define ra_modified(as, r) rset_set(as->modset, (r))
+#define ra_weak(as, r) rset_set(as->weakset, (r))
+#define ra_noweak(as, r) rset_clear(as->weakset, (r))
+
+#define ra_used(ir) (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
+
+/* Setup register allocator. */
+static void ra_setup(ASMState *as)
+{
+ Reg r;
+ /* Initially all regs (except the stack pointer) are free for use. */
+ as->freeset = RSET_INIT;
+ as->modset = RSET_EMPTY;
+ as->weakset = RSET_EMPTY;
+ as->phiset = RSET_EMPTY;
+ memset(as->phireg, 0, sizeof(as->phireg));
+ for (r = RID_MIN_GPR; r < RID_MAX; r++)
+ as->cost[r] = REGCOST(~0u, 0u);
+}
+
+/* Rematerialize constants. */
+static Reg ra_rematk(ASMState *as, IRRef ref)
+{
+ IRIns *ir;
+ Reg r;
+ if (ra_iskref(ref)) {
+ r = ra_krefreg(ref);
+ lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
+ ra_free(as, r);
+ ra_modified(as, r);
+#if LJ_64
+ emit_loadu64(as, r, ra_krefk(as, ref));
+#else
+ emit_loadi(as, r, ra_krefk(as, ref));
+#endif
+ return r;
+ }
+ ir = IR(ref);
+ r = ir->r;
+ lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
+ lj_assertA(!ra_hasspill(ir->s),
+ "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT; /* Do not keep any hint. */
+ RA_DBGX((as, "remat $i $r", ir, r));
+#if !LJ_SOFTFP32
+ if (ir->o == IR_KNUM) {
+ emit_loadk64(as, r, ir);
+ } else
+#endif
+ if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
+ ra_sethint(ir->r, RID_BASE); /* Restore BASE register hint. */
+ emit_getgl(as, r, jit_base);
+ } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
+ /* REF_NIL stores ASMREF_L register. */
+ lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
+ emit_getgl(as, r, cur_L);
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadu64(as, r, ir_kint64(ir)->u64);
+#if LJ_GC64
+ } else if (ir->o == IR_KGC) {
+ emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
+ } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
+#endif
+#endif
+ } else {
+ lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
+ "rematk of bad IR op %d", ir->o);
+ emit_loadi(as, r, ir->i);
+ }
+ return r;
+}
+
+/* Force a spill. Allocate a new spill slot if needed. */
+static int32_t ra_spill(ASMState *as, IRIns *ir)
+{
+ int32_t slot = ir->s;
+ lj_assertA(ir >= as->ir + REF_TRUE,
+ "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
+ if (!ra_hasspill(slot)) {
+ if (irt_is64(ir->t)) {
+ slot = as->evenspill;
+ as->evenspill += 2;
+ } else if (as->oddspill) {
+ slot = as->oddspill;
+ as->oddspill = 0;
+ } else {
+ slot = as->evenspill;
+ as->oddspill = slot+1;
+ as->evenspill += 2;
+ }
+ if (as->evenspill > 256)
+ lj_trace_err(as->J, LJ_TRERR_SPILLOV);
+ ir->s = (uint8_t)slot;
+ }
+ return sps_scale(slot);
+}
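+
+/* Worked example of the slot pairing above (illustrative): starting from
+** evenspill = 2, oddspill = 0, a 32-bit ref takes slot 2 (oddspill becomes 3,
+** evenspill 4), the next 32-bit ref fills slot 3, and a following 64-bit ref
+** takes the aligned pair 4/5. sps_scale() converts slots to SP offsets.
+*/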
+
+/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
+static Reg ra_releasetmp(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ Reg r = ir->r;
+ lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
+ lj_assertA(!ra_hasspill(ir->s),
+ "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
+ ra_free(as, r);
+ ra_modified(as, r);
+ ir->r = RID_INIT;
+ return r;
+}
+
+/* Restore a register (marked as free). Rematerialize or force a spill. */
+static Reg ra_restore(ASMState *as, IRRef ref)
+{
+ if (emit_canremat(ref)) {
+ return ra_rematk(as, ref);
+ } else {
+ IRIns *ir = IR(ref);
+ int32_t ofs = ra_spill(as, ir); /* Force a spill slot. */
+ Reg r = ir->r;
+ lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
+ ra_sethint(ir->r, r); /* Keep hint. */
+ ra_free(as, r);
+ if (!rset_test(as->weakset, r)) { /* Only restore non-weak references. */
+ ra_modified(as, r);
+ RA_DBGX((as, "restore $i $r", ir, r));
+ emit_spload(as, ir, r, ofs);
+ }
+ return r;
+ }
+}
+
+/* Save a register to a spill slot. */
+static void ra_save(ASMState *as, IRIns *ir, Reg r)
+{
+ RA_DBGX((as, "save $i $r", ir, r));
+ emit_spstore(as, ir, r, sps_scale(ir->s));
+}
+
+#define MINCOST(name) \
+ if (rset_test(RSET_ALL, RID_##name) && \
+ LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
+ cost = as->cost[RID_##name];
+
+/* Evict the register with the lowest cost, forcing a restore. */
+static Reg ra_evict(ASMState *as, RegSet allow)
+{
+ IRRef ref;
+ RegCost cost = ~(RegCost)0;
+ lj_assertA(allow != RSET_EMPTY, "evict from empty set");
+ if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
+ GPRDEF(MINCOST)
+ } else {
+ FPRDEF(MINCOST)
+ }
+ ref = regcost_ref(cost);
+ lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
+ "evict of out-of-range IR %04d", ref - REF_BIAS);
+ /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
+ if (!irref_isk(ref) && (as->weakset & allow)) {
+ IRIns *ir = IR(ref);
+ if (!rset_test(as->weakset, ir->r))
+ ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
+ }
+ return ra_restore(as, ref);
+}
+
+/* Pick any register (marked as free). Evict on-demand. */
+static Reg ra_pick(ASMState *as, RegSet allow)
+{
+ RegSet pick = as->freeset & allow;
+ if (!pick)
+ return ra_evict(as, allow);
+ else
+ return rset_picktop(pick);
+}
+
+/* Get a scratch register (marked as free). */
+static Reg ra_scratch(ASMState *as, RegSet allow)
+{
+ Reg r = ra_pick(as, allow);
+ ra_modified(as, r);
+ RA_DBGX((as, "scratch $r", r));
+ return r;
+}
+
+/* Evict all registers from a set (if not free). */
+static void ra_evictset(ASMState *as, RegSet drop)
+{
+ RegSet work;
+ as->modset |= drop;
+#if !LJ_SOFTFP
+ work = (drop & ~as->freeset) & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+#endif
+ work = (drop & ~as->freeset);
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+}
+
+/* Evict (rematerialize) all registers allocated to constants. */
+static void ra_evictk(ASMState *as)
+{
+ RegSet work;
+#if !LJ_SOFTFP
+ work = ~as->freeset & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+#endif
+ work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ if (emit_canremat(ref) && irref_isk(ref)) {
+ ra_rematk(as, ref);
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
+
+#ifdef RID_NUM_KREF
+/* Allocate a register for a constant. */
+static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
+{
+ /* First try to find a register which already holds the same constant. */
+ RegSet pick, work = ~as->freeset & RSET_GPR;
+ Reg r;
+ while (work) {
+ IRRef ref;
+ r = rset_pickbot(work);
+ ref = regcost_ref(as->cost[r]);
+#if LJ_64
+ if (ref < ASMREF_L) {
+ if (ra_iskref(ref)) {
+ if (k == ra_krefk(as, ref))
+ return r;
+ } else {
+ IRIns *ir = IR(ref);
+ if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
+#if LJ_GC64
+ (ir->o == IR_KINT && k == ir->i) ||
+ (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
+ ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
+ k == (intptr_t)ir_kptr(ir))
+#else
+ (ir->o != IR_KINT64 && k == ir->i)
+#endif
+ )
+ return r;
+ }
+ }
+#else
+ if (ref < ASMREF_L &&
+ k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
+ return r;
+#endif
+ rset_clear(work, r);
+ }
+ pick = as->freeset & allow;
+ if (pick) {
+ /* Constants should preferably get unmodified registers. */
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ r = ra_evict(as, allow);
+ }
+ RA_DBGX((as, "allock $x $r", k, r));
+ ra_setkref(as, r, k);
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a specific register for a constant. */
+static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
+{
+ Reg kr = ra_allock(as, k, RID2RSET(r));
+ if (kr != r) {
+ IRIns irdummy;
+ irdummy.t.irt = IRT_INT;
+ ra_scratch(as, RID2RSET(r));
+ emit_movrr(as, &irdummy, r, kr);
+ }
+}
+#else
+#define ra_allockreg(as, k, r) emit_loadi(as, (r), (k))
+#endif
+
+/* Allocate a register for ref from the allowed set of registers.
+** Note: this function assumes the ref does NOT have a register yet!
+** Picks an optimal register, sets the cost and marks the register as non-free.
+*/
+static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ RegSet pick = as->freeset & allow;
+ Reg r;
+ lj_assertA(ra_noreg(ir->r),
+ "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
+ if (pick) {
+ /* First check register hint from propagation or PHI. */
+ if (ra_hashint(ir->r)) {
+ r = ra_gethint(ir->r);
+ if (rset_test(pick, r)) /* Use hint register if possible. */
+ goto found;
+ /* Rematerialization is cheaper than missing a hint. */
+ if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
+ ra_rematk(as, regcost_ref(as->cost[r]));
+ goto found;
+ }
+ RA_DBGX((as, "hintmiss $f $r", ref, r));
+ }
+ /* Invariants should preferably get unmodified registers. */
+ if (ref < as->loopref && !irt_isphi(ir->t)) {
+ if ((pick & ~as->modset))
+ pick &= ~as->modset;
+ r = rset_pickbot(pick); /* Reduce conflicts with inverse allocation. */
+ } else {
+ /* We've got plenty of regs, so get callee-save regs if possible. */
+ if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
+ pick &= ~RSET_SCRATCH;
+ r = rset_picktop(pick);
+ }
+ } else {
+ r = ra_evict(as, allow);
+ }
+found:
+ RA_DBGX((as, "alloc $f $r", ref, r));
+ ir->r = (uint8_t)r;
+ rset_clear(as->freeset, r);
+ ra_noweak(as, r);
+ as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
+ return r;
+}
+
+/* Allocate a register on-demand. */
+static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ /* Note: allow is ignored if the register is already allocated. */
+ if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Add a register rename to the IR. */
+static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
+{
+ IRRef ren;
+ lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
+ ren = tref_ref(lj_ir_emit(as->J));
+ as->J->cur.ir[ren].r = (uint8_t)down;
+ as->J->cur.ir[ren].s = SPS_NONE;
+}
+
+/* Rename register allocation and emit move. */
+static void ra_rename(ASMState *as, Reg down, Reg up)
+{
+ IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
+ IRIns *ir = IR(ref);
+ ir->r = (uint8_t)up;
+ as->cost[down] = 0;
+ lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
+ "rename between GPR/FPR %d and %d", down, up);
+ lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
+ lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
+ ra_free(as, down); /* 'down' is free ... */
+ ra_modified(as, down);
+ rset_clear(as->freeset, up); /* ... and 'up' is now allocated. */
+ ra_noweak(as, up);
+ RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
+ emit_movrr(as, ir, down, up); /* Backwards codegen needs inverse move. */
+ if (!ra_hasspill(IR(ref)->s)) { /* Add the rename to the IR. */
+ /*
+ ** The rename is effective at the subsequent (already emitted) exit
+ ** branch. This is for the current snapshot (as->snapno). Except if we
+ ** haven't yet allocated any refs for the snapshot (as->snapalloc == 1),
+ ** then it belongs to the next snapshot.
+ ** See also the discussion at asm_snap_checkrename().
+ */
+ ra_addrename(as, down, ref, as->snapno + as->snapalloc);
+ }
+}
+
+/* Pick a destination register (marked as free).
+** Caveat: allow is ignored if there's already a destination register.
+** Use ra_destreg() to get a specific register.
+*/
+static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
+{
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ } else {
+ if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
+ dest = ra_gethint(dest);
+ ra_modified(as, dest);
+ RA_DBGX((as, "dest $r", dest));
+ } else {
+ dest = ra_scratch(as, allow);
+ }
+ ir->r = dest;
+ }
+ if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
+ return dest;
+}
+
+/* Force a specific destination register (marked as free). */
+static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
+{
+ Reg dest = ra_dest(as, ir, RID2RSET(r));
+ if (dest != r) {
+ lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
+ ra_modified(as, r);
+ emit_movrr(as, ir, dest, r);
+ }
+}
+
+#if LJ_TARGET_X86ORX64
+/* Propagate dest register to left reference. Emit moves as needed.
+** This is a required fixup step for all 2-operand machine instructions.
+*/
+static void ra_left(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ if (irref_isk(lref)) {
+ if (ir->o == IR_KNUM) {
+ /* FP remat needs a load except for +0. Still better than eviction. */
+ if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
+ emit_loadk64(as, dest, ir);
+ return;
+ }
+#if LJ_64
+ } else if (ir->o == IR_KINT64) {
+ emit_loadk64(as, dest, ir);
+ return;
+#if LJ_GC64
+ } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ emit_loadk64(as, dest, ir);
+ return;
+#endif
+#endif
+ } else if (ir->o != IR_KPRI) {
+ lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
+ ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
+ "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
+ emit_loadi(as, dest, ir->i);
+ return;
+ }
+ }
+ if (!ra_hashint(left) && !iscrossref(as, lref))
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#else
+/* Similar to ra_left, except we override any hints. */
+static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
+{
+ IRIns *ir = IR(lref);
+ Reg left = ir->r;
+ if (ra_noreg(left)) {
+ ra_sethint(ir->r, dest); /* Propagate register hint. */
+ left = ra_allocref(as, lref,
+ (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
+ }
+ ra_noweak(as, left);
+ if (dest != left) {
+ /* Use register renaming if dest is the PHI reg. */
+ if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
+ ra_modified(as, left);
+ ra_rename(as, left, dest);
+ } else {
+ emit_movrr(as, ir, dest, left);
+ }
+ }
+}
+#endif
+
+/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
+static void ra_destpair(ASMState *as, IRIns *ir)
+{
+ Reg destlo = ir->r, desthi = (ir+1)->r;
+ IRIns *irx = (LJ_64 && !irt_is64(ir->t)) ? ir+1 : ir;
+ /* First spill unrelated refs blocking the destination registers. */
+ if (!rset_test(as->freeset, RID_RETLO) &&
+ destlo != RID_RETLO && desthi != RID_RETLO)
+ ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
+ if (!rset_test(as->freeset, RID_RETHI) &&
+ destlo != RID_RETHI && desthi != RID_RETHI)
+ ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
+ /* Next free the destination registers (if any). */
+ if (ra_hasreg(destlo)) {
+ ra_free(as, destlo);
+ ra_modified(as, destlo);
+ } else {
+ destlo = RID_RETLO;
+ }
+ if (ra_hasreg(desthi)) {
+ ra_free(as, desthi);
+ ra_modified(as, desthi);
+ } else {
+ desthi = RID_RETHI;
+ }
+ /* Check for conflicts and shuffle the registers as needed. */
+ if (destlo == RID_RETHI) {
+ if (desthi == RID_RETLO) {
+#if LJ_TARGET_X86ORX64
+ *--as->mcp = XI_XCHGa + RID_RETHI;
+ if (LJ_64 && irt_is64(irx->t)) *--as->mcp = 0x48;
+#else
+ emit_movrr(as, irx, RID_RETHI, RID_TMP);
+ emit_movrr(as, irx, RID_RETLO, RID_RETHI);
+ emit_movrr(as, irx, RID_TMP, RID_RETLO);
+#endif
+ } else {
+ emit_movrr(as, irx, RID_RETHI, RID_RETLO);
+ if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
+ }
+ } else if (desthi == RID_RETLO) {
+ emit_movrr(as, irx, RID_RETLO, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
+ } else {
+ if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
+ if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
+ }
+ /* Restore spill slots (if any). */
+ if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
+ if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
+}
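+/* Example: if the results ended up crosswise (destlo == RID_RETHI and
+** desthi == RID_RETLO), the pair must be swapped. On x86/x64 this is a
+** one-byte XCHG with the accumulator (plus a REX.W prefix for 64 bit
+** values); other targets use the three-move swap through RID_TMP, i.e.
+** 'mov TMP,RETLO; mov RETLO,RETHI; mov RETHI,TMP' in final code order.
+*/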
+
+/* -- Snapshot handling --------------------------------------------------- */
+
+/* Can we rematerialize a KNUM instead of forcing a spill? */
+static int asm_snap_canremat(ASMState *as)
+{
+ Reg r;
+ for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
+ if (irref_isk(regcost_ref(as->cost[r])))
+ return 1;
+ return 0;
+}
+
+/* Check whether a sunk store corresponds to an allocation. */
+static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
+{
+ if (irs->s == 255) {
+ if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+ IRIns *irk = IR(irs->op1);
+ if (irk->o == IR_AREF || irk->o == IR_HREFK)
+ irk = IR(irk->op1);
+ return (IR(irk->op1) == ira);
+ }
+ return 0;
+ } else {
+ return (ira + irs->s == irs); /* Quick check. */
+ }
+}
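+/* Note: for sunk stores the sink pass presumably records the forward
+** distance from the allocation in irs->s, so 'ira + irs->s == irs' is a
+** sufficient check. A value of 255 means the distance did not fit, and
+** the slow path re-derives the allocation via the store's key chain.
+*/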
+
+/* Allocate register or spill slot for a ref that escapes to a snapshot. */
+static void asm_snap_alloc1(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (!irref_isk(ref) && ir->r != RID_SUNK) {
+ bloomset(as->snapfilt1, ref);
+ bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
+ if (ra_used(ir)) return;
+ if (ir->r == RID_SINK) {
+ ir->r = RID_SUNK;
+#if LJ_HASFFI
+ if (ir->o == IR_CNEWI) { /* Allocate CNEWI value. */
+ asm_snap_alloc1(as, ir->op2);
+ if (LJ_32 && (ir+1)->o == IR_HIOP)
+ asm_snap_alloc1(as, (ir+1)->op2);
+ } else
+#endif
+ { /* Allocate stored values for TNEW, TDUP and CNEW. */
+ IRIns *irs;
+ lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
+ "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
+ for (irs = IR(as->snapref-1); irs > ir; irs--)
+ if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
+ lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE || irs->o == IR_XSTORE,
+ "sunk store IR %04d has bad op %d",
+ (int)(irs - as->ir) - REF_BIAS, irs->o);
+ asm_snap_alloc1(as, irs->op2);
+ if (LJ_32 && (irs+1)->o == IR_HIOP)
+ asm_snap_alloc1(as, (irs+1)->op2);
+ }
+ }
+ } else {
+ RegSet allow;
+ if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
+ IRIns *irc;
+ for (irc = IR(as->curins); irc > ir; irc--)
+ if ((irc->op1 == ref || irc->op2 == ref) &&
+ !(irc->r == RID_SINK || irc->r == RID_SUNK))
+ goto nosink; /* Don't sink conversion if result is used. */
+ asm_snap_alloc1(as, ir->op1);
+ return;
+ }
+ nosink:
+ allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
+ if ((as->freeset & allow) ||
+ (allow == RSET_FPR && asm_snap_canremat(as))) {
+ /* Get a weak register if we have a free one or can rematerialize. */
+ Reg r = ra_allocref(as, ref, allow); /* Allocate a register. */
+ if (!irt_isphi(ir->t))
+ ra_weak(as, r); /* But mark it as weakly referenced. */
+ checkmclim(as);
+ RA_DBGX((as, "snapreg $f $r", ref, ir->r));
+ } else {
+ ra_spill(as, ir); /* Otherwise force a spill slot. */
+ RA_DBGX((as, "snapspill $f $s", ref, ir->s));
+ }
+ }
+ }
+}
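+/* Note on the CONV case above: a NUM<-INT conversion whose result only
+** escapes to snapshots doesn't need an FPR of its own. Allocating the
+** integer operand suffices, since the exit handling can presumably redo
+** the widening; the backward scan merely verifies that no real
+** (non-sunk) use of the conversion exists.
+*/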
+
+/* Allocate refs escaping to a snapshot. */
+static void asm_snap_alloc(ASMState *as, int snapno)
+{
+ SnapShot *snap = &as->T->snap[snapno];
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ as->snapfilt1 = as->snapfilt2 = 0;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef ref = snap_ref(sn);
+ if (!irref_isk(ref)) {
+ asm_snap_alloc1(as, ref);
+ if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
+ lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
+ "snap %d[%d] points to bad SOFTFP IR %04d",
+ snapno, n, ref - REF_BIAS);
+ asm_snap_alloc1(as, ref+1);
+ }
+ }
+ }
+}
+
+/* All guards for a snapshot use the same exitno. This is currently the
+** same as the snapshot number. Since the exact origin of the exit cannot
+** be determined, all guards for the same snapshot must exit with the same
+** RegSP mapping.
+** A renamed ref which has been used in a prior guard for the same snapshot
+** would cause an inconsistency. The easy way out is to force a spill slot.
+*/
+static int asm_snap_checkrename(ASMState *as, IRRef ren)
+{
+ if (bloomtest(as->snapfilt1, ren) &&
+ bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
+ IRIns *ir = IR(ren);
+ ra_spill(as, ir); /* Register renamed, so force a spill slot. */
+ RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
+ return 1; /* Found. */
+ }
+ return 0; /* Not found. */
+}
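+/* Note: the paired bloom filters may yield false positives, but never
+** false negatives. A false positive merely forces a redundant spill
+** slot, which is safe; a miss in either filter proves the ref did not
+** escape to this snapshot, so the rename can be kept.
+*/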
+
+/* Prepare snapshot for next guard or throwing instruction. */
+static void asm_snap_prep(ASMState *as)
+{
+ if (as->snapalloc) {
+ /* Alloc on first invocation for each snapshot. */
+ as->snapalloc = 0;
+ asm_snap_alloc(as, as->snapno);
+ as->snaprename = as->T->nins;
+ } else {
+ /* Check any renames above the highwater mark. */
+ for (; as->snaprename < as->T->nins; as->snaprename++) {
+ IRIns *ir = &as->T->ir[as->snaprename];
+ if (asm_snap_checkrename(as, ir->op1))
+ ir->op2 = REF_BIAS-1; /* Kill rename. */
+ }
+ }
+}
+
+/* Move to previous snapshot when we cross the current snapshot ref. */
+static void asm_snap_prev(ASMState *as)
+{
+ if (as->curins < as->snapref) {
+ uintptr_t ofs = (uintptr_t)(as->mctoporig - as->mcp);
+ if (ofs >= 0x10000) lj_trace_err(as->J, LJ_TRERR_MCODEOV);
+ do {
+ if (as->snapno == 0) return;
+ as->snapno--;
+ as->snapref = as->T->snap[as->snapno].ref;
+ as->T->snap[as->snapno].mcofs = (uint16_t)ofs; /* Remember mcode ofs. */
+ } while (as->curins < as->snapref); /* May have no ins in between. */
+ as->snapalloc = 1;
+ }
+}
+
+/* Fixup snapshot mcode offsets. */
+static void asm_snap_fixup_mcofs(ASMState *as)
+{
+ uint32_t sz = (uint32_t)(as->mctoporig - as->mcp);
+ SnapShot *snap = as->T->snap;
+ SnapNo i;
+ for (i = as->T->nsnap-1; i > 0; i--) {
+ /* Compute offset from mcode start and store in correct snapshot. */
+ snap[i].mcofs = (uint16_t)(sz - snap[i-1].mcofs);
+ }
+ snap[0].mcofs = 0;
+}
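+/* Example with made-up numbers: assembly runs backwards, so mcofs is
+** first recorded as a distance from the *top* of the machine code. With
+** total size sz = 200 and a recorded snap[0].mcofs of 180, the loop
+** rewrites snap[1].mcofs = 200 - 180 = 20, i.e. 20 bytes from the start
+** of the trace mcode, and snap[0] is anchored at offset 0.
+*/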
+
+/* -- Miscellaneous helpers ----------------------------------------------- */
+
+/* Calculate stack adjustment. */
+static int32_t asm_stack_adjust(ASMState *as)
+{
+ if (as->evenspill <= SPS_FIXED)
+ return 0;
+ return sps_scale(sps_align(as->evenspill));
+}
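+/* Example: assuming 4-byte spill slots aligned to 4-slot groups as on
+** x86/x64, as->evenspill = 7 would round up to 8 slots and scale to a
+** 32 byte adjustment, keeping the stack pointer suitably aligned.
+*/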
+
+/* Must match with hash*() in lj_tab.c. */
+static uint32_t ir_khash(ASMState *as, IRIns *ir)
+{
+ uint32_t lo, hi;
+ UNUSED(as);
+ if (irt_isstr(ir->t)) {
+ return ir_kstr(ir)->sid;
+ } else if (irt_isnum(ir->t)) {
+ lo = ir_knum(ir)->u32.lo;
+ hi = ir_knum(ir)->u32.hi << 1;
+ } else if (irt_ispri(ir->t)) {
+ lj_assertA(!irt_isnil(ir->t), "hash of nil key");
+ return irt_type(ir->t)-IRT_FALSE;
+ } else {
+ lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
+ lo = u32ptr(ir_kgc(ir));
+#if LJ_GC64
+ hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
+#else
+ hi = lo + HASH_BIAS;
+#endif
+ }
+ return hashrot(lo, hi);
+}
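+/* Note: for number keys hi is shifted left by one, discarding the sign
+** bit, so +0 and -0 hash to the same chain. The mixing must stay in
+** lockstep with hash*() in lj_tab.c, or on-trace HREFK lookups would
+** disagree with the interpreter's node positions.
+*/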
+
+/* -- Allocations --------------------------------------------------------- */
+
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
+
+static void asm_snew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
+ IRRef args[3];
+ asm_snap_prep(as);
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const char *str */
+ args[2] = ir->op2; /* size_t len */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+}
+
+static void asm_tnew(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
+ IRRef args[2];
+ asm_snap_prep(as);
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* uint32_t ahsize */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
+}
+
+static void asm_tdup(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
+ IRRef args[2];
+ asm_snap_prep(as);
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* const GCtab *kt */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCtab * */
+ asm_gencall(as, ci, args);
+}
+
+static void asm_gc_check(ASMState *as);
+
+/* Explicit GC step. */
+static void asm_gcstep(ASMState *as, IRIns *ir)
+{
+ IRIns *ira;
+ for (ira = IR(as->stopins+1); ira < ir; ira++)
+ if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
+ (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
+ ra_used(ira))
+ as->gcsteps++;
+ if (as->gcsteps)
+ asm_gc_check(as);
+ as->gcsteps = 0x80000000; /* Prevent implicit GC check further up. */
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode);
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb);
+#endif
+
+static void asm_bufhdr(ASMState *as, IRIns *ir)
+{
+ Reg sb = ra_dest(as, ir, RSET_GPR);
+ switch (ir->op2) {
+ case IRBUFHDR_RESET: {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irbp;
+ irbp.ot = IRT(0, IRT_PTR); /* Buffer data pointer type. */
+ emit_storeofs(as, &irbp, tmp, sb, offsetof(SBuf, w));
+ emit_loadofs(as, &irbp, tmp, sb, offsetof(SBuf, b));
+ break;
+ }
+ case IRBUFHDR_APPEND: {
+ /* Rematerialize const buffer pointer instead of likely spill. */
+ IRIns *irp = IR(ir->op1);
+ if (!(ra_hasreg(irp->r) || irp == ir-1 ||
+ (irp == ir-2 && !ra_used(ir-1)))) {
+ while (!(irp->o == IR_BUFHDR && irp->op2 == IRBUFHDR_RESET))
+ irp = IR(irp->op1);
+ if (irref_isk(irp->op1)) {
+ ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
+ ir = irp;
+ }
+ }
+ break;
+ }
+#if LJ_HASBUFFER
+ case IRBUFHDR_WRITE:
+ asm_bufhdr_write(as, sb);
+ break;
+#endif
+ default: lj_assertA(0, "bad BUFHDR op2 %d", ir->op2); break;
+ }
+#if LJ_TARGET_X86ORX64
+ ra_left(as, sb, ir->op1);
+#else
+ ra_leftov(as, sb, ir->op1);
+#endif
+}
+
+static void asm_bufput(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
+ IRRef args[3];
+ IRIns *irs;
+ int kchar = -129; /* Sentinel outside int8_t range: no char constant. */
+ args[0] = ir->op1; /* SBuf * */
+ args[1] = ir->op2; /* GCstr * */
+ irs = IR(ir->op2);
+ lj_assertA(irt_isstr(irs->t),
+ "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
+ if (irs->o == IR_KGC) {
+ GCstr *s = ir_kstr(irs);
+ if (s->len == 1) { /* Optimize put of single-char string constant. */
+ kchar = (int8_t)strdata(s)[0]; /* Signed! */
+ args[1] = ASMREF_TMP1; /* int, truncated to char */
+ ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
+ }
+ } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
+ if (irs->o == IR_TOSTR) { /* Fuse number to string conversions. */
+ if (irs->op2 == IRTOSTR_NUM) {
+ args[1] = ASMREF_TMP1; /* TValue * */
+ ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
+ } else {
+ lj_assertA(irt_isinteger(IR(irs->op1)->t),
+ "TOSTR of non-numeric IR %04d", irs->op1);
+ args[1] = irs->op1; /* int */
+ if (irs->op2 == IRTOSTR_INT)
+ ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
+ else
+ ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
+ }
+ } else if (irs->o == IR_SNEW) { /* Fuse string allocation. */
+ args[1] = irs->op1; /* const void * */
+ args[2] = irs->op2; /* MSize */
+ ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
+ }
+ }
+ asm_setupresult(as, ir, ci); /* SBuf * */
+ asm_gencall(as, ci, args);
+ if (args[1] == ASMREF_TMP1) {
+ Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
+ if (kchar == -129)
+ asm_tvptr(as, tmp, irs->op1, IRTMPREF_IN1);
+ else
+ ra_allockreg(as, kchar, tmp);
+ }
+}
+
+static void asm_bufstr(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
+ IRRef args[1];
+ args[0] = ir->op1; /* SBuf *sb */
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+}
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tostr(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci;
+ IRRef args[2];
+ asm_snap_prep(as);
+ args[0] = ASMREF_L;
+ as->gcsteps++;
+ if (ir->op2 == IRTOSTR_NUM) {
+ args[1] = ASMREF_TMP1; /* cTValue * */
+ ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
+ } else {
+ args[1] = ir->op1; /* int32_t k */
+ if (ir->op2 == IRTOSTR_INT)
+ ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
+ else
+ ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
+ }
+ asm_setupresult(as, ir, ci); /* GCstr * */
+ asm_gencall(as, ci, args);
+ if (ir->op2 == IRTOSTR_NUM)
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1, IRTMPREF_IN1);
+}
+
+#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ IRCallID id;
+ IRRef args[2];
+ lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
+ "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
+ args[LJ_BE] = (ir-1)->op1;
+ args[LJ_LE] = ir->op1;
+ if (st == IRT_NUM || st == IRT_FLOAT) {
+ id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
+ ir--;
+ } else {
+ id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
+ }
+ {
+#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
+ CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
+ cim.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
+#else
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+#endif
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+ }
+}
+#endif
+
+/* -- Memory references --------------------------------------------------- */
+
+static void asm_newref(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
+ IRRef args[3];
+ if (ir->r == RID_SINK)
+ return;
+ asm_snap_prep(as);
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* GCtab *t */
+ args[2] = ASMREF_TMP1; /* cTValue *key */
+ asm_setupresult(as, ir, ci); /* TValue * */
+ asm_gencall(as, ci, args);
+ asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2, IRTMPREF_IN1);
+}
+
+static void asm_tmpref(ASMState *as, IRIns *ir)
+{
+ Reg r = ra_dest(as, ir, RSET_GPR);
+ asm_tvptr(as, r, ir->op1, ir->op2);
+}
+
+static void asm_lref(ASMState *as, IRIns *ir)
+{
+ Reg r = ra_dest(as, ir, RSET_GPR);
+#if LJ_TARGET_X86ORX64
+ ra_left(as, r, ASMREF_L);
+#else
+ ra_leftov(as, r, ASMREF_L);
+#endif
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Collect arguments from CALL* and CARG instructions. */
+static void asm_collectargs(ASMState *as, IRIns *ir,
+ const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n = CCI_XNARGS(ci);
+ /* Account for split args. */
+ lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
+ if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
+ while (n-- > 1) {
+ ir = IR(ir->op1);
+ lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
+ args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
+ }
+ args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
+ lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
+}
+
+/* Reconstruct CCallInfo flags for CALLX*. */
+static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
+{
+ uint32_t nargs = 0;
+ if (ir->op1 != REF_NIL) { /* Count number of arguments first. */
+ IRIns *ira = IR(ir->op1);
+ nargs++;
+ while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
+ }
+#if LJ_HASFFI
+ if (IR(ir->op2)->o == IR_CARG) { /* Copy calling convention info. */
+ CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
+ CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
+ nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
+#if LJ_TARGET_X86
+ nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
+#endif
+ }
+#endif
+ return (nargs | (ir->t.irt << CCI_OTSHIFT));
+}
+
+static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+static void asm_call(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX];
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ asm_collectargs(as, ir, ci, args);
+ asm_setupresult(as, ir, ci);
+ asm_gencall(as, ci, args);
+}
+
+/* -- PHI and loop handling ----------------------------------------------- */
+
+/* Break a PHI cycle by renaming to a free register (evict if needed). */
+static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
+ RegSet allow)
+{
+ RegSet candidates = blocked & allow;
+ if (candidates) { /* If this register file has candidates. */
+ /* Note: the set for ra_pick cannot be empty, since each register file
+ ** has some registers never allocated to PHIs.
+ */
+ Reg down, up = ra_pick(as, ~blocked & allow); /* Get a free register. */
+ if (candidates & ~blockedby) /* Optimize shifts, else it's a cycle. */
+ candidates = candidates & ~blockedby;
+ down = rset_picktop(candidates); /* Pick candidate PHI register. */
+ ra_rename(as, down, up); /* And rename it to the free register. */
+ }
+}
+
+/* PHI register shuffling.
+**
+** The allocator tries hard to preserve PHI register assignments across
+** the loop body. Most of the time this loop does nothing, since there
+** are no register mismatches.
+**
+** If a register mismatch is detected and ...
+** - the register is currently free: rename it.
+** - the register is blocked by an invariant: restore/remat and rename it.
+** - Otherwise the register is used by another PHI, so mark it as blocked.
+**
+** The renames are order-sensitive, so just retry the loop if a register
+** is marked as blocked, but has been freed in the meantime. A cycle is
+** detected if all of the blocked registers are allocated. To break the
+** cycle rename one of them to a free register and retry.
+**
+** Note that PHI spill slots are kept in sync and don't need to be shuffled.
+*/
+static void asm_phi_shuffle(ASMState *as)
+{
+ RegSet work;
+
+ /* Find and resolve PHI register mismatches. */
+ for (;;) {
+ RegSet blocked = RSET_EMPTY;
+ RegSet blockedby = RSET_EMPTY;
+ RegSet phiset = as->phiset;
+ while (phiset) { /* Check all left PHI operand registers. */
+ Reg r = rset_pickbot(phiset);
+ IRIns *irl = IR(as->phireg[r]);
+ Reg left = irl->r;
+ if (r != left) { /* Mismatch? */
+ if (!rset_test(as->freeset, r)) { /* PHI register blocked? */
+ IRRef ref = regcost_ref(as->cost[r]);
+ /* Blocked by other PHI (w/reg)? */
+ if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
+ rset_set(blocked, r);
+ if (ra_hasreg(left))
+ rset_set(blockedby, left);
+ left = RID_NONE;
+ } else { /* Otherwise grab register from invariant. */
+ ra_restore(as, ref);
+ checkmclim(as);
+ }
+ }
+ if (ra_hasreg(left)) {
+ ra_rename(as, left, r);
+ checkmclim(as);
+ }
+ }
+ rset_clear(phiset, r);
+ }
+ if (!blocked) break; /* Finished. */
+ if (!(as->freeset & blocked)) { /* Break cycles if none are free. */
+ asm_phi_break(as, blocked, blockedby, RSET_GPR);
+ if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
+ checkmclim(as);
+ } /* Else retry some more renames. */
+ }
+
+ /* Restore/remat invariants whose registers are modified inside the loop. */
+#if !LJ_SOFTFP
+ work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+#endif
+ work = as->modset & ~(as->freeset | as->phiset);
+ while (work) {
+ Reg r = rset_pickbot(work);
+ ra_restore(as, regcost_ref(as->cost[r]));
+ rset_clear(work, r);
+ checkmclim(as);
+ }
+
+ /* Allocate and save all unsaved PHI regs and clear marks. */
+ work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */
+ irt_clearmark(ir->t); /* Handled here, so clear marker now. */
+ ra_alloc1(as, lref, RID2RSET(r));
+ ra_save(as, ir, r); /* Save to spill slot inside the loop. */
+ checkmclim(as);
+ }
+ rset_clear(work, r);
+ }
+}
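+/* Example of a PHI cycle: PHI #1 is assigned r1 but its left value
+** currently lives in r2, while PHI #2 is assigned r2 with its left value
+** in r1. Neither rename can go first, both registers end up in 'blocked',
+** and asm_phi_break moves one of them to a free register, after which
+** the next iteration of the loop resolves the remaining mismatches.
+*/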
+
+/* Copy unsynced left/right PHI spill slots. Rarely needed. */
+static void asm_phi_copyspill(ASMState *as)
+{
+ int need = 0;
+ IRIns *ir;
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
+ if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
+ need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */
+ if ((need & 1)) { /* Copy integer spill slots. */
+#if !LJ_TARGET_X86ORX64
+ Reg r = RID_TMP;
+#else
+ Reg r = RID_RET;
+ if ((as->freeset & RSET_GPR))
+ r = rset_pickbot((as->freeset & RSET_GPR));
+ else
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ checkmclim(as);
+ }
+ }
+ }
+#if LJ_TARGET_X86ORX64
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+#endif
+ }
+#if !LJ_SOFTFP
+ if ((need & 2)) { /* Copy FP spill slots. */
+#if LJ_TARGET_X86
+ Reg r = RID_XMM0;
+#else
+ Reg r = RID_FPRET;
+#endif
+ if ((as->freeset & RSET_FPR))
+ r = rset_pickbot((as->freeset & RSET_FPR));
+ if (!rset_test(as->freeset, r))
+ emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
+ if (ra_hasspill(ir->s)) {
+ IRIns *irl = IR(ir->op1);
+ if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
+ emit_spstore(as, irl, r, sps_scale(irl->s));
+ emit_spload(as, ir, r, sps_scale(ir->s));
+ checkmclim(as);
+ }
+ }
+ }
+ if (!rset_test(as->freeset, r))
+ emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
+ }
+#endif
+}
+
+/* Emit renames for left PHIs which are only spilled outside the loop. */
+static void asm_phi_fixup(ASMState *as)
+{
+ RegSet work = as->phiset;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef lref = as->phireg[r];
+ IRIns *ir = IR(lref);
+ if (irt_ismarked(ir->t)) {
+ irt_clearmark(ir->t);
+ /* Left PHI gained a spill slot before the loop? */
+ if (ra_hasspill(ir->s)) {
+ ra_addrename(as, r, lref, as->loopsnapno);
+ }
+ }
+ rset_clear(work, r);
+ }
+}
+
+/* Setup right PHI reference. */
+static void asm_phi(ASMState *as, IRIns *ir)
+{
+ RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
+ ~as->phiset;
+ RegSet afree = (as->freeset & allow);
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ if (ir->r == RID_SINK) /* Sink PHI. */
+ return;
+ /* Spill slot shuffling is not implemented yet (but rarely needed). */
+ if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
+ if ((afree & (afree-1))) { /* Two or more free registers? */
+ Reg r;
+ if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */
+ r = ra_allocref(as, ir->op2, allow);
+ } else { /* Duplicate right PHI, need a copy (rare). */
+ r = ra_scratch(as, allow);
+ emit_movrr(as, irr, r, irr->r);
+ }
+ ir->r = (uint8_t)r;
+ rset_set(as->phiset, r);
+ as->phireg[r] = (IRRef1)ir->op1;
+ irt_setmark(irl->t); /* Marks left PHIs _with_ register. */
+ if (ra_noreg(irl->r))
+ ra_sethint(irl->r, r); /* Set register hint for left PHI. */
+ } else { /* Otherwise allocate a spill slot. */
+ /* This is overly restrictive, but it triggers only on synthetic code. */
+ if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
+ lj_trace_err(as->J, LJ_TRERR_NYIPHI);
+ ra_spill(as, ir);
+ irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */
+ }
+}
+
+static void asm_loop_fixup(ASMState *as);
+
+/* Middle part of a loop. */
+static void asm_loop(ASMState *as)
+{
+ MCode *mcspill;
+ /* LOOP is a guard, so the snapno is up to date. */
+ as->loopsnapno = as->snapno;
+ if (as->gcsteps)
+ asm_gc_check(as);
+ /* LOOP marks the transition from the variant to the invariant part. */
+ as->flagmcp = as->invmcp = NULL;
+ as->sectref = 0;
+ if (!neverfuse(as)) as->fuseref = 0;
+ asm_phi_shuffle(as);
+ mcspill = as->mcp;
+ asm_phi_copyspill(as);
+ asm_loop_fixup(as);
+ as->mcloop = as->mcp;
+ RA_DBGX((as, "===== LOOP ====="));
+ if (!as->realign) RA_DBG_FLUSH();
+ if (as->mcp != mcspill)
+ emit_jmp(as, mcspill);
+}
+
+/* -- Target-specific assembler ------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_asm_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_asm_arm.h"
+#elif LJ_TARGET_ARM64
+#include "lj_asm_arm64.h"
+#elif LJ_TARGET_PPC
+#include "lj_asm_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_asm_mips.h"
+#else
+#error "Missing assembler for target CPU"
+#endif
+
+/* -- Common instruction helpers ------------------------------------------ */
+
+#if !LJ_SOFTFP32
+#if !LJ_TARGET_X86ORX64
+#define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp)
+#endif
+
+static void asm_pow(ASMState *as, IRIns *ir)
+{
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ else
+#endif
+ asm_callid(as, ir, IRCALL_pow);
+}
+
+static void asm_div(ASMState *as, IRIns *ir)
+{
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isnum(ir->t))
+ asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ else
+#endif
+ asm_fpdiv(as, ir);
+}
+#endif
+
+static void asm_mod(ASMState *as, IRIns *ir)
+{
+#if LJ_64 && LJ_HASFFI
+ if (!irt_isint(ir->t))
+ asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ else
+#endif
+ asm_callid(as, ir, IRCALL_lj_vm_modi);
+}
+
+static void asm_fuseequal(ASMState *as, IRIns *ir)
+{
+ /* Fuse HREF + EQ/NE. */
+ if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
+ as->curins--;
+ asm_href(as, ir-1, (IROp)ir->o);
+ } else {
+ asm_equal(as, ir);
+ }
+}
+
+static void asm_alen(ASMState *as, IRIns *ir)
+{
+ asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
+ IRCALL_lj_tab_len_hint);
+}
+
+/* -- Instruction dispatch ------------------------------------------------ */
+
+/* Assemble a single instruction. */
+static void asm_ir(ASMState *as, IRIns *ir)
+{
+ switch ((IROp)ir->o) {
+ /* Miscellaneous ops. */
+ case IR_LOOP: asm_loop(as); break;
+ case IR_NOP: case IR_XBAR:
+ lj_assertA(!ra_used(ir),
+ "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
+ break;
+ case IR_USE:
+ ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
+ case IR_PHI: asm_phi(as, ir); break;
+ case IR_HIOP: asm_hiop(as, ir); break;
+ case IR_GCSTEP: asm_gcstep(as, ir); break;
+ case IR_PROF: asm_prof(as, ir); break;
+
+ /* Guarded assertions. */
+ case IR_LT: case IR_GE: case IR_LE: case IR_GT:
+ case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
+ case IR_ABC:
+ asm_comp(as, ir);
+ break;
+ case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;
+
+ case IR_RETF: asm_retf(as, ir); break;
+
+ /* Bit ops. */
+ case IR_BNOT: asm_bnot(as, ir); break;
+ case IR_BSWAP: asm_bswap(as, ir); break;
+ case IR_BAND: asm_band(as, ir); break;
+ case IR_BOR: asm_bor(as, ir); break;
+ case IR_BXOR: asm_bxor(as, ir); break;
+ case IR_BSHL: asm_bshl(as, ir); break;
+ case IR_BSHR: asm_bshr(as, ir); break;
+ case IR_BSAR: asm_bsar(as, ir); break;
+ case IR_BROL: asm_brol(as, ir); break;
+ case IR_BROR: asm_bror(as, ir); break;
+
+ /* Arithmetic ops. */
+ case IR_ADD: asm_add(as, ir); break;
+ case IR_SUB: asm_sub(as, ir); break;
+ case IR_MUL: asm_mul(as, ir); break;
+ case IR_MOD: asm_mod(as, ir); break;
+ case IR_NEG: asm_neg(as, ir); break;
+#if LJ_SOFTFP32
+ case IR_DIV: case IR_POW: case IR_ABS:
+ case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
+ /* Unused for LJ_SOFTFP32. */
+ lj_assertA(0, "IR %04d with unused op %d",
+ (int)(ir - as->ir) - REF_BIAS, ir->o);
+ break;
+#else
+ case IR_DIV: asm_div(as, ir); break;
+ case IR_POW: asm_pow(as, ir); break;
+ case IR_ABS: asm_abs(as, ir); break;
+ case IR_LDEXP: asm_ldexp(as, ir); break;
+ case IR_FPMATH: asm_fpmath(as, ir); break;
+ case IR_TOBIT: asm_tobit(as, ir); break;
+#endif
+ case IR_MIN: asm_min(as, ir); break;
+ case IR_MAX: asm_max(as, ir); break;
+
+ /* Overflow-checking arithmetic ops. */
+ case IR_ADDOV: asm_addov(as, ir); break;
+ case IR_SUBOV: asm_subov(as, ir); break;
+ case IR_MULOV: asm_mulov(as, ir); break;
+
+ /* Memory references. */
+ case IR_AREF: asm_aref(as, ir); break;
+ case IR_HREF: asm_href(as, ir, 0); break;
+ case IR_HREFK: asm_hrefk(as, ir); break;
+ case IR_NEWREF: asm_newref(as, ir); break;
+ case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
+ case IR_FREF: asm_fref(as, ir); break;
+ case IR_TMPREF: asm_tmpref(as, ir); break;
+ case IR_STRREF: asm_strref(as, ir); break;
+ case IR_LREF: asm_lref(as, ir); break;
+
+ /* Loads and stores. */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ asm_ahuvload(as, ir);
+ break;
+ case IR_FLOAD: asm_fload(as, ir); break;
+ case IR_XLOAD: asm_xload(as, ir); break;
+ case IR_SLOAD: asm_sload(as, ir); break;
+ case IR_ALEN: asm_alen(as, ir); break;
+
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
+ case IR_FSTORE: asm_fstore(as, ir); break;
+ case IR_XSTORE: asm_xstore(as, ir); break;
+
+ /* Allocations. */
+ case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
+ case IR_TNEW: asm_tnew(as, ir); break;
+ case IR_TDUP: asm_tdup(as, ir); break;
+ case IR_CNEW: case IR_CNEWI:
+#if LJ_HASFFI
+ asm_cnew(as, ir);
+#else
+ lj_assertA(0, "IR %04d with unused op %d",
+ (int)(ir - as->ir) - REF_BIAS, ir->o);
+#endif
+ break;
+
+ /* Buffer operations. */
+ case IR_BUFHDR: asm_bufhdr(as, ir); break;
+ case IR_BUFPUT: asm_bufput(as, ir); break;
+ case IR_BUFSTR: asm_bufstr(as, ir); break;
+
+ /* Write barriers. */
+ case IR_TBAR: asm_tbar(as, ir); break;
+ case IR_OBAR: asm_obar(as, ir); break;
+
+ /* Type conversions. */
+ case IR_CONV: asm_conv(as, ir); break;
+ case IR_TOSTR: asm_tostr(as, ir); break;
+ case IR_STRTO: asm_strto(as, ir); break;
+
+ /* Calls. */
+ case IR_CALLA:
+ as->gcsteps++;
+ /* fallthrough */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
+ case IR_CALLXS: asm_callx(as, ir); break;
+ case IR_CARG: break;
+
+ default:
+ setintV(&as->J->errinfo, ir->o);
+ lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
+ break;
+ }
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Head of a root trace. */
+static void asm_head_root(ASMState *as)
+{
+ int32_t spadj;
+ asm_head_root_base(as);
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ spadj = asm_stack_adjust(as);
+ as->T->spadjust = (uint16_t)spadj;
+ emit_spsub(as, spadj);
+ /* Root traces assume a checked stack for the starting proto. */
+ as->T->topslot = gcref(as->T->startpt)->pt.framesize;
+}
+
+/* Head of a side trace.
+**
+** The current simplistic algorithm requires that all slots inherited
+** from the parent are live in a register between pass 2 and pass 3. This
+** avoids the complexity of stack slot shuffling. But of course this may
+** overflow the register set in some cases and cause the dreaded error:
+** "NYI: register coalescing too complex". A refined algorithm is needed.
+*/
+static void asm_head_side(ASMState *as)
+{
+ IRRef1 sloadins[RID_MAX];
+ RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
+ RegSet live = RSET_EMPTY; /* Live parent registers. */
+ IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
+ int32_t spadj, spdelta;
+ int pass2 = 0;
+ int pass3 = 0;
+ IRRef i;
+
+ if (as->snapno && as->topslot > as->parent->topslot) {
+ /* Force snap #0 alloc to prevent register overwrite in stack check. */
+ asm_snap_alloc(as, 0);
+ }
+ allow = asm_head_side_base(as, irp, allow);
+
+ /* Scan all parent SLOADs and collect register dependencies. */
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ RegSP rs;
+ lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
+ (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
+ "IR %04d has bad parent op %d",
+ (int)(ir - as->ir) - REF_BIAS, ir->o);
+ rs = as->parentmap[i - REF_FIRST];
+ if (ra_hasreg(ir->r)) {
+ rset_clear(allow, ir->r);
+ if (ra_hasspill(ir->s)) {
+ ra_save(as, ir, ir->r);
+ checkmclim(as);
+ }
+ } else if (ra_hasspill(ir->s)) {
+ irt_setmark(ir->t);
+ pass2 = 1;
+ }
+ if (ir->r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, ir->r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ if (ra_hasreg(ir->r))
+ pass3 = 1;
+ } else if (ra_used(ir)) {
+ sloadins[rs] = (IRRef1)i;
+ rset_set(live, rs); /* Block live parent register. */
+ }
+ }
+
+ /* Calculate stack frame adjustment. */
+ spadj = asm_stack_adjust(as);
+ spdelta = spadj - (int32_t)as->parent->spadjust;
+ if (spdelta < 0) { /* Don't shrink the stack frame. */
+ spadj = (int32_t)as->parent->spadjust;
+ spdelta = 0;
+ }
+ as->T->spadjust = (uint16_t)spadj;
+
+ /* Reload spilled target registers. */
+ if (pass2) {
+ for (i = as->stopins; i > REF_BASE; i--) {
+ IRIns *ir = IR(i);
+ if (irt_ismarked(ir->t)) {
+ RegSet mask;
+ Reg r;
+ RegSP rs;
+ irt_clearmark(ir->t);
+ rs = as->parentmap[i - REF_FIRST];
+ if (!ra_hasspill(regsp_spill(rs)))
+ ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
+ else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
+ continue; /* Same spill slot, do nothing. */
+ mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
+ if (mask == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ r = ra_allocref(as, i, mask);
+ ra_save(as, ir, r);
+ rset_clear(allow, r);
+ if (r == rs) { /* Coalesce matching registers right now. */
+ ra_free(as, r);
+ rset_clear(live, r);
+ } else if (ra_hasspill(regsp_spill(rs))) {
+ pass3 = 1;
+ }
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Store trace number and adjust stack frame relative to the parent. */
+ emit_setvmstate(as, (int32_t)as->T->traceno);
+ emit_spsub(as, spdelta);
+
+#if !LJ_TARGET_X86ORX64
+ /* Restore BASE register from parent spill slot. */
+ if (ra_hasspill(irp->s))
+ emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
+#endif
+
+ /* Restore target registers from parent spill slots. */
+ if (pass3) {
+ RegSet work = ~as->freeset & RSET_ALL;
+ while (work) {
+ Reg r = rset_pickbot(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ RegSP rs = as->parentmap[ref - REF_FIRST];
+ rset_clear(work, r);
+ if (ra_hasspill(regsp_spill(rs))) {
+ int32_t ofs = sps_scale(regsp_spill(rs));
+ ra_free(as, r);
+ emit_spload(as, IR(ref), r, ofs);
+ checkmclim(as);
+ }
+ }
+ }
+
+ /* Shuffle registers to match up target regs with parent regs. */
+ for (;;) {
+ RegSet work;
+
+ /* Repeatedly coalesce free live registers by moving to their target. */
+ while ((work = as->freeset & live) != RSET_EMPTY) {
+ Reg rp = rset_pickbot(work);
+ IRIns *ir = IR(sloadins[rp]);
+ rset_clear(live, rp);
+ rset_clear(allow, rp);
+ ra_free(as, ir->r);
+ emit_movrr(as, ir, ir->r, rp);
+ checkmclim(as);
+ }
+
+ /* We're done if no live registers remain. */
+ if (live == RSET_EMPTY)
+ break;
+
+ /* Break cycles by renaming one target to a temp. register. */
+ if (live & RSET_GPR) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
+ }
+ if (!LJ_SOFTFP && (live & RSET_FPR)) {
+ RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
+ if (tmpset == RSET_EMPTY)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
+ }
+ checkmclim(as);
+ /* Continue with coalescing to fix up the broken cycle(s). */
+ }
+
+ /* Inherit top stack slot already checked by parent trace. */
+ as->T->topslot = as->parent->topslot;
+ if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
+#ifdef EXITSTATE_CHECKEXIT
+ /* Highest exit + 1 indicates stack check. */
+ ExitNo exitno = as->T->nsnap;
+#else
+ /* Reuse the parent exit in the context of the parent trace. */
+ ExitNo exitno = as->J->exitno;
+#endif
+ as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
+ asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
+ }
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Get base slot for a snapshot. */
+static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ MSize n;
+ for (n = snap->nent; n > 0; n--) {
+ SnapEntry sn = map[n-1];
+ if ((sn & SNAP_FRAME)) {
+ *gotframe = 1;
+ return snap_slot(sn) - LJ_FR2;
+ }
+ }
+ return 0;
+}
+
+/* Link to another trace. */
+static void asm_tail_link(ASMState *as)
+{
+ SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
+ SnapShot *snap = &as->T->snap[snapno];
+ int gotframe = 0;
+ BCReg baseslot = asm_baseslot(as, snap, &gotframe);
+
+ as->topslot = snap->topslot;
+ checkmclim(as);
+ ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
+
+ if (as->T->link == 0) {
+ /* Setup fixed registers for exit to interpreter. */
+ const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
+ int32_t mres;
+ if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
+ BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
+ if (bc_isret(bc_op(*retpc)))
+ pc = retpc;
+ }
+#if LJ_GC64
+ emit_loadu64(as, RID_LPC, u64ptr(pc));
+#else
+ ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
+ ra_allockreg(as, i32ptr(pc), RID_LPC);
+#endif
+ mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT:
+ mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
+ case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
+ case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
+ default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
+ }
+ ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
+ } else if (baseslot) {
+ /* Save modified BASE for linking to trace with higher start frame. */
+ emit_setgl(as, RID_BASE, jit_base);
+ }
+ emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
+
+ if (as->J->ktrace) { /* Patch ktrace slot with the final GCtrace pointer. */
+ setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
+ IR(as->J->ktrace)->o = IR_KGC;
+ }
+
+ /* Sync the interpreter state with the on-trace state. */
+ asm_stack_restore(as, snap);
+
+ /* Root traces that add frames need to check the stack at the end. */
+ if (!as->parent && gotframe)
+ asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
+}
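+/* Example: for an exit to the interpreter at a BC_RETM the number of
+** results is not a compile-time constant, so per the switch above
+** MULTRES is materialized in RID_RET as
+** snap->nslots - baseslot - LJ_FR2 - (bc_a(*pc) + bc_d(*pc)).
+*/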
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Clear reg/sp for all instructions and add register hints. */
+static void asm_setup_regsp(ASMState *as)
+{
+ GCtrace *T = as->T;
+ int sink = T->sinktags;
+ IRRef nins = T->nins;
+ IRIns *ir, *lastir;
+ int inloop;
+#if LJ_TARGET_ARM
+ uint32_t rload = 0xa6402a64;
+#endif
+
+ ra_setup(as);
+#if LJ_TARGET_ARM64
+ ra_setkref(as, RID_GL, (intptr_t)J2G(as->J));
+#endif
+
+ /* Clear reg/sp for constants. */
+ for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
+ ir->prev = REGSP_INIT;
+ if (irt_is64(ir->t) && ir->o != IR_KNULL) {
+#if LJ_GC64
+ /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
+ ir->i = 0; /* Will become non-zero only for RIP-relative addresses. */
+#else
+ /* Make life easier for backends by putting address of constant in i. */
+ ir->i = (int32_t)(intptr_t)(ir+1);
+#endif
+ ir++;
+ }
+ }
+
+ /* REF_BASE is used for implicit references to the BASE register. */
+ lastir->prev = REGSP_HINT(RID_BASE);
+
+ as->snaprename = nins;
+ as->snapref = nins;
+ as->snapno = T->nsnap;
+ as->snapalloc = 0;
+
+ as->stopins = REF_BASE;
+ as->orignins = nins;
+ as->curins = nins;
+
+ /* Setup register hints for parent link instructions. */
+ ir = IR(REF_FIRST);
+ if (as->parent) {
+ uint16_t *p;
+ lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
+ if (lastir - ir > LJ_MAX_JSLOTS)
+ lj_trace_err(as->J, LJ_TRERR_NYICOAL);
+ as->stopins = (IRRef)((lastir-1) - as->ir);
+ for (p = as->parentmap; ir < lastir; ir++) {
+ RegSP rs = ir->prev;
+ *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */
+ if (!ra_hasspill(regsp_spill(rs)))
+ ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
+ else
+ ir->prev = REGSP_INIT;
+ }
+ }
+
+ inloop = 0;
+ as->evenspill = SPS_FIRST;
+ for (lastir = IR(nins); ir < lastir; ir++) {
+ if (sink) {
+ if (ir->r == RID_SINK)
+ continue;
+ if (ir->r == RID_SUNK) { /* Revert after ASM restart. */
+ ir->r = RID_SINK;
+ continue;
+ }
+ }
+ switch (ir->o) {
+ case IR_LOOP:
+ inloop = 1;
+ break;
+#if LJ_TARGET_ARM
+ case IR_SLOAD:
+ if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
+ break;
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
+ ir->prev = (uint16_t)REGSP_HINT((rload & 15));
+ rload = lj_ror(rload, 4);
+ continue;
+ case IR_TMPREF:
+ if ((ir->op2 & IRTMPREF_OUT2) && as->evenspill < 4)
+ as->evenspill = 4; /* TMPREF OUT2 needs two TValues on the stack. */
+ break;
+#endif
+ case IR_CALLXS: {
+ CCallInfo ci;
+ ci.flags = asm_callx_flags(as, ir);
+ ir->prev = asm_setup_call_slots(as, ir, &ci);
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+ continue;
+ }
+ case IR_CALLL:
+ /* lj_vm_next needs two TValues on the stack. */
+#if LJ_TARGET_X64 && LJ_ABI_WIN
+ if (ir->op2 == IRCALL_lj_vm_next && as->evenspill < SPS_FIRST + 4)
+ as->evenspill = SPS_FIRST + 4;
+#else
+ if (SPS_FIRST < 4 && ir->op2 == IRCALL_lj_vm_next && as->evenspill < 4)
+ as->evenspill = 4;
+#endif
+ /* fallthrough */
+ case IR_CALLN: case IR_CALLA: case IR_CALLS: {
+ const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
+ ir->prev = asm_setup_call_slots(as, ir, ci);
+ if (inloop)
+ as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
+ (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
+ continue;
+ }
+ case IR_HIOP:
+ switch ((ir-1)->o) {
+#if LJ_SOFTFP && LJ_TARGET_ARM
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ if (ra_hashint((ir-1)->r)) {
+ ir->prev = (ir-1)->prev + 1;
+ continue;
+ }
+ break;
+#endif
+#if !LJ_SOFTFP && LJ_NEED_FP64 && LJ_32 && LJ_HASFFI
+ case IR_CONV:
+ if (irt_isfp((ir-1)->t)) {
+ ir->prev = REGSP_HINT(RID_FPRET);
+ continue;
+ }
+#endif
+ /* fallthrough */
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+#endif
+ (ir-1)->prev = REGSP_HINT(RID_RETLO);
+ ir->prev = REGSP_HINT(RID_RETHI);
+ continue;
+ default:
+ break;
+ }
+ break;
+#if LJ_SOFTFP
+ case IR_MIN: case IR_MAX:
+ if ((ir+1)->o != IR_HIOP) break;
+#endif
+ /* fallthrough */
+ /* C calls evict all scratch regs and return results in RID_RET. */
+ case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
+ if (REGARG_NUMGPR < 3 && as->evenspill < 3)
+ as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
+#if LJ_TARGET_X86 && LJ_HASFFI
+ if (0) {
+ case IR_CNEW:
+ if (ir->op2 != REF_NIL && as->evenspill < 4)
+ as->evenspill = 4; /* lj_cdata_newv needs 4 args. */
+ }
+ /* fallthrough */
+#else
+ /* fallthrough */
+ case IR_CNEW:
+#endif
+ /* fallthrough */
+ case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
+ case IR_BUFSTR:
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ continue;
+ case IR_STRTO: case IR_OBAR:
+ if (inloop)
+ as->modset = RSET_SCRATCH;
+ break;
+#if !LJ_SOFTFP
+#if !LJ_TARGET_X86ORX64
+ case IR_LDEXP:
+#endif
+#endif
+ /* fallthrough */
+ case IR_POW:
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+#if LJ_TARGET_X86
+ if (irt_isnum(IR(ir->op2)->t)) {
+ if (as->evenspill < 4) /* Leave room to call pow(). */
+ as->evenspill = 4;
+ }
+ break;
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ continue;
+#endif
+ }
+ /* fallthrough */ /* for integer POW */
+ case IR_DIV: case IR_MOD:
+ if ((LJ_64 && LJ_SOFTFP) || !irt_isnum(ir->t)) {
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset |= (RSET_SCRATCH & RSET_GPR);
+ continue;
+ }
+ break;
+#if LJ_64 && LJ_SOFTFP
+ case IR_ADD: case IR_SUB: case IR_MUL:
+ if (irt_isnum(ir->t)) {
+ ir->prev = REGSP_HINT(RID_RET);
+ if (inloop)
+ as->modset |= (RSET_SCRATCH & RSET_GPR);
+ continue;
+ }
+ break;
+#endif
+ case IR_FPMATH:
+#if LJ_TARGET_X86ORX64
+ if (ir->op2 <= IRFPM_TRUNC) {
+ if (!(as->flags & JIT_F_SSE4_1)) {
+ ir->prev = REGSP_HINT(RID_XMM0);
+ if (inloop)
+ as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ continue;
+ }
+ break;
+ }
+#endif
+ if (inloop)
+ as->modset |= RSET_SCRATCH;
+#if LJ_TARGET_X86
+ break;
+#else
+ ir->prev = REGSP_HINT(RID_FPRET);
+ continue;
+#endif
+#if LJ_TARGET_X86ORX64
+ /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
+ case IR_BSHL: case IR_BSHR: case IR_BSAR:
+ if ((as->flags & JIT_F_BMI2)) /* Except if BMI2 is available. */
+ break;
+ /* fallthrough */
+ case IR_BROL: case IR_BROR:
+ if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
+ IR(ir->op2)->r = REGSP_HINT(RID_ECX);
+ if (inloop)
+ rset_set(as->modset, RID_ECX);
+ }
+ break;
+#endif
+ /* Do not propagate hints across type conversions or loads. */
+ case IR_TOBIT:
+ case IR_XLOAD:
+#if !LJ_TARGET_ARM
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+#endif
+ break;
+ case IR_CONV:
+ if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
+ (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ break;
+ /* fallthrough */
+ default:
+ /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
+ if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
+ ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
+ ir->prev = IR(ir->op1)->prev;
+ continue;
+ }
+ break;
+ }
+ ir->prev = REGSP_INIT;
+ }
+ if ((as->evenspill & 1))
+ as->oddspill = as->evenspill++;
+ else
+ as->oddspill = 0;
+}
+
+/* -- Assembler core ------------------------------------------------------ */
+
+/* Assemble a trace. */
+void lj_asm_trace(jit_State *J, GCtrace *T)
+{
+ ASMState as_;
+ ASMState *as = &as_;
+
+ /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
+ {
+ IRRef nins = T->nins;
+ IRIns *ir = &T->ir[nins-1];
+ if (ir->o == IR_NOP || ir->o == IR_RENAME) {
+ do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
+ T->nins = nins;
+ }
+ }
+
+ /* Ensure an initialized instruction beyond the last one for HIOP checks. */
+ /* This also allows one RENAME to be added without reallocating curfinal. */
+ as->orignins = lj_ir_nextins(J);
+ lj_ir_nop(&J->cur.ir[as->orignins]);
+
+ /* Setup initial state. Copy some fields to reduce indirections. */
+ as->J = J;
+ as->T = T;
+ J->curfinal = lj_trace_alloc(J->L, T); /* This copies the IR, too. */
+ as->flags = J->flags;
+ as->loopref = J->loopref;
+ as->realign = NULL;
+ as->loopinv = 0;
+ as->parent = J->parent ? traceref(J, J->parent) : NULL;
+
+ /* Reserve MCode memory. */
+ as->mctop = as->mctoporig = lj_mcode_reserve(J, &as->mcbot);
+ as->mcp = as->mctop;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ asm_setup_target(as);
+
+ /*
+ ** This is a loop, because the MCode may have to be (re-)assembled
+ ** multiple times:
+ **
+ ** 1. as->realign is set (and the assembly aborted), if the arch-specific
+ ** backend wants the MCode to be aligned differently.
+ **
+ ** This is currently only the case on x86/x64, where small loops get
+ ** an aligned loop body plus a short branch. Not much effort is wasted,
+ ** because the abort happens very quickly and only once.
+ **
+ ** 2. The IR is immovable, since the MCode embeds pointers to various
+ ** constants inside the IR. But RENAMEs may need to be added to the IR
+ ** during assembly, which might grow and reallocate the IR. We check
+ ** at the end if the IR (in J->cur.ir) has actually grown, resize the
+ ** copy (in J->curfinal.ir) and try again.
+ **
+ ** 95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
+ ** 2 RENAMEs and only 0.5% have more than that. That's why we opt to
+ ** always have one spare slot in the IR (see above), which means we
+ ** have to redo the assembly for only ~2% of all traces.
+ **
+ ** Very, very rarely, this needs to be done repeatedly, since the
+ ** location of constants inside the IR (actually, reachability from
+ ** a global pointer) may affect register allocation and thus the
+ ** number of RENAMEs.
+ */
+ for (;;) {
+ as->mcp = as->mctop;
+#ifdef LUA_USE_ASSERT
+ as->mcp_prev = as->mcp;
+#endif
+ as->ir = J->curfinal->ir; /* Use the copied IR. */
+ as->curins = J->cur.nins = as->orignins;
+
+ RA_DBG_START();
+ RA_DBGX((as, "===== STOP ====="));
+
+ /* General trace setup. Emit tail of trace. */
+ asm_tail_prep(as);
+ as->mcloop = NULL;
+ as->flagmcp = NULL;
+ as->topslot = 0;
+ as->gcsteps = 0;
+ as->sectref = as->loopref;
+ as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
+ asm_setup_regsp(as);
+ if (!as->loopref)
+ asm_tail_link(as);
+
+ /* Assemble a trace in linear backwards order. */
+ for (as->curins--; as->curins > as->stopins; as->curins--) {
+ IRIns *ir = IR(as->curins);
+ /* 64 bit types handled by SPLIT for 32 bit archs. */
+ lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
+ "IR %04d has unsplit 64 bit type",
+ (int)(ir - as->ir) - REF_BIAS);
+ asm_snap_prev(as);
+ if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
+ continue; /* Dead-code elimination can be soooo easy. */
+ if (irt_isguard(ir->t))
+ asm_snap_prep(as);
+ RA_DBG_REF();
+ checkmclim(as);
+ asm_ir(as, ir);
+ }
+
+ if (as->realign && J->curfinal->nins >= T->nins)
+ continue; /* Retry in case only the MCode needs to be realigned. */
+
+ /* Emit head of trace. */
+ RA_DBG_REF();
+ checkmclim(as);
+ if (as->gcsteps > 0) {
+ as->curins = as->T->snap[0].ref;
+ asm_snap_prep(as); /* The GC check is a guard. */
+ asm_gc_check(as);
+ as->curins = as->stopins;
+ }
+ ra_evictk(as);
+ if (as->parent)
+ asm_head_side(as);
+ else
+ asm_head_root(as);
+ asm_phi_fixup(as);
+
+ if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */
+ lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
+ memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
+ (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */
+ T->nins = J->curfinal->nins;
+ /* Fill mcofs of any unprocessed snapshots. */
+ as->curins = REF_FIRST;
+ asm_snap_prev(as);
+ break; /* Done. */
+ }
+
+ /* Otherwise try again with a bigger IR. */
+ lj_trace_free(J2G(J), J->curfinal);
+ J->curfinal = NULL; /* In case lj_trace_alloc() OOMs. */
+ J->curfinal = lj_trace_alloc(J->L, T);
+ as->realign = NULL;
+ }
+
+ RA_DBGX((as, "===== START ===="));
+ RA_DBG_FLUSH();
+ if (as->freeset != RSET_ALL)
+ lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */
+
+ /* Set trace entry point before fixing up tail to allow link to self. */
+ T->mcode = as->mcp;
+ T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
+ if (as->loopref)
+ asm_loop_tail_fixup(as);
+ else
+ asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
+ T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
+ asm_snap_fixup_mcofs(as);
+#if LJ_TARGET_MCODE_FIXUP
+ asm_mcode_fixup(T->mcode, T->szmcode);
+#endif
+ lj_mcode_sync(T->mcode, as->mctoporig);
+}
+
+#undef IR
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_asm.h b/libs/luajit-cmake/luajit/src/lj_asm.h
new file mode 100644
index 0000000..f0a4f2d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm.h
@@ -0,0 +1,17 @@
+/*
+** IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ASM_H
+#define _LJ_ASM_H
+
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno,
+ MCode *target);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_asm_arm.h b/libs/luajit-cmake/luajit/src/lj_asm_arm.h
new file mode 100644
index 0000000..326330f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm_arm.h
@@ -0,0 +1,2286 @@
+/*
+** ARM IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a scratch register pair. */
+static Reg ra_scratchpair(ASMState *as, RegSet allow)
+{
+ RegSet pick1 = as->freeset & allow;
+ RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
+ Reg r;
+ if (pick2) {
+ r = rset_picktop(pick2);
+ } else {
+ RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
+ if (pick) {
+ r = rset_picktop(pick);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ } else {
+ pick = pick1 & (allow << 1) & RSET_GPRODD;
+ if (pick) {
+ r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
+ } else {
+ r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
+ ra_restore(as, regcost_ref(as->cost[r+1]));
+ }
+ }
+ }
+ lj_assertA(rset_test(RSET_GPREVEN, r), "odd reg %d", r);
+ ra_modified(as, r);
+ ra_modified(as, r+1);
+ RA_DBGX((as, "scratchpair $r $r", r, r+1));
+ return r;
+}
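+/* Note: the pair (r, r+1) with even r is presumably required for
+** LDRD/STRD-style paired accesses. 'pick1 & (pick1 >> 1) & RSET_GPREVEN'
+** keeps bit r only if both r and r+1 are free and r is even; the
+** fallbacks restore or evict the missing half of an otherwise free pair.
+*/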
+
+#if !LJ_SOFTFP
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+#endif
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ MCode *mxp = as->mcbot;
+ int i;
+ if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
+ asm_mclimit(as);
+ /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
+ *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
+ *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
+ mxp++;
+ *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */
+ *mxp++ = group*EXITSTUBS_PER_GROUP;
+ for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
+ *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
+ lj_mcode_sync(as->mcbot, mxp);
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxp - EXITSTUBS_PER_GROUP;
+}
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, ARMCC cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
+ emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
+ return;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
+ int lim)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (ofs > -lim && ofs < lim) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (ofs < lim) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ *ofsp = (ofs & 255); /* Keep only the low 8 offset bits, so an LDRD can encode it. */
+ return ra_allock(as, (ofs & ~255), allow);
+ }
+ } else if (ir->o == IR_TMPREF) {
+ *ofsp = 0;
+ return RID_SP;
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse m operand into arithmetic/logic instructions. */
+static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ return ARMF_M(ir->r);
+ } else if (irref_isk(ref)) {
+ uint32_t k = emit_isk12(ai, ir->i);
+ if (k)
+ return k;
+ } else if (mayfuse(as, ref)) {
+ if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
+ ir->o == IR_BSHR ? ARMSH_LSR :
+ ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
+ if (irref_isk(ir->op2)) {
+ return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
+ } else {
+ Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
+ return m | ARMF_RSH(sh, s);
+ }
+ } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
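+ /* Fuse x+x into x, lsl #1. */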
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ return m | ARMF_SH(ARMSH_LSL, 1);
+ }
+ }
+ return ra_allocref(as, ref, allow);
+}
+
+/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
+static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
+ irref_isk(ir->op2) && IR(ir->op2)->i == 2)
+ return ir->op1;
+ return 0; /* No fusion. */
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
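+ /* Offset range depends on the instruction: VLDR/VSTR +-1020, LDR/STR +-4095, LDRH/LDRD +-255. */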
+ int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
+ (ai & 0x04000000) ? 4096 : 256;
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) &&
+ (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
+ (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
+ ofs = ofs2;
+ ref = ir->op1;
+ } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg rn, rm;
+ if ((ai & 0x04000000)) {
+ IRRef sref = asm_fuselsl2(as, rref);
+ if (sref) {
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
+ lref = rref;
+ rref = sref;
+ ai |= ARMF_SH(ARMSH_LSL, 2);
+ }
+ }
+ rn = ra_alloc1(as, lref, allow);
+ rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
+ lj_assertA(ofs == 0, "bad usage");
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg rn = ra_alloc1(as, ir->op1, allow);
+ uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, rd, ofs);
+ else
+ emit_lsox(as, ai, rd, rd, ofs);
+ emit_dn(as, ARMI_ADD^m, rd, rn);
+ return;
+ }
+ if (ofs <= -lim || ofs >= lim) {
+ Reg rn = ra_alloc1(as, ref, allow);
+ Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
+ if ((ai & 0x04000000)) ai |= ARMI_LS_R;
+ emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+#if !LJ_SOFTFP
+ if ((ai & 0x08000000))
+ emit_vlso(as, ai, rd, base, ofs);
+ else
+#endif
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, rd, base, ofs);
+ else
+ emit_lsox(as, ai, rd, base, ofs);
+}
+
+#if !LJ_SOFTFP
+/* Fuse to multiply-add/sub instruction. */
+static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, ai = air, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
+ Reg right, left = ra_alloc2(as, irm,
+ rset_exclude(rset_exclude(RSET_FPR, dest), add));
+ right = (left >> 8); left &= 255;
+ emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
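+ /* Code is emitted backwards: the VMOV below executes first and loads the accumulator. */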
+ if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_XNARGS(ci);
+ int32_t ofs = 0;
+#if LJ_SOFTFP
+ Reg gpr = REGARG_FIRSTGPR;
+#else
+ Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
+#endif
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+#if !LJ_SOFTFP
+ for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
+ as->cost[gpr] = REGCOST(~0u, ASMREF_L);
+ gpr = REGARG_FIRSTGPR;
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+#if !LJ_SOFTFP
+ if (ref && irt_isfp(ir->t)) {
+ RegSet of = as->freeset;
+ Reg src;
+ if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
+ if (irt_isnum(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ continue;
+ }
+ } else if (fprodd) { /* Ick: back-fill the odd S register left by a previous float. */
+ src = ra_alloc1(as, ref, RSET_FPR);
+ emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
+ fprodd = 0;
+ continue;
+ } else if (fpr <= REGARG_LASTFPR) {
+ ra_leftov(as, fpr, ref);
+ fprodd = fpr++;
+ continue;
+ }
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
+ fprodd = 0;
+ goto stackfp;
+ }
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
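+ /* Doubles passed in GPRs must start at an even register (AAPCS). */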
+ if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
+ if (gpr <= REGARG_LASTGPR) {
+ lj_assertA(rset_test(as->freeset, gpr),
+ "reg %d not free", gpr); /* Must have been evicted. */
+ if (irt_isnum(ir->t)) {
+ lj_assertA(rset_test(as->freeset, gpr+1),
+ "reg %d not free", gpr+1); /* Ditto. */
+ emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
+ gpr += 2;
+ } else {
+ emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
+ gpr++;
+ }
+ } else {
+ stackfp:
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, src, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ } else
+#endif
+ {
+ if (gpr <= REGARG_LASTGPR) {
+ lj_assertA(rset_test(as->freeset, gpr),
+ "reg %d not free", gpr); /* Must have been evicted. */
+ if (ref) ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ if (ref) {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ }
+ ofs += 4;
+ }
+ }
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lj_assertA(!irt_ispri(ir->t), "PRI dest");
+ if (!LJ_SOFTFP && irt_isfp(ir->t)) {
+ if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
+ Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
+ if (irt_isnum(ir->t))
+ emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
+ else
+ emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+ UNUSED(ci);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
+ emit_m(as, ARMI_BLXr, freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
+ /* Need to force a spill on REF_BASE now to update the stack slot. */
+ emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_CMP, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irgc;
+ int32_t addr = i32ptr((void *)&J2G(as->J)->cur_L);
+ irgc.ot = IRT(0, IRT_PGC); /* GC type. */
+ emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
+ if ((as->flags & JIT_F_ARMV6T2)) {
+ emit_dnm(as, ARMI_BFI, RID_TMP, lj_fls(SBUF_MASK_FLAG), tmp);
+ } else {
+ emit_dnm(as, ARMI_ORR, RID_TMP, RID_TMP, tmp);
+ emit_dn(as, ARMI_AND|ARMI_K12|SBUF_MASK_FLAG, tmp, tmp);
+ }
+ emit_lso(as, ARMI_LDR, RID_TMP,
+ ra_allock(as, (addr & ~4095),
+ rset_exclude(rset_exclude(RSET_GPR, sb), tmp)),
+ (addr & 4095));
+ emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+}
+#endif
+
+/* -- Type conversions ---------------------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_NE);
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
+ emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
+}
+#endif
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if !LJ_SOFTFP
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+#endif
+ IRRef lref = ir->op1;
+ /* 64 bit integer conversions are handled by SPLIT. */
+ lj_assertA(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64),
+ "IR %04d has unsplit 64 bit type",
+ (int)(ir - as->ir) - REF_BIAS);
+#if LJ_SOFTFP
+ /* FP conversions are handled by SPLIT. */
+ lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
+ "IR %04d has FP type",
+ (int)(ir - as->ir) - REF_BIAS);
+ /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
+#else
+ lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
+ (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ ARMIns ai = irt_isfloat(ir->t) ?
+ (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
+ (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
+ emit_dm(as, ai, (dest & 15), (dest & 15));
+ emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ ARMIns ai;
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ ai = irt_isint(ir->t) ?
+ (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
+ (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
+ emit_dm(as, ai, (tmp & 15), (left & 15));
+ }
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
+ if ((as->flags & JIT_F_ARMV6)) {
+ ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
+ st == IRT_U8 ? ARMI_UXTB :
+ st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
+ emit_dm(as, ai, dest, left);
+ } else if (st == IRT_U8) {
+ emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
+ } else {
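+ /* Pre-ARMv6: extend via lsl #24/#16, then asr (signed) or lsr (unsigned). */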
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
+ }
+ } else { /* Handle 32/32 bit no-op (cast). */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ Reg rlo = 0, rhi = 0, tmp;
+ int destused = ra_used(ir);
+ int32_t ofs = 0;
+ ra_evictset(as, RSET_SCRATCH);
+#if LJ_SOFTFP
+ if (destused) {
+ if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
+ (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ Reg r = (ir+i)->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
+ }
+ }
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ } else {
+ rhi = ra_dest(as, ir+1, RSET_GPR);
+ rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
+ }
+ }
+ asm_guardcc(as, CC_EQ);
+ if (destused) {
+ emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
+ emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
+ }
+#else
+ UNUSED(rhi);
+ if (destused) {
+ if (ra_hasspill(ir->s)) {
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ if (ra_hasreg(ir->r)) {
+ ra_free(as, ir->r);
+ ra_modified(as, ir->r);
+ emit_spload(as, ir, ir->r, ofs);
+ }
+ } else {
+ rlo = ra_dest(as, ir, RSET_FPR);
+ }
+ }
+ asm_guardcc(as, CC_EQ);
+ if (destused)
+ emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
+#endif
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ if (ofs == 0)
+ emit_dm(as, ARMI_MOV, tmp, RID_SP);
+ else
+ emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
+{
+ if ((mode & IRTMPREF_IN1)) {
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if ((mode & IRTMPREF_OUT1)) {
+#if LJ_SOFTFP
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ emit_lso(as, ARMI_STR,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
+ RID_SP, 0);
+ emit_lso(as, ARMI_STR,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
+ RID_SP, 4);
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ emit_vlso(as, ARMI_VSTR_D, src, RID_SP, 0);
+#endif
+ } else if (irref_isk(ref)) {
+ /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ } else {
+#if LJ_SOFTFP
+ lj_assertA(0, "unsplit FP op");
+#else
+ /* Otherwise force a spill and use the spill slot. */
+ emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
+#endif
+ }
+ } else {
+ /* Otherwise use [sp] and [sp+4] to hold the TValue.
+ ** This assumes the following call has max. 4 args.
+ */
+ Reg type;
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_GPR);
+ emit_lso(as, ARMI_STR, src, RID_SP, 0);
+ }
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
+ type = ra_alloc1(as, ref+1, RSET_GPR);
+ else
+ type = ra_allock(as, irt_toitype(ir->t), RSET_GPR);
+ emit_lso(as, ARMI_STR, type, RID_SP, 4);
+ }
+ } else {
+ emit_dm(as, ARMI_MOV, dest, RID_SP);
+ }
+}
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
+ if (k) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_dn(as, ARMI_ADD^k, dest, base);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ IRType1 kt = irkey->t;
+ int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
+ uint32_t khash;
+ MCLabel l_end, l_loop;
+ rset_clear(allow, tab);
+ if (!irref_isk(refkey) || irt_isstr(kt)) {
+#if LJ_SOFTFP
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ if (irkey[1].o == IR_HIOP) {
+ if (ra_hasreg((irkey+1)->r)) {
+ keynumhi = (irkey+1)->r;
+ keyhi = RID_TMP;
+ ra_noweak(as, keynumhi);
+ } else {
+ keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
+ }
+ rset_clear(allow, keynumhi);
+ khi = 0;
+ }
+#else
+ if (irt_isnum(kt)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ keyhi = keynumhi = ra_scratch(as, allow);
+ rset_clear(allow, keyhi);
+ khi = 0;
+ } else {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+#endif
+ } else if (irt_isnum(kt)) {
+ int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
+ k = emit_isk12(ARMI_CMP, val);
+ if (!k) {
+ key = ra_allock(as, val, allow);
+ rset_clear(allow, key);
+ }
+ val = (int32_t)ir_knum(irkey)->u32.hi;
+ khi = emit_isk12(ARMI_CMP, val);
+ if (!khi) {
+ keyhi = ra_allock(as, val, allow);
+ rset_clear(allow, keyhi);
+ }
+ } else if (!irt_ispri(kt)) {
+ k = emit_isk12(ARMI_CMP, irkey->i);
+ if (!k) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ }
+ if (!irt_ispri(kt))
+ tmp = ra_scratchpair(as, allow);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_AL);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
+ emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ if (!irt_ispri(kt)) {
+ emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
+ emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
+ emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
+ } else {
+ emit_n(as, ARMI_CMP^khi, tmp);
+ emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
+ }
+ *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = irref_isk(refkey) ? ir_khash(as, irkey) : 1;
+ if (khash == 0) {
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
+ emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
+ if (irt_isstr(kt)) { /* Fetch of str->sid is cheaper than ra_allock. */
+ emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, sid));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else if (irref_isk(refkey)) {
+ emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
+ rset_exclude(rset_exclude(RSET_GPR, tab), dest));
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */
+ if (keyhi == RID_TMP)
+ emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
+ emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
+ }
+ emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
+ emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
+ tmp, tmp+1, tmp);
+ emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
+ emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
+ if (ra_hasreg(keynumhi)) {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */
+ emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
+#if !LJ_SOFTFP
+ emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
+ (ra_alloc1(as, refkey, RSET_FPR) & 15));
+#endif
+ } else {
+ emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
+ emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
+ rset_exclude(rset_exclude(RSET_GPR, tab), key));
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
+ if (ofs > 4095) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
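+ /* Prefer an even register with a free odd partner, so the type check can avoid RID_TMP. */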
+ RegSet even = (as->freeset & allow);
+ even = even & (even >> 1) & RSET_GPREVEN;
+ if (even) {
+ key = ra_scratch(as, even);
+ if (rset_test(as->freeset, key+1)) {
+ type = key+1;
+ ra_modified(as, type);
+ }
+ } else {
+ key = ra_scratch(as, allow);
+ }
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
+ (int32_t)ir_knum(irkey)->u32.hi, allow);
+ emit_opk(as, ARMI_CMP, 0, key,
+ (int32_t)ir_knum(irkey)->u32.lo, allow);
+ } else {
+ if (ra_hasreg(key))
+ emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
+ }
+ emit_lso(as, ARMI_LDR, type, idx, kofs+4);
+ if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
+ if (ofs > 4095)
+ emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, ARMI_LDR, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
+ emit_opk(as, ARMI_ADD, dest, uv,
+ (int32_t)offsetof(GCupval, tv), RSET_GPR);
+ emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_lso(as, ARMI_LDR, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lj_assertA(!ra_used(ir), "unfused FREF");
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ uint32_t k, m = ARMI_K12|sizeof(GCstr);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ (k = emit_isk12(ARMI_ADD,
+ (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
+ m = k;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_dn(as, ARMI_ADD^m, dest, dest);
+ emit_dnm(as, ARMI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_opk(as, ARMI_ADD, dest, r,
+ sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static ARMIns asm_fxloadins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return ARMI_LDRSB;
+ case IRT_U8: return ARMI_LDRB;
+ case IRT_I16: return ARMI_LDRSH;
+ case IRT_U16: return ARMI_LDRH;
+ case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VLDR_D;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S; /* fallthrough */
+ default: return ARMI_LDR;
+ }
+}
+
+static ARMIns asm_fxstoreins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return ARMI_STRB;
+ case IRT_I16: case IRT_U16: return ARMI_STRH;
+ case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VSTR_D;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S; /* fallthrough */
+ default: return ARMI_STR;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ ARMIns ai = asm_fxloadins(as, ir);
+ Reg idx;
+ int32_t ofs;
+ if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
+ idx = ra_allock(as, (int32_t)(ir->op2<<2) + (int32_t)J2GG(as->J), RSET_GPR);
+ ofs = 0;
+ } else {
+ idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ }
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, dest, idx, ofs);
+ else
+ emit_lsox(as, ai, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ ARMIns ai = asm_fxstoreins(as, ir);
+ if ((ai & 0x04000000))
+ emit_lso(as, ai, src, idx, ofs);
+ else
+ emit_lsox(as, ai, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
+ asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ if (ra_used(ir)) {
+ lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t),
+ "bad load type %d", irt_type(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
+ (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
+ if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
+ if (!hiop || type == RID_NONE) {
+ rset_clear(allow, idx);
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ if (ra_hasreg(dest)) {
+#if !LJ_SOFTFP
+ if (t == IRT_NUM)
+ emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_LDR, dest, idx, ofs);
+ }
+ emit_lso(as, ARMI_LDR, type, idx, ofs+4);
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
+ emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
+ } else
+#endif
+ {
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ if (hiop)
+ type = ra_alloc1(as, (ir+1)->op2, allow);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
+ if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
+ emit_lso(as, ARMI_STR, type, idx, ofs+4);
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
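+ /* Stack slots are 8 bytes; IRSLOAD_FRAME reads the frame link from the hiword. */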
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ IRType t = hiop ? IRT_NUM : irt_type(ir->t);
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+ "bad parent SLOAD"); /* Handled by asm_head_side(). */
+ lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+ "inconsistent SLOAD variant");
+#if LJ_SOFTFP
+ lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
+ "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+#else
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t = IRT_NUM; /* Continue with a regular number type check. */
+ } else
+#endif
+ if (ra_used(ir)) {
+ Reg tmp = RID_NONE;
+ if ((ir->op2 & IRSLOAD_CONVERT))
+ tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
+ lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t),
+ "bad SLOAD type %d", irt_type(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (t == IRT_INT) {
+ emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
+ emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
+ t = IRT_NUM; /* Check for original type. */
+ } else {
+ emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
+ emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
+ t = IRT_INT; /* Check for original type. */
+ }
+ dest = tmp;
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+dotypecheck:
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ if (ra_noreg(type)) {
+ if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
+ rset_test((as->freeset & allow), dest+1)) {
+ type = dest+1;
+ ra_modified(as, type);
+ } else {
+ type = RID_TMP;
+ }
+ }
+ asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
+ if ((ir->op2 & IRSLOAD_KEYINDEX)) {
+ emit_n(as, ARMI_CMN|ARMI_K12|1, type);
+ emit_dn(as, ARMI_EOR^emit_isk12(ARMI_EOR, ~LJ_KEYINDEX), type, type);
+ } else {
+ emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
+ }
+ }
+ if (ra_hasreg(dest)) {
+#if !LJ_SOFTFP
+ if (t == IRT_NUM) {
+ if (ofs < 1024) {
+ emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
+ } else {
+ if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
+ emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
+ emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
+ return;
+ }
+ } else
+#endif
+ emit_lso(as, ARMI_LDR, dest, base, ofs);
+ }
+ if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID id = (CTypeID)IR(ir->op1)->i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[4];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ RegSet drop = RSET_SCRATCH;
+ lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+ "bad CNEW/CNEWI operands");
+
+ as->gcsteps++;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lj_assertA(ir->o == IR_HIOP, "expected HIOP for CNEWI");
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_lso(as, ARMI_STR, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ }
+ } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
+ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* CTypeID id */
+ args[2] = ir->op2; /* CTSize sz */
+ args[3] = ASMREF_TMP1; /* CTSize align */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+ return;
+ }
+
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ {
+ uint32_t k = emit_isk12(ARMI_MOV, id);
+ Reg r = k ? RID_R1 : ra_allock(as, id, allow);
+ emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
+ emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
+ if (k) emit_d(as, ARMI_MOV^k, RID_R1);
+ }
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
+ rset_exclude(rset_exclude(RSET_GPR, tab), link));
+ Reg mark = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_lso(as, ARMI_STR, tab, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
+ emit_lso(as, ARMI_LDR, link, gr,
+ (int32_t)offsetof(global_State, gc.grayagain));
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
+ emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
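+ /* If the barrier call is a plain BL, predicate it with NE; otherwise branch around it on EQ. */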
+ if ((l_end[-1] >> 28) == CC_AL)
+ l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
+ else
+ emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
+ ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
+ emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_lso(as, ARMI_LDRB, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_dm(as, ai, (dest & 15), (left & 15));
+}
+
+static void asm_callround(ASMState *as, IRIns *ir, int id)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
+ RID2RSET(RID_R3)|RID2RSET(RID_R12);
+ RegSet of;
+ Reg dest, src;
+ ra_evictset(as, drop);
+ dest = ra_dest(as, ir, RSET_FPR);
+ emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
+ emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
+ id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
+ (void *)lj_vm_trunc_sf);
+ /* Workaround to protect argument GPRs from being used for remat. */
+ of = as->freeset;
+ as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
+ as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
+ src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
+ as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
+ emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ if (ir->op2 <= IRFPM_TRUNC)
+ asm_callround(as, ir, ir->op2);
+ else if (ir->op2 == IRFPM_SQRT)
+ asm_fpunary(as, ir, ARMI_VSQRT_D);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+}
+#endif
+
+static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
+{
+ IRIns *ir;
+ if (irref_isk(rref))
+ return 0; /* Don't swap constants to the left. */
+ if (irref_isk(lref))
+ return 1; /* But swap constants to the right. */
+ ir = IR(rref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 0; /* Don't swap fusable operands to the left. */
+ ir = IR(lref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2))
+ return 1; /* But swap fusable operands to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m;
+ if (asm_swapops(as, lref, rref)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
+ ai ^= (ARMI_SUB^ARMI_RSB);
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
+ if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_VS);
+ ai |= ARMI_S;
+ }
+ emit_dn(as, ai^m, dest, left);
+}
+
+/* Try to drop cmp r, #0. */
+static ARMIns asm_drop_cmp0(ASMState *as, ARMIns ai)
+{
+ if (as->flagmcp == as->mcp) {
+ uint32_t cc = (as->mcp[1] >> 28);
+ as->flagmcp = NULL;
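+ /* EQ/NE only need the Z flag; GE/LT versus zero map to PL/MI via the N flag. */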
+ if (cc <= CC_NE) {
+ as->mcp++;
+ ai |= ARMI_S;
+ } else if (cc == CC_GE) {
+ *++as->mcp ^= ((CC_GE^CC_PL) << 28);
+ ai |= ARMI_S;
+ } else if (cc == CC_LT) {
+ *++as->mcp ^= ((CC_LT^CC_MI) << 28);
+ ai |= ARMI_S;
+ } /* else: other conds don't work in general. */
+ }
+ return ai;
+}
+
+static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ asm_intop(as, ir, asm_drop_cmp0(as, ai));
+}
+
+static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dn(as, ai|ARMI_K12|0, dest, left);
+}
+
+/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
+static void asm_intmul(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ Reg tmp = RID_NONE;
+ /* ARMv5 restriction: dest != left and dest_hi != left. */
+ if (dest == left && left != right) { left = right; right = dest; }
+ if (irt_isguard(ir->t)) { /* IR_MULOV */
+ if (!(as->flags & JIT_F_ARMV6) && dest == left)
+ tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
+ emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
+ } else {
+ if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
+ emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
+ }
+ /* Only need this for the dest == left == right case. */
+ if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
+ asm_fparith(as, ir, ARMI_VADD_D);
+ return;
+ }
+#endif
+ asm_intop_s(as, ir, ARMI_ADD);
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
+ asm_fparith(as, ir, ARMI_VSUB_D);
+ return;
+ }
+#endif
+ asm_intop_s(as, ir, ARMI_SUB);
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, ARMI_VMUL_D);
+ return;
+ }
+#endif
+ asm_intmul(as, ir);
+}
+
+#define asm_addov(as, ir) asm_add(as, ir)
+#define asm_subov(as, ir) asm_sub(as, ir)
+#define asm_mulov(as, ir) asm_mul(as, ir)
+
+#if !LJ_SOFTFP
+#define asm_fpdiv(as, ir) asm_fparith(as, ir, ARMI_VDIV_D)
+#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D)
+#endif
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, ARMI_VNEG_D);
+ return;
+ }
+#endif
+ asm_intneg(as, ir, ARMI_RSB);
+}
+
+static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
+{
+ ai = asm_drop_cmp0(as, ai);
+ if (ir->op2 == 0) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+ emit_d(as, ai^m, dest);
+ } else {
+ /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
+ asm_intop(as, ir, ai);
+ }
+}
+
+#define asm_bnot(as, ir) asm_bitop(as, ir, ARMI_MVN)
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if ((as->flags & JIT_F_ARMV6)) {
+ emit_dm(as, ARMI_REV, dest, left);
+ } else {
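+ /* Pre-ARMv6 swap: tmp = x ^ (x ror 16); tmp &= ~0xff0000; dest = (x ror 8) ^ (tmp lsr 8). */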
+ Reg tmp2 = dest;
+ if (tmp2 == left)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
+ emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
+ emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
+ emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
+ }
+}
+
+#define asm_band(as, ir) asm_bitop(as, ir, ARMI_AND)
+#define asm_bor(as, ir) asm_bitop(as, ir, ARMI_ORR)
+#define asm_bxor(as, ir) asm_bitop(as, ir, ARMI_EOR)
+
+static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
+{
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
+ /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ int32_t shift = (IR(ir->op2)->i & 31);
+ emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
+ }
+}
+
+#define asm_bshl(as, ir) asm_bitshift(as, ir, ARMSH_LSL)
+#define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR)
+#define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR)
+#define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR)
+#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
+
+static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ uint32_t kcmp = 0, kmov = 0;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ Reg right = 0;
+ if (irref_isk(ir->op2)) {
+ kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
+ if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
+ }
+ if (!kmov) {
+ kcmp = 0;
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ if (kmov || dest != right) {
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
+ cc ^= 1; /* Must use opposite conditions for paired moves. */
+ } else {
+ cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */
+ }
+ if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
+ emit_nm(as, ARMI_CMP^kcmp, left, right);
+}
+
+#if LJ_SOFTFP
+static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ args[0] = ir->op1; args[1] = (ir+1)->op1;
+ args[2] = ir->op2; args[3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. */
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
+ if (!rset_test(as->freeset, RID_R2) &&
+ regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
+ if (!rset_test(as->freeset, RID_R3) &&
+ regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
+ ra_evictset(as, drop);
+ ra_destpair(as, ir);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
+ emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+#else
+static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = ((left >> 8) & 15); left &= 15;
+ if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
+ if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ARMI_VCMP_D, left, right);
+}
+#endif
+
+static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
+{
+#if LJ_SOFTFP
+ UNUSED(fcc);
+#else
+ if (irt_isnum(ir->t))
+ asm_fpmin_max(as, ir, fcc);
+ else
+#endif
+ asm_intmin_max(as, ir, cc);
+}
+
+#define asm_min(as, ir) asm_min_max(as, ir, CC_GT, CC_PL)
+#define asm_max(as, ir) asm_min_max(as, ir, CC_LT, CC_LE)
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op FP swp int cc FP cc */
+ /* LT */ CC_GE + (CC_HS << 4),
+ /* GE x */ CC_LT + (CC_HI << 4),
+ /* LE */ CC_GT + (CC_HI << 4),
+ /* GT x */ CC_LE + (CC_HS << 4),
+ /* ULT x */ CC_HS + (CC_LS << 4),
+ /* UGE */ CC_LO + (CC_LO << 4),
+ /* ULE x */ CC_HI + (CC_LO << 4),
+ /* UGT */ CC_LS + (CC_LS << 4),
+ /* EQ */ CC_NE + (CC_NE << 4),
+ /* NE */ CC_EQ + (CC_EQ << 4),
+ /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
+};
+
+#if LJ_SOFTFP
+/* FP comparisons. */
+static void asm_sfpcomp(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
+ args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
+ args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
+ /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
+ for (r = RID_R0; r <= RID_R3; r++)
+ if (!rset_test(as->freeset, r) &&
+ regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
+ ra_evictset(as, drop);
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_call(as, (void *)ci->func);
+ for (r = RID_R0; r <= RID_R3; r++)
+ ra_leftov(as, r, args[r-RID_R0]);
+}
+#else
+/* FP comparisons. */
+static void asm_fpcomp(ASMState *as, IRIns *ir)
+{
+ Reg left, right;
+ ARMIns ai;
+ int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
+ if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
+ left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
+ right = 0;
+ ai = ARMI_VCMPZ_D;
+ } else {
+ left = ra_alloc2(as, ir, RSET_FPR);
+ if (swp) {
+ right = (left & 15); left = ((left >> 8) & 15);
+ } else {
+ right = ((left >> 8) & 15); left &= 15;
+ }
+ ai = ARMI_VCMP_D;
+ }
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_d(as, ARMI_VMRS, 0);
+ emit_dm(as, ai, left, right);
+}
+#endif
+
+/* Integer comparisons. */
+static void asm_intcomp(ASMState *as, IRIns *ir)
+{
+ ARMCC cc = (asm_compmap[ir->o] & 15);
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left;
+ uint32_t m;
+ int cmpprev0 = 0;
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
+ "bad comparison data type %d", irt_type(ir->t));
+ if (asm_swapops(as, lref, rref)) {
+ Reg tmp = lref; lref = rref; rref = tmp;
+ if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
+ else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
+ }
+ if (irref_isk(rref) && IR(rref)->i == 0) {
+ IRIns *irl = IR(lref);
+ cmpprev0 = (irl+1 == ir);
+ /* Combine comp(BAND(left, right), 0) into tst left, right. */
+ if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
+ IRRef blref = irl->op1, brref = irl->op2;
+ uint32_t m2 = 0;
+ Reg bleft;
+ if (asm_swapops(as, blref, brref)) {
+ Reg tmp = blref; blref = brref; brref = tmp;
+ }
+ if (irref_isk(brref)) {
+ m2 = emit_isk12(ARMI_AND, IR(brref)->i);
+ if ((m2 & (ARMI_AND^ARMI_BIC)))
+ goto notst; /* Not beneficial if we miss a constant operand. */
+ }
+ if (cc == CC_GE) cc = CC_PL;
+ else if (cc == CC_LT) cc = CC_MI;
+ else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */
+ bleft = ra_alloc1(as, blref, RSET_GPR);
+ if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_TST^m2, bleft);
+ return;
+ }
+ }
+notst:
+ left = ra_alloc1(as, lref, RSET_GPR);
+ m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, cc);
+ emit_n(as, ARMI_CMP^m, left);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+}
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t))
+ asm_fpcomp(as, ir);
+ else
+#endif
+ asm_intcomp(as, ir);
+}
+
+#define asm_equal(as, ir) asm_comp(as, ir)
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_int64comp(ASMState *as, IRIns *ir)
+{
+ int signedcomp = (ir->o <= IR_GT);
+ ARMCC cclo, cchi;
+ Reg leftlo, lefthi;
+ uint32_t mlo, mhi;
+ RegSet allow = RSET_GPR, oldfree;
+
+ /* Always use unsigned comparison for loword. */
+ cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
+ leftlo = ra_alloc1(as, ir->op1, allow);
+ oldfree = as->freeset;
+ mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
+ allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */
+
+ /* Use signed or unsigned comparison for hiword. */
+ cchi = asm_compmap[ir->o] & 15;
+ lefthi = ra_alloc1(as, (ir+1)->op1, allow);
+ mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
+
+ /* All register allocations must be performed _before_ this point. */
+ if (signedcomp) {
+ MCLabel l_around = emit_label(as);
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMI_CMP^mlo, leftlo);
+ emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
+ if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */
+ asm_guardcc(as, cchi);
+ } else {
+ asm_guardcc(as, cclo);
+ emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
+ }
+ emit_n(as, ARMI_CMP^mhi, lefthi);
+}
+#endif
+
+/* -- Split register ops -------------------------------------------------- */
+
+/* Hiword op of a split 32/32 bit op. Previous op is the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+#if LJ_HASFFI || LJ_SOFTFP
+ if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+#if LJ_SOFTFP
+ if (!irt_isint(ir->t)) {
+ asm_sfpcomp(as, ir-1);
+ return;
+ }
+#endif
+#if LJ_HASFFI
+ asm_int64comp(as, ir-1);
+#endif
+ return;
+#if LJ_SOFTFP
+ } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
+ as->curins--; /* Always skip the loword min/max. */
+ if (uselo || usehi)
+ asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_PL : CC_LE);
+ return;
+#elif LJ_HASFFI
+ } else if ((ir-1)->o == IR_CONV) {
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+#endif
+ } else if ((ir-1)->o == IR_XSTORE) {
+ if ((ir-1)->r != RID_SINK)
+ asm_xstore_(as, ir, 4);
+ return;
+ }
+#endif
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_HASFFI
+ case IR_ADD:
+ as->curins--;
+ asm_intop(as, ir, ARMI_ADC);
+ asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
+ break;
+ case IR_SUB:
+ as->curins--;
+ asm_intop(as, ir, ARMI_SBC);
+ asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
+ break;
+ case IR_NEG:
+ as->curins--;
+ asm_intneg(as, ir, ARMI_RSC);
+ asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
+ break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+#if LJ_SOFTFP
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
+ }
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
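+/* Exit the trace whenever the profiler has set HOOK_PROFILE in
+** g->hookmask, so the sample can be taken in the interpreter.
+*/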
+static void asm_prof(ASMState *as, IRIns *ir)
+{
+ UNUSED(ir);
+ asm_guardcc(as, CC_NE);
+ emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP);
+ emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
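+/* Roughly: load g->cur_L, compute L->maxstack - BASE and branch to the
+** exit stub (CC_LS, unsigned) if no more than 8*topslot bytes remain.
+*/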
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ Reg pbase;
+ uint32_t k;
+ if (irp) {
+ if (!ra_hasspill(irp->s)) {
+ pbase = irp->r;
+ lj_assertA(ra_hasreg(pbase), "base reg lost");
+ } else if (allow) {
+ pbase = rset_pickbot(allow);
+ } else {
+ pbase = RID_RET;
+ emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */
+ }
+ } else {
+ pbase = RID_BASE;
+ }
+ emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
+ k = emit_isk12(0, (int32_t)(8*topslot));
+ lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
+ emit_n(as, ARMI_CMP^k, RID_TMP);
+ emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
+ (int32_t)offsetof(lua_State, maxstack));
+ if (irp) { /* Must not spill arbitrary registers in head of side trace. */
+ int32_t i = i32ptr(&J2G(as->J)->cur_L);
+ if (ra_hasspill(irp->s))
+ emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
+ emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
+ if (ra_hasspill(irp->s) && !allow)
+ emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
+ emit_loadi(as, RID_TMP, (i & ~4095));
+ } else {
+ emit_getgl(as, RID_TMP, cur_L);
+ }
+}
+
+/* Restore Lua stack from on-trace state. */
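+/* Slot s of the snapshot maps to BASE+8*(s-1): numbers are stored as a
+** 64 bit value (two 32 bit halves for LJ_SOFTFP), everything else as a
+** 32 bit value plus a type word at offset +4.
+*/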
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg tmp;
+ /* LJ_SOFTFP: must be a number constant. */
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
+ rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
+ if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
+ emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
+#endif
+ } else {
+ RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
+ Reg type;
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+ "restore of IR type %d", irt_type(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
+ emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
+ if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), odd);
+#if LJ_SOFTFP
+ } else if ((sn & SNAP_SOFTFPNUM)) {
+ type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
+#endif
+ } else if ((sn & SNAP_KEYINDEX)) {
+ type = ra_allock(as, (int32_t)LJ_KEYINDEX, odd);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
+ }
+ emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
+ }
+ checkmclim(as);
+ }
+ lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Marker to prevent patching the GC check exit. */
+#define ARM_NOPATCH_GC_CHECK (ARMI_BIC|ARMI_K12)
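+/* This no-op instruction (bic with a zero mask) is placed right before
+** the exit branch of the GC check, so lj_asm_patchexit below recognizes
+** it and leaves that exit alone: the GC step must still run even after
+** the other exits have been redirected to a side trace.
+*/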
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp1, tmp2;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ *--as->mcp = ARM_NOPATCH_GC_CHECK;
+ emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp1 = ra_releasetmp(as, ASMREF_TMP1);
+ tmp2 = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp2, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
+ emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
+ emit_lso(as, ARMI_LDR, tmp2, tmp1,
+ (int32_t)offsetof(global_State, gc.threshold));
+ emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
+ (int32_t)offsetof(global_State, gc.total));
+ ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
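+/* ARM branches encode a signed 24 bit word offset relative to PC+8, hence
+** the extra -1 for the branch at p[-1] and none for the one at p[-2].
+*/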
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the bcc and patched the final bl. */
+ p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
+ } else {
+ p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
+ }
+}
+
+/* Fixup the tail of the loop. */
+static void asm_loop_tail_fixup(ASMState *as)
+{
+ UNUSED(as); /* Nothing to do. */
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Reload L register from g->cur_L. */
+static void asm_head_lreg(ASMState *as)
+{
+ IRIns *ir = IR(ASMREF_L);
+ if (ra_used(ir)) {
+ Reg r = ra_dest(as, ir, RSET_GPR);
+ emit_getgl(as, r, cur_L);
+ ra_evictk(as);
+ }
+}
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
+ ra_spill(as, ir);
+ ra_destreg(as, ir, RID_BASE);
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
+ ra_spill(as, ir);
+ if (ra_hasspill(irp->s)) {
+ rset_clear(allow, ra_dest(as, ir, allow));
+ } else {
+ Reg r = irp->r;
+ lj_assertA(ra_hasreg(r), "base reg lost");
+ rset_clear(allow, r);
+ if (r != ir->r && !rset_test(as->freeset, r))
+ ra_restore(as, regcost_ref(as->cost[r]));
+ ra_destreg(as, ir, r);
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ as->mctop = --p;
+ } else {
+ /* Patch stack adjustment. */
+ uint32_t k = emit_isk12(ARMI_ADD, spadj);
+ lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
+ p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-1; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+ *p = 0; /* Prevent load/store merging. */
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = CCI_XNARGS(ci);
+ int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+ if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
+ if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
+ if (irt_isnum(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--;
+ else fprodd = 0, nslots = (nslots + 3) & ~1;
+ } else {
+ if (fprodd) fprodd--;
+ else if (nfpr > 0) fprodd = 1, nfpr--;
+ else nslots++;
+ }
+ } else if (irt_isnum(IR(args[i])->t)) {
+ ngpr &= ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ /* May need extra exit for asm_stack_check on side traces. */
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *cstart = NULL, *cend = p;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode *px = exitstub_addr(J, exitno) - 2;
+ for (; p < pe; p++) {
+ /* Look for bl_cc exitstub, replace with b_cc target. */
+ uint32_t ins = *p;
+ if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
+ ((ins ^ (px-p)) & 0x00ffffffu) == 0 &&
+ p[-1] != ARM_NOPATCH_GC_CHECK) {
+ *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
+ cend = p+1;
+ if (!cstart) cstart = p;
+ }
+ }
+ lj_assertJ(cstart != NULL, "exit stub %d not found", exitno);
+ lj_mcode_sync(cstart, cend);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_asm_arm64.h b/libs/luajit-cmake/luajit/src/lj_asm_arm64.h
new file mode 100644
index 0000000..4b7066f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm_arm64.h
@@ -0,0 +1,2070 @@
+/*
+** ARM64 IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
+** Sponsored by Cisco Systems, Inc.
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
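+/* The pair is packed into one Reg as left | (right << 8); callers unpack
+** it with: right = (left >> 8); left &= 255;
+*/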
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ MCode *mxp = as->mctop;
+ if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
+ asm_mclimit(as);
+ /* 1: str lr,[sp]; bl ->vm_exit_handler; movz w0,traceno; bl <1; bl <1; ... */
+ for (i = nexits-1; (int32_t)i >= 0; i--)
+ *--mxp = A64I_LE(A64I_BL | A64F_S26(-3-i));
+ *--mxp = A64I_LE(A64I_MOVZw | A64F_U16(as->T->traceno));
+ mxp--;
+ *mxp = A64I_LE(A64I_BL | A64F_S26(((MCode *)(void *)lj_vm_exit_handler-mxp)));
+ *--mxp = A64I_LE(A64I_STRx | A64F_D(RID_LR) | A64F_N(RID_SP));
+ as->mctop = mxp;
+}
+
+static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
+{
+ /* Keep this in-sync with exitstub_trace_addr(). */
+ return as->mctop + exitno + 3;
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, A64CC cc)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = A64I_B | A64F_S26(target-p);
+ emit_cond_branch(as, cc^1, p-1);
+ return;
+ }
+ emit_cond_branch(as, cc, target);
+}
+
+/* Emit test and branch instruction to exit for guard. */
+static void asm_guardtnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = A64I_B | A64F_S26(target-p);
+ emit_tnb(as, ai^0x01000000u, r, bit, p-1);
+ return;
+ }
+ emit_tnb(as, ai, r, bit, target);
+}
+
+/* Emit compare and branch instruction to exit for guard. */
+static void asm_guardcnb(ASMState *as, A64Ins ai, Reg r)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = A64I_B | A64F_S26(target-p);
+ emit_cnb(as, ai^0x01000000u, r, p-1);
+ return;
+ }
+ emit_cnb(as, ai, r, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
+{
+ if (irref_isk(ref)) {
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
+ *k = ir->i;
+ return 1;
+ } else if (checki32((int64_t)ir_k64(ir)->u64)) {
+ *k = (int32_t)ir_k64(ir)->u64;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Check if there's no conflicting instruction between curins and ref. */
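+/* E.g. the colocated-array fusion below must not be moved across a NEWREF,
+** since that may resize the table and reallocate its array part.
+*/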
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+#define FUSE_REG 0x40000000
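+/* asm_fuseahuref returns either a plain immediate offset in *ofsp, or
+** FUSE_REG|reg to request a register-offset addressing mode. Callers
+** test (ofs & FUSE_REG) to pick the right form (see asm_ahuvload).
+*/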
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
+ A64Ins ins)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (emit_checkofs(ins, ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ } else {
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ *ofsp = FUSE_REG|ra_alloc1(as, ir->op2, rset_exclude(allow, base));
+ return base;
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (emit_checkofs(ins, ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
+ int64_t ofs = glofs(as, &uv->tv);
+ if (emit_checkofs(ins, ofs)) {
+ *ofsp = (int32_t)ofs;
+ return RID_GL;
+ }
+ }
+ } else if (ir->o == IR_TMPREF) {
+ *ofsp = (int32_t)glofs(as, &J2G(as->J)->tmptv);
+ return RID_GL;
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
+
+/* Fuse m operand into arithmetic/logic instructions. */
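+/* Returns the encoded operand-2 field: a plain register, a K12/K13
+** immediate, or a shifted/extended register for fusable shifts and
+** 32 to 64 bit sign extensions. The caller merges it with ai^m.
+*/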
+static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ return A64F_M(ir->r);
+ } else if (irref_isk(ref)) {
+ uint32_t m;
+ int64_t k = get_k64val(as, ref);
+ if ((ai & 0x1f000000) == 0x0a000000)
+ m = emit_isk13(k, irt_is64(ir->t));
+ else
+ m = emit_isk12(k);
+ if (m)
+ return m;
+ } else if (mayfuse(as, ref)) {
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR && irref_isk(ir->op2)) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2)) {
+ A64Shift sh = ir->o == IR_BSHR ? A64SH_LSR :
+ ir->o == IR_BSAR ? A64SH_ASR : A64SH_LSL;
+ int shift = ir->o == IR_ADD ? 1 :
+ (IR(ir->op2)->i & (irt_is64(ir->t) ? 63 : 31));
+ IRIns *irl = IR(ir->op1);
+ if (sh == A64SH_LSL &&
+ irl->o == IR_CONV &&
+ irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
+ shift <= 4 &&
+ canfuse(as, irl)) {
+ Reg m = ra_alloc1(as, irl->op1, allow);
+ return A64F_M(m) | A64F_EXSH(A64EX_SXTW, shift);
+ } else {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ return A64F_M(m) | A64F_SH(sh, shift);
+ }
+ } else if (ir->o == IR_CONV &&
+ ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)) {
+ Reg m = ra_alloc1(as, ir->op1, allow);
+ return A64F_M(m) | A64F_EX(A64EX_SXTW);
+ }
+ }
+ return A64F_M(ra_allocref(as, ref, allow));
+}
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, A64Ins ai, Reg rd, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ int32_t ofs = 0;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ if (ir->o == IR_ADD) {
+ if (asm_isk32(as, ir->op2, &ofs) && emit_checkofs(ai, ofs)) {
+ ref = ir->op1;
+ } else {
+ Reg rn, rm;
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irl = IR(lref);
+ if (mayfuse(as, irl->op1)) {
+ unsigned int shift = 4;
+ if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
+ shift = (IR(irl->op2)->i & 63);
+ } else if (irl->o == IR_ADD && irl->op1 == irl->op2) {
+ shift = 1;
+ }
+ if ((ai >> 30) == shift) {
+ lref = irl->op1;
+ irl = IR(lref);
+ ai |= A64I_LS_SH;
+ }
+ }
+ if (irl->o == IR_CONV &&
+ irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
+ canfuse(as, irl)) {
+ lref = irl->op1;
+ ai |= A64I_LS_SXTWx;
+ } else {
+ ai |= A64I_LS_LSLx;
+ }
+ rm = ra_alloc1(as, lref, allow);
+ rn = ra_alloc1(as, rref, rset_exclude(allow, rm));
+ emit_dnm(as, (ai^A64I_LS_R), (rd & 31), rn, rm);
+ return;
+ }
+ } else if (ir->o == IR_STRREF) {
+ if (asm_isk32(as, ir->op2, &ofs)) {
+ ref = ir->op1;
+ } else if (asm_isk32(as, ir->op1, &ofs)) {
+ ref = ir->op2;
+ } else {
+ Reg refk = irref_isk(ir->op1) ? ir->op1 : ir->op2;
+ Reg refv = irref_isk(ir->op1) ? ir->op2 : ir->op1;
+ Reg rn = ra_alloc1(as, refv, allow);
+ IRIns *irr = IR(refk);
+ uint32_t m;
+ if (irr+1 == ir && !ra_used(irr) &&
+ irr->o == IR_ADD && irref_isk(irr->op2)) {
+ ofs = sizeof(GCstr) + IR(irr->op2)->i;
+ if (emit_checkofs(ai, ofs)) {
+ Reg rm = ra_alloc1(as, irr->op1, rset_exclude(allow, rn));
+ m = A64F_M(rm) | A64F_EX(A64EX_SXTW);
+ goto skipopm;
+ }
+ }
+ m = asm_fuseopm(as, 0, refk, rset_exclude(allow, rn));
+ ofs = sizeof(GCstr);
+ skipopm:
+ emit_lso(as, ai, rd, rd, ofs);
+ emit_dn(as, A64I_ADDx^m, rd, rn);
+ return;
+ }
+ ofs += sizeof(GCstr);
+ if (!emit_checkofs(ai, ofs)) {
+ Reg rn = ra_alloc1(as, ref, allow);
+ Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
+ emit_dnm(as, (ai^A64I_LS_R)|A64I_LS_UXTWx, rd, rn, rm);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_lso(as, ai, (rd & 31), base, ofs);
+}
+
+/* Fuse FP multiply-add/sub. */
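+/* E.g. a*b+c and c+a*b both fuse into a single fmadd; for subtraction
+** the ai/air variants select fmsub vs. fnmsub, depending on which
+** operand is the multiply.
+*/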
+static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, ai = air, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
+ Reg left = ra_alloc2(as, irm,
+ rset_exclude(rset_exclude(RSET_FPR, dest), add));
+ Reg right = (left >> 8); left &= 255;
+ emit_dnma(as, ai, (dest & 31), (left & 31), (right & 31), (add & 31));
+ return 1;
+ }
+ return 0;
+}
+
+/* Fuse BAND + BSHL/BSHR into UBFM. */
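+/* E.g. (x >> 8) & 0xff becomes a single ubfx (UBFM with immr=8, imms=15),
+** provided the mask is a contiguous run of 1-bits at the bottom.
+*/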
+static int asm_fuseandshift(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ lj_assertA(ir->o == IR_BAND, "bad usage");
+ if (canfuse(as, irl) && irref_isk(ir->op2)) {
+ uint64_t mask = get_k64val(as, ir->op2);
+ if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) {
+ int32_t shmask = irt_is64(irl->t) ? 63 : 31;
+ int32_t shift = (IR(irl->op2)->i & shmask);
+ int32_t imms = shift;
+ if (irl->o == IR_BSHL) {
+ mask >>= shift;
+ shift = (shmask-shift+1) & shmask;
+ imms = 0;
+ }
+ if (mask && !((mask+1) & mask)) { /* Contiguous 1-bits at the bottom. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, irl->op1, RSET_GPR);
+ A64Ins ai = shmask == 63 ? A64I_UBFMx : A64I_UBFMw;
+ imms += 63 - emit_clz64(mask);
+ if (imms > shmask) imms = shmask;
+ emit_dn(as, ai | A64F_IMMS(imms) | A64F_IMMR(shift), dest, left);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Fuse BOR(BSHL, BSHR) into EXTR/ROR. */
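+/* E.g. (x << 24) | (x >> 8) of a 32 bit value becomes ror dest, x, #8
+** (an EXTR with both sources equal), when the two shifts sum to the width.
+*/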
+static int asm_fuseorshift(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ lj_assertA(ir->o == IR_BOR, "bad usage");
+ if (canfuse(as, irl) && canfuse(as, irr) &&
+ ((irl->o == IR_BSHR && irr->o == IR_BSHL) ||
+ (irl->o == IR_BSHL && irr->o == IR_BSHR))) {
+ if (irref_isk(irl->op2) && irref_isk(irr->op2)) {
+ IRRef lref = irl->op1, rref = irr->op1;
+ uint32_t lshift = IR(irl->op2)->i, rshift = IR(irr->op2)->i;
+ if (irl->o == IR_BSHR) { /* BSHR needs to be the right operand. */
+ uint32_t tmp2;
+ IRRef tmp1 = lref; lref = rref; rref = tmp1;
+ tmp2 = lshift; lshift = rshift; rshift = tmp2;
+ }
+ if (rshift + lshift == (irt_is64(ir->t) ? 64 : 32)) {
+ A64Ins ai = irt_is64(ir->t) ? A64I_EXTRx : A64I_EXTRw;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
+ emit_dnm(as, ai | A64F_IMMS(rshift), dest, left, right);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
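+/* Argument passing follows the AAPCS64 convention: the first arguments
+** go into x0-x7 resp. d0-d7, the remainder into 8 byte stack slots.
+*/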
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_XNARGS(ci);
+ int32_t ofs = 0;
+ Reg gpr, fpr = REGARG_FIRSTFPR;
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
+ as->cost[gpr] = REGCOST(~0u, ASMREF_L);
+ gpr = REGARG_FIRSTGPR;
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+ if (ref) {
+ if (irt_isfp(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ lj_assertA(rset_test(as->freeset, fpr),
+ "reg %d not free", fpr); /* Must have been evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isnum(ir->t)) ? 4 : 0));
+ ofs += 8;
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR) {
+ lj_assertA(rset_test(as->freeset, gpr),
+ "reg %d not free", gpr); /* Must have been evicted. */
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_is64(ir->t)) ? 4 : 0));
+ ofs += 8;
+ }
+ }
+ }
+ }
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lj_assertA(!irt_ispri(ir->t), "PRI dest");
+ if (irt_isfp(ir->t)) {
+ if (ci->flags & CCI_CASTU64) {
+ Reg dest = ra_dest(as, ir, RSET_FPR) & 31;
+ emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D_R : A64I_FMOV_S_R,
+ dest, RID_RET);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+ UNUSED(ci);
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(ir_k64(irf)->u64);
+ } else { /* Need a non-argument register for indirect calls. */
+ Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
+ emit_n(as, A64I_BLR, freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
+ /* Need to force a spill on REF_BASE now to update the stack slot. */
+ emit_lso(as, A64I_STRx, base, RID_SP, ra_spill(as, IR(REF_BASE)));
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, A64I_CMPx, RID_TMP,
+ ra_allock(as, i64ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_lso(as, A64I_LDRx, RID_TMP, base, -8);
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irgc;
+ irgc.ot = IRT(0, IRT_PGC); /* GC type. */
+ emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
+ emit_dn(as, A64I_BFMx | A64F_IMMS(lj_fls(SBUF_MASK_FLAG)) | A64F_IMMR(0), RID_TMP, tmp);
+ emit_getgl(as, RID_TMP, cur_L);
+ emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+}
+#endif
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_NE);
+ emit_nm(as, A64I_FCMPd, (tmp & 31), (left & 31));
+ emit_dn(as, A64I_FCVT_F64_S32, (tmp & 31), dest);
+ emit_dn(as, A64I_FCVT_S32_F64, dest, (left & 31));
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_dn(as, A64I_FMOV_R_S, dest, (tmp & 31));
+ emit_dnm(as, A64I_FADDd, (tmp & 31), (left & 31), (right & 31));
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_dn(as, st == IRT_NUM ? A64I_FCVT_F32_F64 : A64I_FCVT_F64_F32,
+ (dest & 31), (ra_alloc1(as, lref, RSET_FPR) & 31));
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ A64Ins ai = irt_isfloat(ir->t) ?
+ (((IRT_IS64 >> st) & 1) ?
+ (st == IRT_I64 ? A64I_FCVT_F32_S64 : A64I_FCVT_F32_U64) :
+ (st == IRT_INT ? A64I_FCVT_F32_S32 : A64I_FCVT_F32_U32)) :
+ (((IRT_IS64 >> st) & 1) ?
+ (st == IRT_I64 ? A64I_FCVT_F64_S64 : A64I_FCVT_F64_U64) :
+ (st == IRT_INT ? A64I_FCVT_F64_S32 : A64I_FCVT_F64_U32));
+ emit_dn(as, ai, (dest & 31), left);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ A64Ins ai = irt_is64(ir->t) ?
+ (st == IRT_NUM ?
+ (irt_isi64(ir->t) ? A64I_FCVT_S64_F64 : A64I_FCVT_U64_F64) :
+ (irt_isi64(ir->t) ? A64I_FCVT_S64_F32 : A64I_FCVT_U64_F32)) :
+ (st == IRT_NUM ?
+ (irt_isint(ir->t) ? A64I_FCVT_S32_F64 : A64I_FCVT_U32_F64) :
+ (irt_isint(ir->t) ? A64I_FCVT_S32_F32 : A64I_FCVT_U32_F32));
+ emit_dn(as, ai, dest, (left & 31));
+ }
+ } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ A64Ins ai = st == IRT_I8 ? A64I_SXTBw :
+ st == IRT_U8 ? A64I_UXTBw :
+ st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw;
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
+ emit_dn(as, ai, dest, left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irt_is64(ir->t)) {
+ if (st64 || !(ir->op2 & IRCONV_SEXT)) {
+ /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* 32 to 64 bit sign extension. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ emit_dn(as, A64I_SXTW, dest, left);
+ }
+ } else {
+ if (st64 && !(ir->op2 & IRCONV_NONE)) {
+ /* This is either a 32 bit reg/reg mov which zeroes the hiword
+ ** or a load of the loword from a 64 bit address.
+ */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ emit_dm(as, A64I_MOVw, dest, left);
+ } else { /* 32/32 bit no-op (cast). */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+ }
+}
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ Reg dest = 0, tmp;
+ int destused = ra_used(ir);
+ int32_t ofs = 0;
+ ra_evictset(as, RSET_SCRATCH);
+ if (destused) {
+ if (ra_hasspill(ir->s)) {
+ ofs = sps_scale(ir->s);
+ destused = 0;
+ if (ra_hasreg(ir->r)) {
+ ra_free(as, ir->r);
+ ra_modified(as, ir->r);
+ emit_spload(as, ir, ir->r, ofs);
+ }
+ } else {
+ dest = ra_dest(as, ir, RSET_FPR);
+ }
+ }
+ if (destused)
+ emit_lso(as, A64I_LDRd, (dest & 31), RID_SP, 0);
+ asm_guardcnb(as, A64I_CBZ, RID_RET);
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+ emit_opk(as, A64I_ADDx, tmp, RID_SP, ofs, RSET_GPR);
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+/* Store tagged value for ref at base+ofs. */
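+/* LJ_GC64 tagged value layout: u64 = ((uint64_t)itype << 47) | payload,
+** i.e. the type tag occupies the bits above the 47 bit payload.
+*/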
+static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
+{
+ RegSet allow = rset_exclude(RSET_GPR, base);
+ IRIns *ir = IR(ref);
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+ "store of IR type %d", irt_type(ir->t));
+ if (irref_isk(ref)) {
+ TValue k;
+ lj_ir_kvalue(as->J->L, &k, ir);
+ emit_lso(as, A64I_STRx, ra_allock(as, k.u64, allow), base, ofs);
+ } else {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ if (irt_isinteger(ir->t)) {
+ Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
+ emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
+ emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), RID_TMP, type, src);
+ } else {
+ Reg type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
+ emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), RID_TMP, src, type);
+ }
+ }
+}
+
+/* Get pointer to TValue. */
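+/* For IRTMPREF_IN1 the value is first stored to g->tmptv and the address
+** of that TValue is returned. A number constant may be referenced in
+** place, since it already is a TValue.
+*/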
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
+{
+ if ((mode & IRTMPREF_IN1)) {
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
+ /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i64ptr(ir_knum(ir)), dest);
+ return;
+ }
+ emit_lso(as, A64I_STRd, (ra_alloc1(as, ref, RSET_FPR) & 31), dest, 0);
+ } else {
+ asm_tvstore64(as, dest, 0, ref);
+ }
+ }
+ /* g->tmptv holds the TValue(s). */
+ emit_dn(as, A64I_ADDx^emit_isk12(glofs(as, &J2G(as->J)->tmptv)), dest, RID_GL);
+}
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ uint32_t k = emit_isk12(ofs + 8*IR(ir->op2)->i);
+ if (k) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_dn(as, A64I_ADDx^k, dest, base);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_dnm(as, A64I_ADDx | A64F_EXSH(A64EX_UXTW, 3), dest, base, idx);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
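+/* Note that machine code is emitted bottom-up: the miss/end path below is
+** generated first, then the chain loop, then the hash computation.
+*/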
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = 0, tmp = RID_TMP;
+ Reg ftmp = RID_NONE, type = RID_NONE, scr = RID_NONE, tisnum = RID_NONE;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ int isk = irref_isk(ir->op2);
+ IRType1 kt = irkey->t;
+ uint32_t k = 0;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+ rset_clear(allow, tab);
+
+ if (!isk) {
+ key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
+ rset_clear(allow, key);
+ if (!irt_isstr(kt)) {
+ tmp = ra_scratch(as, allow);
+ rset_clear(allow, tmp);
+ }
+ } else if (irt_isnum(kt)) {
+ int64_t val = (int64_t)ir_knum(irkey)->u64;
+ if (!(k = emit_isk12(val))) {
+ key = ra_allock(as, val, allow);
+ rset_clear(allow, key);
+ }
+ } else if (!irt_ispri(kt)) {
+ if (!(k = emit_isk12(irkey->i))) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+ }
+
+ /* Allocate constants early. */
+ if (irt_isnum(kt)) {
+ if (!isk) {
+ tisnum = ra_allock(as, LJ_TISNUM << 15, allow);
+ ftmp = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ rset_clear(allow, tisnum);
+ }
+ } else if (irt_isaddr(kt)) {
+ if (isk) {
+ int64_t kk = ((int64_t)irt_toitype(kt) << 47) | irkey[1].tv.u64;
+ scr = ra_allock(as, kk, allow);
+ } else {
+ scr = ra_scratch(as, allow);
+ }
+ rset_clear(allow, scr);
+ } else {
+ lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
+ type = ra_allock(as, ~((int64_t)~irt_toitype(kt) << 47), allow);
+ scr = ra_scratch(as, rset_clear(allow, type));
+ rset_clear(allow, scr);
+ }
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_AL);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_n(as, A64I_CMPx^A64I_K12^0, dest);
+ emit_lso(as, A64I_LDRx, dest, dest, offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_cond_branch(as, CC_EQ, l_end);
+
+ if (irt_isnum(kt)) {
+ if (isk) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ if (k)
+ emit_n(as, A64I_CMPx^k, tmp);
+ else
+ emit_nm(as, A64I_CMPx, key, tmp);
+ emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
+ } else {
+ emit_nm(as, A64I_FCMPd, key, ftmp);
+ emit_dn(as, A64I_FMOV_D_R, (ftmp & 31), (tmp & 31));
+ emit_cond_branch(as, CC_LO, l_next);
+ emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), tisnum, tmp);
+ emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.n));
+ }
+ } else if (irt_isaddr(kt)) {
+ if (isk) {
+ emit_nm(as, A64I_CMPx, scr, tmp);
+ emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
+ } else {
+ emit_nm(as, A64I_CMPx, tmp, scr);
+ emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key.u64));
+ }
+ } else {
+ emit_nm(as, A64I_CMPx, scr, type);
+ emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key));
+ }
+
+ *l_loop = A64I_BCC | A64F_S19(as->mcp - l_loop) | CC_NE;
+ if (!isk && irt_isaddr(kt)) {
+ type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
+ emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, key, type);
+ rset_clear(allow, type);
+ }
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(as, irkey) : 1;
+ if (khash == 0) {
+ emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node));
+ } else {
+ emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 3), dest, tmp, dest);
+ emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 1), dest, dest, dest);
+ emit_lso(as, A64I_LDRx, tmp, tab, offsetof(GCtab, node));
+ if (isk) {
+ Reg tmphash = ra_allock(as, khash, allow);
+ emit_dnm(as, A64I_ANDw, dest, dest, tmphash);
+ emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
+ } else if (irt_isstr(kt)) {
+ /* Fetch of str->sid is cheaper than ra_allock. */
+ emit_dnm(as, A64I_ANDw, dest, dest, tmp);
+ emit_lso(as, A64I_LDRw, tmp, key, offsetof(GCstr, sid));
+ emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_dnm(as, A64I_ANDw, dest, dest, tmp);
+ emit_lso(as, A64I_LDRw, tmp, tab, offsetof(GCtab, hmask));
+ emit_dnm(as, A64I_SUBw, dest, dest, tmp);
+ emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT3)), tmp, tmp, tmp);
+ emit_dnm(as, A64I_EORw, dest, dest, tmp);
+ emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT2)), dest, dest, dest);
+ emit_dnm(as, A64I_SUBw, tmp, tmp, dest);
+ emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT1)), dest, dest, dest);
+ emit_dnm(as, A64I_EORw, tmp, tmp, dest);
+ if (irt_isnum(kt)) {
+ emit_dnm(as, A64I_ADDw, dest, dest, dest);
+ emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
+ emit_dm(as, A64I_MOVw, tmp, dest);
+ emit_dn(as, A64I_FMOV_R_D, dest, (key & 31));
+ } else {
+ checkmclim(as);
+ emit_dm(as, A64I_MOVw, tmp, key);
+ emit_dnm(as, A64I_EORw, dest, dest,
+ ra_allock(as, irt_toitype(kt) << 15, allow));
+ emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
+ emit_dm(as, A64I_MOVx, dest, key);
+ }
+ }
+ }
+}
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ int bigofs = !emit_checkofs(A64I_LDRx, ofs);
+ Reg dest = (ra_used(ir) || bigofs) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ uint64_t k;
+ lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
+ if (bigofs) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_opk(as, A64I_ADDx, dest, node, ofs, allow);
+ }
+ asm_guardcc(as, CC_NE);
+ if (irt_ispri(irkey->t)) {
+ k = ~((int64_t)~irt_toitype(irkey->t) << 47);
+ } else if (irt_isnum(irkey->t)) {
+ k = ir_knum(irkey)->u64;
+ } else {
+ k = ((uint64_t)irt_toitype(irkey->t) << 47) | (uint64_t)ir_kgc(irkey);
+ }
+ key = ra_scratch(as, allow);
+ emit_nm(as, A64I_CMPx, key, ra_allock(as, k, rset_exclude(allow, key)));
+ emit_lso(as, A64I_LDRx, key, idx, kofs);
+ if (bigofs)
+ emit_opk(as, A64I_ADDx, dest, node, ofs, RSET_GPR);
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, A64I_LDRx, dest, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_n(as, (A64I_CMPx^A64I_K12) | A64F_U12(1), RID_TMP);
+ emit_opk(as, A64I_ADDx, dest, uv,
+ (int32_t)offsetof(GCupval, tv), RSET_GPR);
+ emit_lso(as, A64I_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_lso(as, A64I_LDRx, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_lso(as, A64I_LDRx, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 8*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lj_assertA(!ra_used(ir), "unfused FREF");
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ IRIns *irr = IR(ir->op2);
+ int32_t ofs = sizeof(GCstr);
+ uint32_t m;
+ rset_clear(allow, base);
+ if (irref_isk(ir->op2) && (m = emit_isk12(ofs + irr->i))) {
+ emit_dn(as, A64I_ADDx^m, dest, base);
+ } else {
+ emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, dest);
+ emit_dnm(as, A64I_ADDx, dest, base, ra_alloc1(as, ir->op2, allow));
+ }
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static A64Ins asm_fxloadins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return A64I_LDRB ^ A64I_LS_S;
+ case IRT_U8: return A64I_LDRB;
+ case IRT_I16: return A64I_LDRH ^ A64I_LS_S;
+ case IRT_U16: return A64I_LDRH;
+ case IRT_NUM: return A64I_LDRd;
+ case IRT_FLOAT: return A64I_LDRs;
+ default: return irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw;
+ }
+}
+
+static A64Ins asm_fxstoreins(IRIns *ir)
+{
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return A64I_STRB;
+ case IRT_I16: case IRT_U16: return A64I_STRH;
+ case IRT_NUM: return A64I_STRd;
+ case IRT_FLOAT: return A64I_STRs;
+ default: return irt_is64(ir->t) ? A64I_STRx : A64I_STRw;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx;
+ A64Ins ai = asm_fxloadins(ir);
+ int32_t ofs;
+ if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
+ idx = RID_GL;
+ ofs = (ir->op2 << 2) - GG_OFS(g);
+ } else {
+ idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, idx);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ }
+ emit_lso(as, ai, (dest & 31), idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ emit_lso(as, asm_fxstoreins(ir), (src & 31), idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
+ asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
+}
+
+static void asm_xstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src));
+ }
+}
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ Reg idx, tmp, type;
+ int32_t ofs = 0;
+ RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
+ lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ irt_isint(ir->t),
+ "bad load type %d", irt_type(ir->t));
+ if (ra_used(ir)) {
+ Reg dest = ra_dest(as, ir, allow);
+ tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest;
+ if (irt_isaddr(ir->t)) {
+ emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
+ } else if (irt_isnum(ir->t)) {
+ emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
+ } else if (irt_isint(ir->t)) {
+ emit_dm(as, A64I_MOVw, dest, dest);
+ }
+ } else {
+ tmp = ra_scratch(as, gpr);
+ }
+ type = ra_scratch(as, rset_clear(gpr, tmp));
+ idx = asm_fuseahuref(as, ir->op1, &ofs, rset_clear(gpr, type), A64I_LDRx);
+ if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
+ /* Always do the type check, even if the load result is unused. */
+ asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE);
+ if (irt_type(ir->t) >= IRT_NUM) {
+ lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
+ "bad load type %d", irt_type(ir->t));
+ emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
+ ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp);
+ } else if (irt_isaddr(ir->t)) {
+ emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(ir->t)), type);
+ emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
+ } else if (irt_isnil(ir->t)) {
+ emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
+ } else {
+ emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
+ ra_allock(as, (irt_toitype(ir->t) << 15) | 0x7fff, gpr), tmp);
+ }
+ if (ofs & FUSE_REG)
+ emit_dnm(as, (A64I_LDRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
+ else
+ emit_lso(as, A64I_LDRx, tmp, idx, ofs);
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, tmp = RID_TMP, type = RID_NONE;
+ int32_t ofs = 0;
+ if (irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow, A64I_STRd);
+ if (ofs & FUSE_REG)
+ emit_dnm(as, (A64I_STRd^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, (src & 31), idx, (ofs & 31));
+ else
+ emit_lso(as, A64I_STRd, (src & 31), idx, ofs);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ if (irt_isinteger(ir->t))
+ type = ra_allock(as, (uint64_t)(int32_t)LJ_TISNUM << 47, allow);
+ else
+ type = ra_allock(as, irt_toitype(ir->t), allow);
+ } else {
+ tmp = type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t)<<47), allow);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type),
+ A64I_STRx);
+ if (ofs & FUSE_REG)
+ emit_dnm(as, (A64I_STRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
+ else
+ emit_lso(as, A64I_STRx, tmp, idx, ofs);
+ if (ra_hasreg(src)) {
+ if (irt_isinteger(ir->t)) {
+ emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), tmp, type, src);
+ } else {
+ emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, src, type);
+ }
+ }
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-2);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+ "bad parent SLOAD"); /* Handled by asm_head_side(). */
+ lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+ "inconsistent SLOAD variant");
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else if (ra_used(ir)) {
+ Reg tmp = RID_NONE;
+ if ((ir->op2 & IRSLOAD_CONVERT))
+ tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR);
+ lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t),
+ "bad SLOAD type %d", irt_type(t));
+ dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow);
+ base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest));
+ if (irt_isaddr(t)) {
+ emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
+ } else if ((ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ emit_dn(as, A64I_FCVT_S32_F64, dest, (tmp & 31));
+ /* If value is already loaded for type check, move it to FPR. */
+ if ((ir->op2 & IRSLOAD_TYPECHECK))
+ emit_dn(as, A64I_FMOV_D_R, (tmp & 31), dest);
+ else
+ dest = tmp;
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ emit_dn(as, A64I_FCVT_F64_S32, (dest & 31), tmp);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
+ emit_dm(as, A64I_MOVw, dest, dest);
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+dotypecheck:
+ rset_clear(allow, base);
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg tmp;
+ if (ra_hasreg(dest) && rset_test(RSET_GPR, dest)) {
+ tmp = dest;
+ } else {
+ tmp = ra_scratch(as, allow);
+ rset_clear(allow, tmp);
+ }
+ if (ra_hasreg(dest) && irt_isnum(t) && !(ir->op2 & IRSLOAD_CONVERT))
+ emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
+ /* Need type check, even if the load result is unused. */
+ asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE);
+ if (irt_type(t) >= IRT_NUM) {
+ lj_assertA(irt_isinteger(t) || irt_isnum(t),
+ "bad SLOAD type %d", irt_type(t));
+ emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
+ ra_allock(as, (ir->op2 & IRSLOAD_KEYINDEX) ? LJ_KEYINDEX : (LJ_TISNUM << 15), allow), tmp);
+ } else if (irt_isnil(t)) {
+ emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
+ } else if (irt_ispri(t)) {
+ emit_nm(as, A64I_CMPx,
+ ra_allock(as, ~((int64_t)~irt_toitype(t) << 47) , allow), tmp);
+ } else {
+ Reg type = ra_scratch(as, allow);
+ emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(t)), type);
+ emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
+ }
+ emit_lso(as, A64I_LDRx, tmp, base, ofs);
+ return;
+ }
+ if (ra_hasreg(dest)) {
+ emit_lso(as, irt_isnum(t) ? A64I_LDRd :
+ (irt_isint(t) ? A64I_LDRw : A64I_LDRx), (dest & 31), base,
+ ofs ^ ((LJ_BE && irt_isint(t) ? 4 : 0)));
+ }
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID id = (CTypeID)IR(ir->op1)->i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[4];
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+ "bad CNEW/CNEWI operands");
+
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCcdata * */
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ int32_t ofs = sizeof(GCcdata);
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
+ emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs);
+ } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
+ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* CTypeID id */
+ args[2] = ir->op2; /* CTSize sz */
+ args[3] = ASMREF_TMP1; /* CTSize align */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+ return;
+ }
+
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ {
+ Reg r = (id < 65536) ? RID_X1 : ra_allock(as, id, allow);
+ emit_lso(as, A64I_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
+ emit_lso(as, A64I_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_d(as, A64I_MOVZw | A64F_U16(~LJ_TCDATA), RID_TMP);
+ if (id < 65536) emit_d(as, A64I_MOVZw | A64F_U16(id), RID_X1);
+ }
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg mark = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_lso(as, A64I_STRx, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_lso(as, A64I_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_dn(as, A64I_ANDw^emit_isk13(~LJ_GC_BLACK, 0), mark, mark);
+ emit_getgl(as, link, gc.grayagain);
+ emit_cond_branch(as, CC_EQ, l_end);
+ emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), mark);
+ emit_lso(as, A64I_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ RegSet allow = RSET_GPR;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(allow, obj));
+ emit_cond_branch(as, CC_EQ, l_end);
+ emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), tmp);
+ emit_cond_branch(as, CC_EQ, l_end);
+ emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_WHITES, 0), RID_TMP);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_lso(as, A64I_LDRB, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_lso(as, A64I_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+static void asm_fparith(ASMState *as, IRIns *ir, A64Ins ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_dnm(as, ai, (dest & 31), (left & 31), (right & 31));
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, A64Ins ai)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_dn(as, ai, (dest & 31), (left & 31));
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ IRFPMathOp fpm = (IRFPMathOp)ir->op2;
+ if (fpm == IRFPM_SQRT) {
+ asm_fpunary(as, ir, A64I_FSQRTd);
+ } else if (fpm <= IRFPM_TRUNC) {
+ asm_fpunary(as, ir, fpm == IRFPM_FLOOR ? A64I_FRINTMd :
+ fpm == IRFPM_CEIL ? A64I_FRINTPd : A64I_FRINTZd);
+ } else {
+ asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
+ }
+}
+
+static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
+{
+ IRIns *ir;
+ if (irref_isk(rref))
+ return 0; /* Don't swap constants to the left. */
+ if (irref_isk(lref))
+ return 1; /* But swap constants to the right. */
+ ir = IR(rref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2) ||
+ (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
+ return 0; /* Don't swap fusable operands to the left. */
+ ir = IR(lref);
+ if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
+ (ir->o == IR_ADD && ir->op1 == ir->op2) ||
+ (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
+ return 1; /* But swap fusable operands to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_intop(ASMState *as, IRIns *ir, A64Ins ai)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m;
+ if ((ai & ~A64I_S) != A64I_SUBw && asm_swapops(as, lref, rref)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ if (irt_is64(ir->t)) ai |= A64I_X;
+ m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
+ if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_VS);
+ ai |= A64I_S;
+ }
+ emit_dn(as, ai^m, dest, left);
+}
+
+static void asm_intop_s(ASMState *as, IRIns *ir, A64Ins ai)
+{
+ if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */
+ as->flagmcp = NULL;
+ as->mcp++;
+ ai |= A64I_S;
+ }
+ asm_intop(as, ir, ai);
+}
+
+static void asm_intneg(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dm(as, irt_is64(ir->t) ? A64I_NEGx : A64I_NEGw, dest, left);
+}
+
+/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
+static void asm_intmul(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ if (irt_isguard(ir->t)) { /* IR_MULOV */
+ asm_guardcc(as, CC_NE);
+ emit_dm(as, A64I_MOVw, dest, dest); /* Zero-extend. */
+ emit_nm(as, A64I_CMPw | A64F_SH(A64SH_ASR, 31), RID_TMP, dest);
+ emit_dn(as, A64I_ASRx | A64F_IMMR(32), RID_TMP, dest);
+ emit_dnm(as, A64I_SMULL, dest, right, left);
+ } else {
+ emit_dnm(as, irt_is64(ir->t) ? A64I_MULx : A64I_MULw, dest, left, right);
+ }
+}
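+/* Note (annotation): the IR_MULOV check runs, in machine order: smull
+** computes the full 64 bit product, asr #32 extracts the high word, cmp
+** compares it with the low word shifted asr #31 (equal iff the product
+** fits in 32 bits), and the b.ne guard exits on overflow. E.g.
+** 0x40000000 * 4 = 2^32 gives high word 1 vs. sign-extension 0, which
+** triggers the guard.
+*/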
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, A64I_FMADDd, A64I_FMADDd))
+ asm_fparith(as, ir, A64I_FADDd);
+ return;
+ }
+ asm_intop_s(as, ir, A64I_ADDw);
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, A64I_FNMSUBd, A64I_FMSUBd))
+ asm_fparith(as, ir, A64I_FSUBd);
+ return;
+ }
+ asm_intop_s(as, ir, A64I_SUBw);
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, A64I_FMULd);
+ return;
+ }
+ asm_intmul(as, ir);
+}
+
+#define asm_addov(as, ir) asm_add(as, ir)
+#define asm_subov(as, ir) asm_sub(as, ir)
+#define asm_mulov(as, ir) asm_mul(as, ir)
+
+#define asm_fpdiv(as, ir) asm_fparith(as, ir, A64I_FDIVd)
+#define asm_abs(as, ir) asm_fpunary(as, ir, A64I_FABS)
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, A64I_FNEGd);
+ return;
+ }
+ asm_intneg(as, ir);
+}
+
+static void asm_band(ASMState *as, IRIns *ir)
+{
+ A64Ins ai = A64I_ANDw;
+ if (asm_fuseandshift(as, ir))
+ return;
+ if (as->flagmcp == as->mcp) {
+ /* Try to drop cmp r, #0. */
+ as->flagmcp = NULL;
+ as->mcp++;
+ ai = A64I_ANDSw;
+ }
+ asm_intop(as, ir, ai);
+}
+
+static void asm_borbxor(ASMState *as, IRIns *ir, A64Ins ai)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irl = IR(lref), *irr = IR(rref);
+ if ((canfuse(as, irl) && irl->o == IR_BNOT && !irref_isk(rref)) ||
+ (canfuse(as, irr) && irr->o == IR_BNOT && !irref_isk(lref))) {
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m;
+ if (irl->o == IR_BNOT) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ left = ra_alloc1(as, lref, RSET_GPR);
+ ai |= A64I_ON;
+ if (irt_is64(ir->t)) ai |= A64I_X;
+ m = asm_fuseopm(as, ai, IR(rref)->op1, rset_exclude(RSET_GPR, left));
+ emit_dn(as, ai^m, dest, left);
+ } else {
+ asm_intop(as, ir, ai);
+ }
+}
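+/* Note (annotation): a single-sided BNOT operand fuses into the
+** inverted-operand forms ORN/EON (selected by the A64I_ON bit); the BNOT
+** is normalized to the right-hand side and its own operand is what gets
+** allocated or fused.
+*/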
+
+static void asm_bor(ASMState *as, IRIns *ir)
+{
+ if (asm_fuseorshift(as, ir))
+ return;
+ asm_borbxor(as, ir, A64I_ORRw);
+}
+
+#define asm_bxor(as, ir) asm_borbxor(as, ir, A64I_EORw)
+
+static void asm_bnot(ASMState *as, IRIns *ir)
+{
+ A64Ins ai = A64I_MVNw;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
+ if (irt_is64(ir->t)) ai |= A64I_X;
+ emit_d(as, ai^m, dest);
+}
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dn(as, irt_is64(ir->t) ? A64I_REVx : A64I_REVw, dest, left);
+}
+
+static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh)
+{
+ int32_t shmask = irt_is64(ir->t) ? 63 : 31;
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ int32_t shift = (IR(ir->op2)->i & shmask);
+ IRIns *irl = IR(ir->op1);
+ if (shmask == 63) ai += A64I_UBFMx - A64I_UBFMw;
+
+ /* Fuse BSHL + BSHR/BSAR into UBFM/SBFM aka UBFX/SBFX/UBFIZ/SBFIZ. */
+ if ((sh == A64SH_LSR || sh == A64SH_ASR) && canfuse(as, irl)) {
+ if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
+ int32_t shift2 = (IR(irl->op2)->i & shmask);
+ shift = ((shift - shift2) & shmask);
+ shmask -= shift2;
+ ir = irl;
+ }
+ }
+
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ switch (sh) {
+ case A64SH_LSL:
+ emit_dn(as, ai | A64F_IMMS(shmask-shift) |
+ A64F_IMMR((shmask-shift+1)&shmask), dest, left);
+ break;
+ case A64SH_LSR: case A64SH_ASR:
+ emit_dn(as, ai | A64F_IMMS(shmask) | A64F_IMMR(shift), dest, left);
+ break;
+ case A64SH_ROR:
+ emit_dnm(as, ai | A64F_IMMS(shift), dest, left, left);
+ break;
+ }
+ } else { /* Variable-length shifts. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dnm(as, (shmask == 63 ? A64I_SHRx : A64I_SHRw) | A64F_BSH(sh), dest, left, right);
+ }
+}
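+/* Note (annotation): constant shifts are UBFM/SBFM aliases, e.g. a 64 bit
+** LSR #3 is UBFMx Rd,Rn,#3,#63 and LSL #3 is UBFMx Rd,Rn,#61,#60;
+** shrinking shmask after fusing a preceding BSHL turns the pair into a
+** single bitfield op, e.g. (x << 8) >> 4 becomes one UBFIZ.
+*/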
+
+#define asm_bshl(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSL)
+#define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR)
+#define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR)
+#define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR)
+#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
+
+static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dnm(as, A64I_CSELw|A64F_CC(cc), dest, left, right);
+ emit_nm(as, A64I_CMPw, left, right);
+}
+
+static void asm_fpmin_max(ASMState *as, IRIns *ir, A64CC fcc)
+{
+ Reg dest = (ra_dest(as, ir, RSET_FPR) & 31);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = ((left >> 8) & 31); left &= 31;
+ emit_dnm(as, A64I_FCSELd | A64F_CC(fcc), dest, right, left);
+ emit_nm(as, A64I_FCMPd, left, right);
+}
+
+static void asm_min_max(ASMState *as, IRIns *ir, A64CC cc, A64CC fcc)
+{
+ if (irt_isnum(ir->t))
+ asm_fpmin_max(as, ir, fcc);
+ else
+ asm_intmin_max(as, ir, cc);
+}
+
+#define asm_min(as, ir) asm_min_max(as, ir, CC_LT, CC_PL)
+#define asm_max(as, ir) asm_min_max(as, ir, CC_GT, CC_LE)
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op FP swp int cc FP cc */
+ /* LT */ CC_GE + (CC_HS << 4),
+ /* GE x */ CC_LT + (CC_HI << 4),
+ /* LE */ CC_GT + (CC_HI << 4),
+ /* GT x */ CC_LE + (CC_HS << 4),
+ /* ULT x */ CC_HS + (CC_LS << 4),
+ /* UGE */ CC_LO + (CC_LO << 4),
+ /* ULE x */ CC_HI + (CC_LO << 4),
+ /* UGT */ CC_LS + (CC_LS << 4),
+ /* EQ */ CC_NE + (CC_NE << 4),
+ /* NE */ CC_EQ + (CC_EQ << 4),
+ /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
+};
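+/* Note (annotation): each entry packs two condition codes: the low nibble
+** is the inverted integer condition (the guard exits when the comparison
+** fails, so IR_LT guards with CC_GE), the high nibble the inverted FP
+** condition, chosen so unordered (NaN) operands also take the exit; the
+** "swp" column marks ops whose FP operands asm_fpcomp() swaps.
+*/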
+
+/* FP comparisons. */
+static void asm_fpcomp(ASMState *as, IRIns *ir)
+{
+ Reg left, right;
+ A64Ins ai;
+ int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
+ if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
+ left = (ra_alloc1(as, ir->op1, RSET_FPR) & 31);
+ right = 0;
+ ai = A64I_FCMPZd;
+ } else {
+ left = ra_alloc2(as, ir, RSET_FPR);
+ if (swp) {
+ right = (left & 31); left = ((left >> 8) & 31);
+ } else {
+ right = ((left >> 8) & 31); left &= 31;
+ }
+ ai = A64I_FCMPd;
+ }
+ asm_guardcc(as, (asm_compmap[ir->o] >> 4));
+ emit_nm(as, ai, left, right);
+}
+
+/* Integer comparisons. */
+static void asm_intcomp(ASMState *as, IRIns *ir)
+{
+ A64CC oldcc, cc = (asm_compmap[ir->o] & 15);
+ A64Ins ai = irt_is64(ir->t) ? A64I_CMPx : A64I_CMPw;
+ IRRef lref = ir->op1, rref = ir->op2;
+ Reg left;
+ uint32_t m;
+ int cmpprev0 = 0;
+ lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
+ irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
+ "bad comparison data type %d", irt_type(ir->t));
+ if (asm_swapops(as, lref, rref)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
+ else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
+ }
+ oldcc = cc;
+ if (irref_isk(rref) && get_k64val(as, rref) == 0) {
+ IRIns *irl = IR(lref);
+ if (cc == CC_GE) cc = CC_PL;
+ else if (cc == CC_LT) cc = CC_MI;
+ else if (cc > CC_NE) goto nocombine; /* Other conds don't work with tst. */
+ cmpprev0 = (irl+1 == ir);
+ /* Combine and-cmp-bcc into tbz/tbnz or and-cmp into tst. */
+ if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
+ IRRef blref = irl->op1, brref = irl->op2;
+ uint32_t m2 = 0;
+ Reg bleft;
+ if (asm_swapops(as, blref, brref)) {
+ Reg tmp = blref; blref = brref; brref = tmp;
+ }
+ if (irref_isk(brref)) {
+ uint64_t k = get_k64val(as, brref);
+ if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) {
+ asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ,
+ ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k));
+ return;
+ }
+ m2 = emit_isk13(k, irt_is64(irl->t));
+ }
+ bleft = ra_alloc1(as, blref, RSET_GPR);
+ ai = (irt_is64(irl->t) ? A64I_TSTx : A64I_TSTw);
+ if (!m2)
+ m2 = asm_fuseopm(as, ai, brref, rset_exclude(RSET_GPR, bleft));
+ asm_guardcc(as, cc);
+ emit_n(as, ai^m2, bleft);
+ return;
+ }
+ if (cc == CC_EQ || cc == CC_NE) {
+ /* Combine cmp-bcc into cbz/cbnz. */
+ ai = cc == CC_EQ ? A64I_CBZ : A64I_CBNZ;
+ if (irt_is64(ir->t)) ai |= A64I_X;
+ asm_guardcnb(as, ai, ra_alloc1(as, lref, RSET_GPR));
+ return;
+ }
+ }
+nocombine:
+ left = ra_alloc1(as, lref, RSET_GPR);
+ m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
+ asm_guardcc(as, cc);
+ emit_n(as, ai^m, left);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (cmpprev0 && (oldcc <= CC_NE || oldcc >= CC_GE))
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+}
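+/* Note (annotation): equality tests against zero combine with a preceding
+** unused BAND into a single-bit branch when the mask is a power of two,
+** e.g. "(x & 8) == 0" guards with tbnz on bit 3 (emit_ctz64(8)); other
+** masks use tst, and a plain x == 0 / x != 0 guard becomes cbz/cbnz.
+*/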
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fpcomp(as, ir);
+ else
+ asm_intcomp(as, ir);
+}
+
+#define asm_equal(as, ir) asm_comp(as, ir)
+
+/* -- Split register ops -------------------------------------------------- */
+
+/* Hiword op of a split 64/64 bit op. Previous op is the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+ case IR_CALLN:
+ case IR_CALLL:
+ case IR_CALLS:
+ case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
+ }
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
+static void asm_prof(ASMState *as, IRIns *ir)
+{
+ uint32_t k = emit_isk13(HOOK_PROFILE, 0);
+ lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13");
+ UNUSED(ir);
+ asm_guardcc(as, CC_NE);
+ emit_n(as, A64I_TSTw^k, RID_TMP);
+ emit_lsptr(as, A64I_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ Reg pbase;
+ uint32_t k;
+ if (irp) {
+ if (!ra_hasspill(irp->s)) {
+ pbase = irp->r;
+ lj_assertA(ra_hasreg(pbase), "base reg lost");
+ } else if (allow) {
+ pbase = rset_pickbot(allow);
+ } else {
+ pbase = RID_RET;
+ emit_lso(as, A64I_LDRx, RID_RET, RID_SP, 0); /* Restore temp register. */
+ }
+ } else {
+ pbase = RID_BASE;
+ }
+ emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno));
+ k = emit_isk12((8*topslot));
+ lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
+ emit_n(as, A64I_CMPx^k, RID_TMP);
+ emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase);
+ emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP,
+ (int32_t)offsetof(lua_State, maxstack));
+ if (irp) { /* Must not spill arbitrary registers in head of side trace. */
+ if (ra_hasspill(irp->s))
+ emit_lso(as, A64I_LDRx, pbase, RID_SP, sps_scale(irp->s));
+ emit_lso(as, A64I_LDRx, RID_TMP, RID_GL, glofs(as, &J2G(as->J)->cur_L));
+ if (ra_hasspill(irp->s) && !allow)
+ emit_lso(as, A64I_STRx, RID_RET, RID_SP, 0); /* Save temp register. */
+ } else {
+ emit_getgl(as, RID_TMP, cur_L);
+ }
+}
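+/* Note (annotation): in machine order this loads L from g->cur_L, loads
+** L->maxstack, computes maxstack - BASE and compares it against the
+** 8*topslot bytes the trace needs; b.ls falls back to the exit handler.
+*/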
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+#ifdef LUA_USE_ASSERT
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
+#endif
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if ((sn & SNAP_KEYINDEX)) {
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ Reg r = irref_isk(ref) ? ra_allock(as, ir->i, allow) :
+ ra_alloc1(as, ref, allow);
+ rset_clear(allow, r);
+ emit_lso(as, A64I_STRw, r, RID_BASE, ofs);
+ emit_lso(as, A64I_STRw, ra_allock(as, LJ_KEYINDEX, allow), RID_BASE, ofs+4);
+ } else if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_lso(as, A64I_STRd, (src & 31), RID_BASE, ofs);
+ } else {
+ asm_tvstore64(as, RID_BASE, ofs, ref);
+ }
+ checkmclim(as);
+ }
+ lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Marker to prevent patching the GC check exit. */
+#define ARM64_NOPATCH_GC_CHECK \
+ (A64I_ORRx|A64F_D(RID_TMP)|A64F_M(RID_TMP)|A64F_N(RID_TMP))
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp2;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcnb(as, A64I_CBNZ, RID_RET); /* Assumes asm_snap_prep() is done. */
+ *--as->mcp = ARM64_NOPATCH_GC_CHECK;
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL);
+ tmp2 = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp2, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_cond_branch(as, CC_LS, l_end);
+ emit_nm(as, A64I_CMPx, RID_TMP, tmp2);
+ emit_getgl(as, tmp2, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
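+/* Note (annotation): the ARM64_NOPATCH_GC_CHECK marker emitted right
+** before the cbnz is recognized by lj_asm_patchexit() below, which then
+** leaves the long-range branch in the exit stub alone, so the GC-check
+** exit always goes through the exit handler instead of a linked trace.
+*/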
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ uint32_t mask = (p[-2] & 0x7e000000) == 0x36000000 ? 0x3fffu : 0x7ffffu;
+ ptrdiff_t delta = target - (p - 2);
+ /* asm_guard* already inverted the bcc/tnb/cnb and patched the final b. */
+ p[-2] |= ((uint32_t)delta & mask) << 5;
+ } else {
+ ptrdiff_t delta = target - (p - 1);
+ p[-1] = A64I_B | A64F_S26(delta);
+ }
+}
+
+/* Fixup the tail of the loop. */
+static void asm_loop_tail_fixup(ASMState *as)
+{
+ UNUSED(as); /* Nothing to do. */
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Reload L register from g->cur_L. */
+static void asm_head_lreg(ASMState *as)
+{
+ IRIns *ir = IR(ASMREF_L);
+ if (ra_used(ir)) {
+ Reg r = ra_dest(as, ir, RSET_GPR);
+ emit_getgl(as, r, cur_L);
+ ra_evictk(as);
+ }
+}
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
+ ra_spill(as, ir);
+ ra_destreg(as, ir, RID_BASE);
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir;
+ asm_head_lreg(as);
+ ir = IR(REF_BASE);
+ if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
+ ra_spill(as, ir);
+ if (ra_hasspill(irp->s)) {
+ rset_clear(allow, ra_dest(as, ir, allow));
+ } else {
+ Reg r = irp->r;
+ lj_assertA(ra_hasreg(r), "base reg lost");
+ rset_clear(allow, r);
+ if (r != ir->r && !rset_test(as->freeset, r))
+ ra_restore(as, regcost_ref(as->cost[r]));
+ ra_destreg(as, ir, r);
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ /* Undo the sp adjustment in BC_JLOOP when exiting to the interpreter. */
+ int32_t spadj = as->T->spadjust + (lnk ? 0 : sps_scale(SPS_FIXED));
+ if (spadj == 0) {
+ *--p = A64I_LE(A64I_NOP);
+ as->mctop = p;
+ } else {
+ /* Patch stack adjustment. */
+ uint32_t k = emit_isk12(spadj);
+ lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
+ p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP);
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = A64I_B | A64F_S26((target-p)+1);
+}
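+/* Note (annotation): the exit branch sits at p-1, so its displacement is
+** target - (p-1) = (target-p)+1 instruction words, encoded in the 26 bit
+** A64F_S26 field.
+*/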
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-1; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+ *p = 0; /* Prevent load/store merging. */
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = CCI_XNARGS(ci);
+ int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots += 2;
+ }
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ /* May need extra exit for asm_stack_check on side traces. */
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+#if LJ_BE
+/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
+static void asm_mcode_fixup(MCode *mcode, MSize size)
+{
+ MCode *pe = (MCode *)((char *)mcode + size);
+ while (mcode < pe) {
+ MCode ins = *mcode;
+ *mcode++ = lj_bswap(ins);
+ }
+}
+#define LJ_TARGET_MCODE_FIXUP 1
+#endif
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *cstart = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ int patchlong = 1;
+ /* Note: this assumes a trace exit is only ever patched once. */
+ for (; p < pe; p++) {
+ /* Look for exitstub branch, replace with branch to target. */
+ ptrdiff_t delta = target - p;
+ MCode ins = A64I_LE(*p);
+ if ((ins & 0xff000000u) == 0x54000000u &&
+ ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
+ /* Patch bcc, if within range. */
+ if (A64F_S_OK(delta, 19)) {
+ *p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
+ if (!cstart) cstart = p;
+ }
+ } else if ((ins & 0xfc000000u) == 0x14000000u &&
+ ((ins ^ (px-p)) & 0x03ffffffu) == 0) {
+ /* Patch b. */
+ lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
+ *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta));
+ if (!cstart) cstart = p;
+ } else if ((ins & 0x7e000000u) == 0x34000000u &&
+ ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
+ /* Patch cbz/cbnz, if within range. */
+ if (p[-1] == ARM64_NOPATCH_GC_CHECK) {
+ patchlong = 0;
+ } else if (A64F_S_OK(delta, 19)) {
+ *p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
+ if (!cstart) cstart = p;
+ }
+ } else if ((ins & 0x7e000000u) == 0x36000000u &&
+ ((ins ^ ((px-p)<<5)) & 0x0007ffe0u) == 0) {
+ /* Patch tbz/tbnz, if within range. */
+ if (A64F_S_OK(delta, 14)) {
+ *p = A64I_LE((ins & 0xfff8001fu) | A64F_S14(delta));
+ if (!cstart) cstart = p;
+ }
+ }
+ }
+ /* Always patch long-range branch in exit stub itself. Except, if we can't. */
+ if (patchlong) {
+ ptrdiff_t delta = target - px;
+ lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
+ *px = A64I_B | A64F_S26(delta);
+ if (!cstart) cstart = px;
+ }
+ if (cstart) lj_mcode_sync(cstart, px+1);
+ lj_mcode_patch(J, mcarea, 1);
+}
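+/* Note (annotation): each candidate is matched by opcode mask plus a
+** check that its current displacement field points at the exit stub px
+** (the (ins ^ ((px-p)<<5)) tests); only branches whose immediate range
+** reaches the new target are rewritten in place, the rest still reach it
+** through the re-targeted long-range b in the exit stub.
+*/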
+
diff --git a/libs/luajit-cmake/luajit/src/lj_asm_mips.h b/libs/luajit-cmake/luajit/src/lj_asm_mips.h
new file mode 100644
index 0000000..1686b40
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm_mips.h
@@ -0,0 +1,2808 @@
+/*
+** MIPS IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate a register or RID_ZERO. */
+static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0)
+ return RID_ZERO;
+ r = ra_allocref(as, ref, allow);
+ } else {
+ ra_noweak(as, r);
+ }
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_alloc1z(as, ir->op2, allow);
+ left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_alloc1z(as, ir->op1, allow);
+ right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
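+/* Note (annotation): both allocations are packed into the return value as
+** left | (right << 8); callers unpack with right = (left >> 8) and
+** left &= 255.
+*/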
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Need some spare long-range jump slots for out-of-range branches. */
+#define MIPS_SPAREJUMP 4
+
+/* Setup spare long-range jump slots per mcarea. */
+static void asm_sparejump_setup(ASMState *as)
+{
+ MCode *mxp = as->mctop;
+ if ((char *)mxp == (char *)as->J->mcarea + as->J->szmcarea) {
+ mxp -= MIPS_SPAREJUMP*2;
+ lj_assertA(MIPSI_NOP == 0, "bad NOP");
+ memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
+ as->mctop = mxp;
+ }
+}
+
+static MCode *asm_sparejump_use(MCode *mcarea, MCode tjump)
+{
+ MCode *mxp = (MCode *)((char *)mcarea + ((MCLink *)mcarea)->size);
+ int slot = MIPS_SPAREJUMP;
+ while (slot--) {
+ mxp -= 2;
+ if (*mxp == tjump) {
+ return mxp;
+ } else if (*mxp == MIPSI_NOP) {
+ *mxp = tjump;
+ return mxp;
+ }
+ }
+ return NULL;
+}
+
+/* Setup exit stub after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as)
+{
+ MCode *mxp = as->mctop;
+ /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
+ *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
+ *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
+ lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0,
+ "branch target out of range");
+ *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
+ as->mctop = mxp;
+}
+
+/* Keep this in-sync with exitstub_trace_addr(). */
+#define asm_exitstub_addr(as) ((as)->mctop)
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
+{
+ MCode *target = asm_exitstub_addr(as);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->invmcp = NULL;
+ as->loopinv = 1;
+ as->mcp = p+1;
+#if !LJ_TARGET_MIPSR6
+ mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
+#else
+ mi = mi ^ ((mi>>28) == 1 ? 0x04000000u :
+ (mi>>28) == 4 ? 0x00800000u : 0x00010000u); /* Invert cond. */
+#endif
+ target = p; /* Patch target later in asm_loop_fixup. */
+ }
+ emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
+ emit_branch(as, mi, rs, rt, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ intptr_t ofs = (intptr_t)&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv;
+ intptr_t jgl = (intptr_t)J2G(as->J);
+ if ((uintptr_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ } else if (ir->o == IR_TMPREF) {
+ *ofsp = (int32_t)(offsetof(global_State, tmptv)-32768);
+ return RID_JGL;
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
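+/* Note (annotation, assuming the register assignment in the MIPS target
+** definitions): RID_JGL holds J2G(as->J)+32768, so anything within 64K of
+** the global_State (closed upvalues of constant functions, g->tmptv) is
+** reachable with a signed 16 bit offset, hence the ofs-jgl-32768 rebasing.
+*/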
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ if (ir->o == IR_ADD) {
+ intptr_t ofs2;
+ if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2),
+ checki16(ofs2))) {
+ ref = ir->op1;
+ ofs = (int32_t)ofs2;
+ }
+ } else if (ir->o == IR_STRREF) {
+ intptr_t ofs2 = 65536;
+ lj_assertA(ofs == 0, "bad usage");
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs2 = ofs + get_kval(as, ir->op2);
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs2 = ofs + get_kval(as, ir->op1);
+ ref = ir->op2;
+ }
+ if (!checki16(ofs2)) {
+ /* NYI: Fuse ADD with constant. */
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_hsi(as, mi, rt, RID_TMP, ofs);
+ emit_dst(as, MIPSI_AADDU, RID_TMP, left, right);
+ return;
+ }
+ ofs = ofs2;
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_hsi(as, mi, rt, base, ofs);
+}
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_XNARGS(ci);
+ int32_t ofs = LJ_32 ? 16 : 0;
+#if LJ_SOFTFP
+ Reg gpr = REGARG_FIRSTGPR;
+#else
+ Reg gpr, fpr = REGARG_FIRSTFPR;
+#endif
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func, 1);
+#if !LJ_SOFTFP
+ for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
+ as->cost[gpr] = REGCOST(~0u, ASMREF_L);
+ gpr = REGARG_FIRSTGPR;
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+#if !LJ_SOFTFP
+ if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
+ !(ci->flags & CCI_VARARG)) {
+ lj_assertA(rset_test(as->freeset, fpr),
+ "reg %d not free", fpr); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr += LJ_32 ? 2 : 1;
+ gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1;
+ } else
+#endif
+ {
+#if LJ_32 && !LJ_SOFTFP
+ fpr = REGARG_LASTFPR+1;
+#endif
+ if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
+ if (gpr <= REGARG_LASTGPR) {
+ lj_assertA(rset_test(as->freeset, gpr),
+ "reg %d not free", gpr); /* Already evicted. */
+#if !LJ_SOFTFP
+ if (irt_isfp(ir->t)) {
+ RegSet of = as->freeset;
+ Reg r;
+ /* Workaround to protect argument GPRs from being used for remat. */
+ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
+ r = ra_alloc1(as, ref, RSET_FPR);
+ as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
+ if (irt_isnum(ir->t)) {
+#if LJ_32
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
+ emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
+ lj_assertA(rset_test(as->freeset, gpr+1),
+ "reg %d not free", gpr+1); /* Already evicted. */
+ gpr += 2;
+#else
+ emit_tg(as, MIPSI_DMFC1, gpr, r);
+ gpr++; fpr++;
+#endif
+ } else if (irt_isfloat(ir->t)) {
+ emit_tg(as, MIPSI_MFC1, gpr, r);
+ gpr++;
+#if LJ_64
+ fpr++;
+#endif
+ }
+ } else
+#endif
+ {
+ ra_leftov(as, gpr, ref);
+ gpr++;
+#if LJ_64 && !LJ_SOFTFP
+ fpr++;
+#endif
+ }
+ } else {
+ Reg r = ra_alloc1z(as, ref, !LJ_SOFTFP && irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+#if LJ_32
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+#else
+ emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isfp(ir->t) && !irt_is64(ir->t)) ? 4 : 0));
+ ofs += 8;
+#endif
+ }
+ }
+ } else {
+#if !LJ_SOFTFP
+ fpr = REGARG_LASTFPR+1;
+#endif
+ if (gpr <= REGARG_LASTGPR) {
+ gpr++;
+#if LJ_64 && !LJ_SOFTFP
+ fpr++;
+#endif
+ } else {
+ ofs += LJ_32 ? 4 : 8;
+ }
+ }
+ checkmclim(as);
+ }
+}
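+/* Note (annotation): this walks the o32 (LJ_32) resp. n64 (LJ_64) calling
+** convention: on o32 the first 16 stack bytes are the argument save area
+** (ofs starts at 16), a double occupies an even/odd FPR pair (fpr += 2)
+** and varargs calls pass FP args in GPRs; on n64 GPR and FPR argument
+** slots advance in lockstep, which is why gpr++ is paired with fpr++.
+*/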
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
+#if !LJ_SOFTFP
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+#endif
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lj_assertA(!irt_ispri(ir->t), "PRI dest");
+ if (!LJ_SOFTFP && irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ int32_t ofs = sps_scale(ir->s);
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+#if LJ_32
+ emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
+ emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
+#else
+ emit_tg(as, MIPSI_DMTC1, RID_RET, dest);
+#endif
+ }
+ if (ofs) {
+#if LJ_32
+ emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
+#else
+ emit_tsi(as, MIPSI_SD, RID_RET, RID_SP, ofs);
+#endif
+ }
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)get_kval(as, func);
+ } else { /* Need specific register for indirect calls. */
+ Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
+ MCode *p = as->mcp;
+ if (r == RID_CFUNCADDR)
+ *--p = MIPSI_NOP;
+ else
+ *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
+ *--p = MIPSI_JALR | MIPSF_S(r);
+ as->mcp = p;
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+#if !LJ_SOFTFP
+static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
+ RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR)
+#if LJ_TARGET_MIPSR6
+ |RID2RSET(RID_F21)
+#endif
+ ;
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_FPRET);
+ emit_call(as, (void *)lj_ir_callinfo[id].func, 0);
+ ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
+}
+#endif
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guard(as, MIPSI_BNE, RID_TMP,
+ ra_allock(as, igcptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tsi(as, MIPSI_AL, RID_TMP, base, -8);
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irgc;
+ irgc.ot = IRT(0, IRT_PGC); /* GC type. */
+ emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
+ if ((as->flags & JIT_F_MIPSXXR2)) {
+ emit_tsml(as, LJ_64 ? MIPSI_DINS : MIPSI_INS, RID_TMP, tmp,
+ lj_fls(SBUF_MASK_FLAG), 0);
+ } else {
+ emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
+ emit_tsi(as, MIPSI_ANDI, tmp, tmp, SBUF_MASK_FLAG);
+ }
+ emit_getgl(as, RID_TMP, cur_L);
+ emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+}
+#endif
+
+/* -- Type conversions ---------------------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+#if !LJ_TARGET_MIPSR6
+ asm_guard(as, MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
+#else
+ asm_guard(as, MIPSI_BC1EQZ, 0, (tmp&31));
+ emit_fgh(as, MIPSI_CMP_EQ_D, tmp, tmp, left);
+#endif
+ emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_CVT_W_D, tmp, left);
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
+}
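+/* Note (annotation): ir->op2 is the 2^52+2^51 bias constant supplied by
+** the IR: adding it as a double leaves the integer value, wrapped to 32
+** bits, in the low word of the result's mantissa, which MFC1 then moves
+** to the GPR.
+*/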
+#elif LJ_64 /* && LJ_SOFTFP */
+static void asm_tointg(ASMState *as, IRIns *ir, Reg r)
+{
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RID2RSET(REGARG_FIRSTGPR)|RID2RSET(RID_RET)|RID2RSET(RID_RET+1)|
+ RID2RSET(RID_R1)|RID2RSET(RID_R12);
+ if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
+ ra_evictset(as, drop);
+ /* Return values are in RID_RET (converted value) and RID_RET+1 (status). */
+ ra_destreg(as, ir, RID_RET);
+ asm_guard(as, MIPSI_BNE, RID_RET+1, RID_ZERO);
+ emit_call(as, (void *)lj_ir_callinfo[IRCALL_lj_vm_tointg].func, 0);
+ if (r == RID_NONE)
+ ra_leftov(as, REGARG_FIRSTGPR, ir->op1);
+ else if (r != REGARG_FIRSTGPR)
+ emit_move(as, REGARG_FIRSTGPR, r);
+}
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_dta(as, MIPSI_SLL, dest, dest, 0);
+ asm_callid(as, ir, IRCALL_lj_vm_tobit);
+}
+#endif
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if !LJ_SOFTFP32
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+#endif
+#if LJ_64
+ int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
+#endif
+ IRRef lref = ir->op1;
+#if LJ_32
+ /* 64 bit integer conversions are handled by SPLIT. */
+ lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
+ "IR %04d has unsplit 64 bit type",
+ (int)(ir - as->ir) - REF_BIAS);
+#endif
+#if LJ_SOFTFP32
+ /* FP conversions are handled by SPLIT. */
+ lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
+ "IR %04d has FP type",
+ (int)(ir - as->ir) - REF_BIAS);
+ /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
+#else
+ lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
+#if !LJ_SOFTFP
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
+ dest, ra_alloc1(as, lref, RSET_FPR));
+ } else if (st == IRT_U32) { /* U32 to FP conversion. */
+ /* y = (x ^ 0x80000000) + 2147483648.0 */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ if (irt_isfloat(ir->t))
+ emit_fg(as, MIPSI_CVT_S_D, dest, dest);
+ /* Must perform arithmetic with doubles to keep the precision. */
+ emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
+ emit_fg(as, MIPSI_CVT_D_W, dest, dest);
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
+ emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+#if LJ_64
+ } else if (st == IRT_U64) { /* U64 to FP conversion. */
+ /* if (x >= 1ull<<63) y = (double)(int64_t)(x&(1ull<<63)-1) + pow(2.0, 63) */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ MCLabel l_end = emit_label(as);
+ if (irt_isfloat(ir->t)) {
+ emit_fgh(as, MIPSI_ADD_S, dest, dest, tmp);
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63],
+ rset_exclude(RSET_GPR, left));
+ emit_fg(as, MIPSI_CVT_S_L, dest, dest);
+ } else {
+ emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63],
+ rset_exclude(RSET_GPR, left));
+ emit_fg(as, MIPSI_CVT_D_L, dest, dest);
+ }
+ emit_branch(as, MIPSI_BGEZ, left, RID_ZERO, l_end);
+ emit_tg(as, MIPSI_DMTC1, RID_TMP, dest);
+ emit_tsml(as, MIPSI_DEXTM, RID_TMP, left, 30, 0);
+#endif
+ } else { /* Integer to FP conversion. */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+#if LJ_32
+ emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
+ dest, dest);
+ emit_tg(as, MIPSI_MTC1, left, dest);
+#else
+ MIPSIns mi = irt_isfloat(ir->t) ?
+ (st64 ? MIPSI_CVT_S_L : MIPSI_CVT_S_W) :
+ (st64 ? MIPSI_CVT_D_L : MIPSI_CVT_D_W);
+ emit_fg(as, mi, dest, dest);
+ emit_tg(as, st64 ? MIPSI_DMTC1 : MIPSI_MTC1, left, dest);
+#endif
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) { /* FP to U32 conversion. */
+ /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
+ emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
+ emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
+ tmp, tmp);
+ emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
+ tmp, left, tmp);
+ if (st == IRT_FLOAT)
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
+ else
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
+#if LJ_64
+ } else if (irt_isu64(ir->t)) { /* FP to U64 conversion. */
+ MCLabel l_end;
+ emit_tg(as, MIPSI_DMFC1, dest, tmp);
+ l_end = emit_label(as);
+ /* For inputs >= 2^63 add -2^64 and convert again. */
+ if (st == IRT_NUM) {
+ emit_fg(as, MIPSI_TRUNC_L_D, tmp, tmp);
+ emit_fgh(as, MIPSI_ADD_D, tmp, left, tmp);
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)&as->J->k64[LJ_K64_M2P64],
+ rset_exclude(RSET_GPR, dest));
+ emit_fg(as, MIPSI_TRUNC_L_D, tmp, left); /* Delay slot. */
+#if !LJ_TARGET_MIPSR6
+ emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
+ emit_fgh(as, MIPSI_C_OLT_D, 0, left, tmp);
+#else
+ emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
+ emit_fgh(as, MIPSI_CMP_LT_D, left, left, tmp);
+#endif
+ emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
+ (void *)&as->J->k64[LJ_K64_2P63],
+ rset_exclude(RSET_GPR, dest));
+ } else {
+ emit_fg(as, MIPSI_TRUNC_L_S, tmp, tmp);
+ emit_fgh(as, MIPSI_ADD_S, tmp, left, tmp);
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)&as->J->k32[LJ_K32_M2P64],
+ rset_exclude(RSET_GPR, dest));
+ emit_fg(as, MIPSI_TRUNC_L_S, tmp, left); /* Delay slot. */
+#if !LJ_TARGET_MIPSR6
+ emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
+ emit_fgh(as, MIPSI_C_OLT_S, 0, left, tmp);
+#else
+ emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
+ emit_fgh(as, MIPSI_CMP_LT_S, left, left, tmp);
+#endif
+ emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
+ (void *)&as->J->k32[LJ_K32_2P63],
+ rset_exclude(RSET_GPR, dest));
+ }
+#endif
+ } else {
+#if LJ_32
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
+ tmp, left);
+#else
+ MIPSIns mi = irt_is64(ir->t) ?
+ (st == IRT_NUM ? MIPSI_TRUNC_L_D : MIPSI_TRUNC_L_S) :
+ (st == IRT_NUM ? MIPSI_TRUNC_W_D : MIPSI_TRUNC_W_S);
+ emit_tg(as, irt_is64(ir->t) ? MIPSI_DMFC1 : MIPSI_MFC1, dest, left);
+ emit_fg(as, mi, left, left);
+#endif
+ }
+ }
+ } else
+#else
+ if (irt_isfp(ir->t)) {
+#if LJ_64 && LJ_HASFFI
+ if (stfp) { /* FP to FP conversion. */
+ asm_callid(as, ir, irt_isnum(ir->t) ? IRCALL_softfp_f2d :
+ IRCALL_softfp_d2f);
+ } else { /* Integer to FP conversion. */
+ IRCallID cid = ((IRT_IS64 >> st) & 1) ?
+ (irt_isnum(ir->t) ?
+ (st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d) :
+ (st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f)) :
+ (irt_isnum(ir->t) ?
+ (st == IRT_INT ? IRCALL_softfp_i2d : IRCALL_softfp_ui2d) :
+ (st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f));
+ asm_callid(as, ir, cid);
+ }
+#else
+ asm_callid(as, ir, IRCALL_softfp_i2d);
+#endif
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, RID_NONE);
+ } else {
+ IRCallID cid = irt_is64(ir->t) ?
+ ((st == IRT_NUM) ?
+ (irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul) :
+ (irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul)) :
+ ((st == IRT_NUM) ?
+ (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
+ (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui));
+ asm_callid(as, ir, cid);
+ }
+ } else
+#endif
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
+ if ((ir->op2 & IRCONV_SEXT)) {
+ if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
+ emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
+ } else {
+ uint32_t shift = st == IRT_I8 ? 24 : 16;
+ emit_dta(as, MIPSI_SRA, dest, dest, shift);
+ emit_dta(as, MIPSI_SLL, dest, left, shift);
+ }
+ } else {
+ emit_tsi(as, MIPSI_ANDI, dest, left,
+ (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
+ }
+ } else { /* 32/64 bit integer conversions. */
+#if LJ_32
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+#else
+ if (irt_is64(ir->t)) {
+ if (st64) {
+ /* 64/64 bit no-op (cast)*/
+ ra_leftov(as, dest, lref);
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ if ((ir->op2 & IRCONV_SEXT)) { /* 32 to 64 bit sign extension. */
+ emit_dta(as, MIPSI_SLL, dest, left, 0);
+ } else { /* 32 to 64 bit zero extension. */
+ emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
+ }
+ }
+ } else {
+ if (st64 && !(ir->op2 & IRCONV_NONE)) {
+ /* This is either a 32 bit reg/reg mov which zeroes the hiword
+ ** or a load of the loword from a 64 bit address.
+ */
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
+ } else { /* 32/32 bit no-op (cast). */
+ /* Do nothing, but may need to move regs. */
+ ra_leftov(as, dest, lref);
+ }
+ }
+#endif
+ }
+ }
+}
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ int32_t ofs = 0;
+#if LJ_SOFTFP32
+ ra_evictset(as, RSET_SCRATCH);
+ if (ra_used(ir)) {
+ if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
+ (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ Reg r = (ir+i)->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
+ }
+ }
+ ofs = sps_scale(ir->s & ~1);
+ } else {
+ Reg rhi = ra_dest(as, ir+1, RSET_GPR);
+ Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
+ emit_tsi(as, MIPSI_LW, rhi, RID_SP, ofs+(LJ_BE?0:4));
+ emit_tsi(as, MIPSI_LW, rlo, RID_SP, ofs+(LJ_BE?4:0));
+ }
+ }
+#else
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ ofs = sps_scale(ir->s);
+#endif
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1),
+ RID_SP, ofs);
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+#if LJ_64
+/* Store tagged value for ref at base+ofs. */
+static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
+{
+ RegSet allow = rset_exclude(RSET_GPR, base);
+ IRIns *ir = IR(ref);
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+ "store of IR type %d", irt_type(ir->t));
+ if (irref_isk(ref)) {
+ TValue k;
+ lj_ir_kvalue(as->J->L, &k, ir);
+ emit_tsi(as, MIPSI_SD, ra_allock(as, (int64_t)k.u64, allow), base, ofs);
+ } else {
+ Reg src = ra_alloc1(as, ref, allow);
+ Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47,
+ rset_exclude(allow, src));
+ emit_tsi(as, MIPSI_SD, RID_TMP, base, ofs);
+ if (irt_isinteger(ir->t)) {
+ emit_dst(as, MIPSI_DADDU, RID_TMP, RID_TMP, type);
+ emit_tsml(as, MIPSI_DEXT, RID_TMP, src, 31, 0);
+ } else {
+ emit_dst(as, MIPSI_DADDU, RID_TMP, src, type);
+ }
+ }
+}
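+/* Note (annotation): a tagged MIPS64 value is ((int64_t)itype << 47) plus
+** the payload. Integers are first zero-extended to 32 bits with DEXT so
+** the DADDU cannot carry into the tag bits; GC pointers are assumed to
+** fit in 47 bits and are tagged directly.
+*/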
+#endif
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
+{
+ int32_t tmpofs = (int32_t)(offsetof(global_State, tmptv)-32768);
+ if ((mode & IRTMPREF_IN1)) {
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if ((mode & IRTMPREF_OUT1)) {
+#if LJ_SOFTFP
+ emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
+#if LJ_64
+ emit_setgl(as, ra_alloc1(as, ref, RSET_GPR), tmptv.u64);
+#else
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ emit_setgl(as,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
+ tmptv.u32.lo);
+ emit_setgl(as,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
+ tmptv.u32.hi);
+#endif
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
+ emit_tsi(as, MIPSI_SDC1, (src & 31), RID_JGL, tmpofs);
+#endif
+ } else if (irref_isk(ref)) {
+ /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, igcptr(ir_knum(ir)), dest);
+ } else {
+#if LJ_SOFTFP32
+ lj_assertA(0, "unsplit FP op");
+#else
+ /* Otherwise force a spill and use the spill slot. */
+ emit_tsi(as, MIPSI_AADDIU, dest, RID_SP, ra_spill(as, ir));
+#endif
+ }
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+#if LJ_32
+ Reg type;
+ emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, tmpofs);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_GPR);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
+ type = ra_alloc1(as, ref+1, RSET_GPR);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), RSET_GPR);
+ emit_setgl(as, type, tmptv.it);
+#else
+ asm_tvstore64(as, dest, 0, ref);
+ emit_tsi(as, MIPSI_DADDIU, dest, RID_JGL, tmpofs);
+#endif
+ }
+ } else {
+ emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
+ }
+}
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tsi(as, MIPSI_AADDIU, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+#if !LJ_TARGET_MIPSR6
+ emit_dst(as, MIPSI_AADDU, dest, RID_TMP, base);
+ emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
+#else
+ emit_dst(as, MIPSI_ALSA | MIPSF_A(3-1), dest, idx, base);
+#endif
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
+#if LJ_64
+ Reg cmp64 = RID_NONE;
+#endif
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ int isk = irref_isk(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+ if (!LJ_SOFTFP && irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ } else {
+ if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+#if LJ_32
+ if (LJ_SOFTFP && irkey[1].o == IR_HIOP) {
+ if (ra_hasreg((irkey+1)->r)) {
+ type = tmpnum = (irkey+1)->r;
+ tmp1 = ra_scratch(as, allow);
+ rset_clear(allow, tmp1);
+ ra_noweak(as, tmpnum);
+ } else {
+ type = tmpnum = ra_allocref(as, refkey+1, allow);
+ }
+ rset_clear(allow, tmpnum);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
+ rset_clear(allow, type);
+ }
+#endif
+ }
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+#if LJ_64
+ if (LJ_SOFTFP || !irt_isnum(kt)) {
+ /* Allocate cmp64 register used for 64-bit comparisons */
+ if (LJ_SOFTFP && irt_isnum(kt)) {
+ cmp64 = key;
+ } else if (!isk && irt_isaddr(kt)) {
+ cmp64 = tmp2;
+ } else {
+ int64_t k;
+ if (isk && irt_isaddr(kt)) {
+ k = ((int64_t)irt_toitype(kt) << 47) | irkey[1].tv.u64;
+ } else {
+ lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
+ k = ~((int64_t)~irt_toitype(kt) << 47);
+ }
+ cmp64 = ra_allock(as, k, allow);
+ rset_clear(allow, cmp64);
+ }
+ }
+#endif
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+ /* Follow hash chain until the end. */
+ emit_move(as, dest, tmp1);
+ l_loop = --as->mcp;
+ emit_tsi(as, MIPSI_AL, tmp1, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ) { /* Must match asm_guard(). */
+ emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
+ l_end = asm_exitstub_addr(as);
+ }
+ if (!LJ_SOFTFP && irt_isnum(kt)) {
+#if !LJ_TARGET_MIPSR6
+ emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
+#else
+ emit_branch(as, MIPSI_BC1NEZ, 0, (tmpnum&31), l_end);
+ emit_fgh(as, MIPSI_CMP_EQ_D, tmpnum, tmpnum, key);
+#endif
+ *--as->mcp = MIPSI_NOP; /* Avoid NaN comparison overhead. */
+ emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
+ emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
+#if LJ_32
+ emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (irt_ispri(kt)) {
+ emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
+ } else {
+ emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
+ emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
+ }
+ }
+ emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
+#else
+ emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 15);
+ emit_tg(as, MIPSI_DMTC1, tmp1, tmpnum);
+ emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
+ } else {
+ emit_branch(as, MIPSI_BEQ, tmp1, cmp64, l_end);
+ emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
+ }
+ *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
+ if (!isk && irt_isaddr(kt)) {
+ type = ra_allock(as, (int64_t)irt_toitype(kt) << 47, allow);
+ emit_dst(as, MIPSI_DADDU, tmp2, key, type);
+ rset_clear(allow, type);
+ }
+#endif
+
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(as, irkey) : 1;
+ if (khash == 0) {
+ emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (isk)
+ tmphash = ra_allock(as, khash, allow);
+ emit_dst(as, MIPSI_AADDU, dest, dest, tmp1);
+ lj_assertA(sizeof(Node) == 24, "bad Node size");
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
+ emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
+ emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
+ emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (isk) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, sid));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
+ emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
+ emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
+#if LJ_32
+ if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
+ emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
+ if ((as->flags & JIT_F_MIPSXXR2)) {
+ emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp1);
+ emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
+ emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
+ }
+ emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
+#if LJ_SOFTFP
+ emit_ds(as, MIPSI_MOVE, tmp1, type);
+ emit_ds(as, MIPSI_MOVE, tmp2, key);
+#else
+ emit_tg(as, MIPSI_MFC1, tmp2, key);
+ emit_tg(as, MIPSI_MFC1, tmp1, key+1);
+#endif
+ } else {
+ emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
+ emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
+ emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
+ }
+#else
+ emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
+ emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
+ if (irt_isnum(kt)) {
+ emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
+ emit_dta(as, MIPSI_DSRA32, tmp1, LJ_SOFTFP ? key : tmp1, 0);
+ emit_dta(as, MIPSI_SLL, tmp2, LJ_SOFTFP ? key : tmp1, 0);
+#if !LJ_SOFTFP
+ emit_tg(as, MIPSI_DMFC1, tmp1, key);
+#endif
+ } else {
+ checkmclim(as);
+ emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0);
+ emit_dta(as, MIPSI_SLL, tmp2, key, 0);
+ emit_dst(as, MIPSI_DADDU, tmp1, key, type);
+ }
+#endif
+ }
+ }
+}
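+/* Note (annotation): since emission is backwards, l_loop is reserved as a
+** hole with --as->mcp and back-patched via *l_loop = MIPSI_BNE | ... once
+** the rest of the loop head exists and the branch displacement
+** (as->mcp - l_loop - 1, counted from the delay slot) is known.
+*/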
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ Reg idx = node;
+#if LJ_32
+ Reg key = RID_NONE, type = RID_TMP;
+ int32_t lo, hi;
+#else
+ Reg key = ra_scratch(as, allow);
+ int64_t k;
+#endif
+ lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tsi(as, MIPSI_AADDIU, dest, node, ofs);
+ }
+#if LJ_32
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ if (irt_isnum(irkey->t)) {
+ lo = (int32_t)ir_knum(irkey)->u32.lo;
+ hi = (int32_t)ir_knum(irkey)->u32.hi;
+ } else {
+ lo = irkey->i;
+ hi = irt_toitype(irkey->t);
+ if (!ra_hasreg(key))
+ goto nolo;
+ }
+ asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
+nolo:
+ asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
+ if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
+#else
+ if (irt_ispri(irkey->t)) {
+ lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
+ k = ~((int64_t)~irt_toitype(irkey->t) << 47);
+ } else if (irt_isnum(irkey->t)) {
+ k = (int64_t)ir_knum(irkey)->u64;
+ } else {
+ k = ((int64_t)irt_toitype(irkey->t) << 47) | (int64_t)ir_kgc(irkey);
+ }
+ asm_guard(as, MIPSI_BNE, key, ra_allock(as, k, allow));
+ emit_tsi(as, MIPSI_LD, key, idx, kofs);
+#endif
+ if (ofs > 32736)
+ emit_tsi(as, MIPSI_AADDU, dest, node, ra_allock(as, ofs, allow));
+}
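+
+/* Illustrative sketch of the constant key checked above (64 bit):
+**   GC objects:  k = ((int64_t)itype << 47) | gcptr
+**   primitives:  k = ~((int64_t)~itype << 47)
+** so a single LD plus one BNE-guard checks both the type and the value
+** of the key in the precomputed hash slot.
+*/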
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, MIPSI_AL, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_AADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tsi(as, MIPSI_AL, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tsi(as, MIPSI_AL, uv, func, (int32_t)offsetof(GCfuncL, uvptr) +
+ (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lj_assertA(!ra_used(ir), "unfused FREF");
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+#if LJ_32
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
+ else
+ emit_dst(as, MIPSI_ADDU, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+#else
+ RegSet allow = RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ IRIns *irr = IR(ir->op2);
+ int32_t ofs = sizeof(GCstr);
+ rset_clear(allow, base);
+ if (irref_isk(ir->op2) && checki16(ofs + irr->i)) {
+ emit_tsi(as, MIPSI_DADDIU, dest, base, ofs + irr->i);
+ } else {
+ emit_tsi(as, MIPSI_DADDIU, dest, dest, ofs);
+ emit_dst(as, MIPSI_DADDU, dest, base, ra_alloc1(as, ir->op2, allow));
+ }
+#endif
+}
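+
+/* What STRREF computes, as plain C (illustrative):
+**   p = (char *)str + sizeof(GCstr) + index;
+** i.e. the string payload starts right after the GCstr header. The
+** constant part is folded into a 16 bit immediate when checki16()
+** holds, otherwise it's materialized via ra_allock().
+*/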
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return MIPSI_LB;
+ case IRT_U8: return MIPSI_LBU;
+ case IRT_I16: return MIPSI_LH;
+ case IRT_U16: return MIPSI_LHU;
+ case IRT_NUM:
+ lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
+ if (!LJ_SOFTFP) return MIPSI_LDC1;
+ /* fallthrough */
+ case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1;
+ /* fallthrough */
+ default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_LD : MIPSI_LW;
+ }
+}
+
+static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return MIPSI_SB;
+ case IRT_I16: case IRT_U16: return MIPSI_SH;
+ case IRT_NUM:
+ lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
+ if (!LJ_SOFTFP) return MIPSI_SDC1;
+ /* fallthrough */
+ case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1;
+ /* fallthrough */
+ default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_SD : MIPSI_SW;
+ }
+}
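+
+/* Note: stores need no signedness variants (SB/SH simply store the low
+** bits either way), which is why asm_fxstoreins has fewer cases than
+** the load selector above.
+*/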
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ MIPSIns mi = asm_fxloadins(as, ir);
+ Reg idx;
+ int32_t ofs;
+ if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
+ idx = RID_JGL;
+ ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
+ } else {
+ idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tsi(as, MIPSI_AADDIU, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ }
+ lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD");
+ emit_tsi(as, mi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ MIPSIns mi = asm_fxstoreins(as, ir);
+ lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE");
+ emit_tsi(as, mi, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED),
+ "unaligned XLOAD");
+ asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1z(as, ir->op2,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
+ Reg dest = RID_NONE, type = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = 0;
+ IRType1 t = ir->t;
+ if (hiop) {
+ t.irt = IRT_NUM;
+ if (ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ }
+ if (ra_used(ir)) {
+ lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t),
+ "bad load type %d", irt_type(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+#if LJ_64
+ if (irt_isaddr(t))
+ emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
+ else if (irt_isint(t))
+ emit_dta(as, MIPSI_SLL, dest, dest, 0);
+#endif
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
+ rset_clear(allow, idx);
+ if (irt_isnum(t)) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
+ } else {
+ asm_guard(as, MIPSI_BNE, type,
+ ra_allock(as, (int32_t)irt_toitype(t), allow));
+ }
+#if LJ_32
+ if (ra_hasreg(dest)) {
+ if (!LJ_SOFTFP && irt_isnum(t))
+ emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
+ else
+ emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
+ }
+ emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
+#else
+ if (ra_hasreg(dest)) {
+ if (!LJ_SOFTFP && irt_isnum(t)) {
+ emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
+ dest = type;
+ }
+ } else {
+ dest = type;
+ }
+ emit_dta(as, MIPSI_DSRA32, type, dest, 15);
+ emit_tsi(as, MIPSI_LD, dest, idx, ofs);
+#endif
+}
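+
+/* Illustrative sketch of the 64 bit number check emitted above:
+**   itype = (int32_t)((int64_t)tv >> 47);     -- MIPSI_DSRA32 ..., 15
+**   if (!((uint32_t)itype < LJ_TISNUM)) exit; -- SLTIU + BEQ-guard
+** One arithmetic shift plus one unsigned compare separates NaN-tagged
+** numbers from all other tagged values.
+*/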
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = 0;
+ if (ir->r == RID_SINK)
+ return;
+ if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ emit_hsi(as, LJ_SOFTFP ? MIPSI_SD : MIPSI_SDC1, src, idx, ofs);
+ } else {
+#if LJ_32
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
+ type = ra_alloc1(as, (ir+1)->op2, allow);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (ra_hasreg(src))
+ emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
+ emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
+#else
+ Reg tmp = RID_TMP;
+ if (irt_ispri(ir->t)) {
+ tmp = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
+ rset_clear(allow, tmp);
+ } else {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ emit_tsi(as, MIPSI_SD, tmp, idx, ofs);
+ if (ra_hasreg(src)) {
+ if (irt_isinteger(ir->t)) {
+ emit_dst(as, MIPSI_DADDU, tmp, tmp, type);
+ emit_tsml(as, MIPSI_DEXT, tmp, src, 31, 0);
+ } else {
+ emit_dst(as, MIPSI_DADDU, tmp, src, type);
+ }
+ }
+#endif
+ }
+}
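+
+/* Illustrative sketch of the 64 bit tagged store above (integers):
+**   tv = (uint64_t)(uint32_t)val + ((int64_t)itype << 47); -- DEXT+DADDU
+**   *(uint64_t *)slot = tv;                                -- SD
+** Primitive types store the canonical constant ~(~itype << 47) as-is.
+*/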
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ IRType1 t = ir->t;
+#if LJ_32
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
+ if (hiop)
+ t.irt = IRT_NUM;
+#else
+ int32_t ofs = 8*((int32_t)ir->op1-2);
+#endif
+ lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+ "bad parent SLOAD"); /* Handled by asm_head_side(). */
+ lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+ "inconsistent SLOAD variant");
+#if LJ_SOFTFP32
+ lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
+ "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+#else
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, LJ_SOFTFP ? allow : RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else
+#endif
+ if (ra_used(ir)) {
+ lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t),
+ "bad SLOAD type %d", irt_type(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if (!LJ_SOFTFP32 && (ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ Reg tmp = ra_scratch(as, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
+#if LJ_SOFTFP
+ ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
+ ra_destreg(as, ir, RID_RET);
+ emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_d2i].func, 0);
+ if (tmp != REGARG_FIRSTGPR)
+ emit_move(as, REGARG_FIRSTGPR, tmp);
+#else
+ emit_tg(as, MIPSI_MFC1, dest, tmp);
+ emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp);
+#endif
+ dest = tmp;
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, RSET_GPR);
+#if LJ_SOFTFP
+ ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
+ ra_destreg(as, ir, RID_RET);
+ emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_i2d].func, 0);
+ emit_dta(as, MIPSI_SLL, REGARG_FIRSTGPR, tmp, 0);
+#else
+ emit_fg(as, MIPSI_CVT_D_W, dest, dest);
+ emit_tg(as, MIPSI_MTC1, tmp, dest);
+#endif
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+#if LJ_64
+ else if (irt_isaddr(t)) {
+ /* Clear type from pointers. */
+ emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
+ } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
+ /* Sign-extend integers. */
+ emit_dta(as, MIPSI_SLL, dest, dest, 0);
+ }
+#endif
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+#if LJ_32
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ if (ra_noreg(type))
+ type = RID_TMP;
+ if (irt_isnum(t)) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
+ } else {
+ Reg ktype = ra_allock(as, (ir->op2 & IRSLOAD_KEYINDEX) ? LJ_KEYINDEX : irt_toitype(t), allow);
+ asm_guard(as, MIPSI_BNE, type, ktype);
+ }
+ }
+ if (ra_hasreg(dest)) {
+ if (!LJ_SOFTFP && irt_isnum(t))
+ emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
+ else
+ emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
+ }
+ if (ra_hasreg(type))
+ emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
+#else
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ type = dest < RID_MAX_GPR ? dest : RID_TMP;
+ if (irt_ispri(t)) {
+ asm_guard(as, MIPSI_BNE, type,
+ ra_allock(as, ~((int64_t)~irt_toitype(t) << 47) , allow));
+ } else if ((ir->op2 & IRSLOAD_KEYINDEX)) {
+ asm_guard(as, MIPSI_BNE, RID_TMP,
+ ra_allock(as, (int32_t)LJ_KEYINDEX, allow));
+ emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 0);
+ } else {
+ if (irt_isnum(t)) {
+ asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
+ if (!LJ_SOFTFP && ra_hasreg(dest))
+ emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
+ } else {
+ asm_guard(as, MIPSI_BNE, RID_TMP,
+ ra_allock(as, (int32_t)irt_toitype(t), allow));
+ }
+ emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 15);
+ }
+ emit_tsi(as, MIPSI_LD, type, base, ofs);
+ } else if (ra_hasreg(dest)) {
+ if (!LJ_SOFTFP && irt_isnum(t))
+ emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
+ else
+ emit_tsi(as, irt_isint(t) ? MIPSI_LW : MIPSI_LD, dest, base,
+ ofs ^ ((LJ_BE && irt_isint(t)) ? 4 : 0));
+ }
+#endif
+}
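+
+/* Slot addressing used above (illustrative): stack slots are 8 bytes,
+** so slot s lives at BASE + 8*(s-1) on 32 bit targets (+4 when
+** addressing the frame word) and at BASE + 8*(s-2) on 64 bit targets,
+** where the extra slot accounts for the two-slot frame (LJ_FR2).
+*/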
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID id = (CTypeID)IR(ir->op1)->i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[4];
+ RegSet drop = RSET_SCRATCH;
+ lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+ "bad CNEW/CNEWI operands");
+
+ as->gcsteps++;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+#if LJ_32
+ int32_t ofs = sizeof(GCcdata);
+ if (sz == 8) {
+ ofs += 4;
+ lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
+ if (LJ_LE) ir++;
+ }
+ for (;;) {
+ Reg r = ra_alloc1z(as, ir->op2, allow);
+ emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; if (LJ_BE) ir++; else ir--;
+ }
+#else
+ emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow),
+ RID_RET, sizeof(GCcdata));
+#endif
+ lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
+ } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
+ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* CTypeID id */
+ args[2] = ir->op2; /* CTSize sz */
+ args[3] = ASMREF_TMP1; /* CTSize align */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+ return;
+ }
+
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tsi(as, MIPSI_AS, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, link, gc.grayagain);
+ emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
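+
+/* Equivalent C for the backward table barrier above (illustrative):
+**   if (t->marked & LJ_GC_BLACK) {
+**     t->marked &= ~LJ_GC_BLACK;     -- turn the table gray again
+**     t->gclist = g->gc.grayagain;   -- and relink it for the GC
+**     g->gc.grayagain = t;
+**   }
+*/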
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_branch(as, MIPSI_BEQ, tmp, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
+ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tsi(as, MIPSI_LBU, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
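+
+/* Equivalent C for the upvalue barrier above (illustrative):
+**   if ((obj->marked & LJ_GC_BLACK) && (val->marked & LJ_GC_WHITES))
+**     lj_gc_barrieruv(g, tv);  -- only black -> white edges need work
+*/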
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ emit_fgh(as, mi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fg(as, mi, dest, left);
+}
+#endif
+
+#if !LJ_SOFTFP32
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (ir->op2 <= IRFPM_TRUNC)
+ asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
+ else if (ir->op2 == IRFPM_SQRT)
+ asm_fpunary(as, ir, MIPSI_SQRT_D);
+ else
+#endif
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+}
+#endif
+
+#if !LJ_SOFTFP
+#define asm_fpadd(as, ir) asm_fparith(as, ir, MIPSI_ADD_D)
+#define asm_fpsub(as, ir) asm_fparith(as, ir, MIPSI_SUB_D)
+#define asm_fpmul(as, ir) asm_fparith(as, ir, MIPSI_MUL_D)
+#elif LJ_64 /* && LJ_SOFTFP */
+#define asm_fpadd(as, ir) asm_callid(as, ir, IRCALL_softfp_add)
+#define asm_fpsub(as, ir) asm_callid(as, ir, IRCALL_softfp_sub)
+#define asm_fpmul(as, ir) asm_callid(as, ir, IRCALL_softfp_mul)
+#endif
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+#if !LJ_SOFTFP32
+ if (irt_isnum(t)) {
+ asm_fpadd(as, ir);
+ } else
+#endif
+ {
+ /* TODO MIPSR6: Fuse ADD(BSHL(a,1-4),b) or ADD(ADD(a,a),b) to MIPSI_ALSA. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ intptr_t k = get_kval(as, ir->op2);
+ if (checki16(k)) {
+ emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest,
+ left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDU : MIPSI_ADDU, dest,
+ left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP32
+ if (irt_isnum(ir->t)) {
+ asm_fpsub(as, ir);
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
+ left, right);
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP32
+ if (irt_isnum(ir->t)) {
+ asm_fpmul(as, ir);
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (LJ_64 && irt_is64(ir->t)) {
+#if !LJ_TARGET_MIPSR6
+ emit_dst(as, MIPSI_MFLO, dest, 0, 0);
+ emit_dst(as, MIPSI_DMULT, 0, left, right);
+#else
+ emit_dst(as, MIPSI_DMUL, dest, left, right);
+#endif
+ } else {
+ emit_dst(as, MIPSI_MUL, dest, left, right);
+ }
+ }
+}
+
+#if !LJ_SOFTFP32
+static void asm_fpdiv(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ asm_fparith(as, ir, MIPSI_DIV_D);
+#else
+ asm_callid(as, ir, IRCALL_softfp_div);
+#endif
+}
+#endif
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, MIPSI_NEG_D);
+ } else
+#elif LJ_64 /* && LJ_SOFTFP */
+ if (irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dst(as, MIPSI_XOR, dest, left,
+ ra_allock(as, 0x8000000000000000ll, rset_exclude(RSET_GPR, dest)));
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
+ RID_ZERO, left);
+ }
+}
+
+#if !LJ_SOFTFP
+#define asm_abs(as, ir) asm_fpunary(as, ir, MIPSI_ABS_D)
+#elif LJ_64 /* && LJ_SOFTFP */
+static void asm_abs(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tsml(as, MIPSI_DEXTM, dest, left, 30, 0);
+}
+#endif
+
+static void asm_arithov(ASMState *as, IRIns *ir)
+{
+ /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
+ Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
+ lj_assertA(!irt_is64(ir->t), "bad usage");
+ if (irref_isk(ir->op2)) {
+ int k = IR(ir->op2)->i;
+ if (ir->o == IR_SUBOV) k = -k;
+ if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ if (dest == left) emit_move(as, RID_TMP, left);
+ return;
+ }
+ }
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
+ right), dest));
+ asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
+ emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
+ if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
+ } else { /* ((dest^left) & (dest^~right)) < 0 */
+ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
+ emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
+ }
+ emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
+ emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
+ if (dest == left || dest == right)
+ emit_move(as, RID_TMP, dest == left ? left : right);
+}
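+
+/* Overflow tests used above, as plain C (illustrative):
+**   s = a + b;  overflow iff ((a ^ s) & (b ^ s)) < 0
+**   s = a - b;  overflow iff ((a ^ s) & (~b ^ s)) < 0
+** For a 16 bit constant k the cheaper form applies instead:
+** overflow iff (s < a) == (k >= 0 ? 1 : 0).
+*/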
+
+#define asm_addov(as, ir) asm_arithov(as, ir)
+#define asm_subov(as, ir) asm_arithov(as, ir)
+
+static void asm_mulov(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
+ right), dest));
+ asm_guard(as, MIPSI_BNE, RID_TMP, tmp);
+ emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31);
+#if !LJ_TARGET_MIPSR6
+ emit_dst(as, MIPSI_MFHI, tmp, 0, 0);
+ emit_dst(as, MIPSI_MFLO, dest, 0, 0);
+ emit_dst(as, MIPSI_MULT, 0, left, right);
+#else
+ emit_dst(as, MIPSI_MUL, dest, left, right);
+ emit_dst(as, MIPSI_MUH, tmp, left, right);
+#endif
+}
+
+#if LJ_32 && LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
+ goto loarith;
+ } else if (checki16(k)) {
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ goto loarith;
+ }
+ }
+ emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+loarith:
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0) {
+ if (dest != left)
+ emit_move(as, dest, left);
+ return;
+ } else if (checki16(k)) {
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
+ emit_tsi(as, MIPSI_ADDIU, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ if (dest == left && dest == right) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
+ emit_dst(as, MIPSI_ADDU, dest, left, right);
+}
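+
+/* 64 bit add on a 32 bit target, as plain C (illustrative):
+**   lo = alo + blo;
+**   carry = (lo < alo);       -- SLTU recovers the carry bit
+**   hi = ahi + bhi + carry;
+** asm_sub64/asm_neg64 below recover the borrow the same way.
+*/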
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (dest == left) {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_move(as, dest, tmp);
+ dest = tmp;
+ }
+ emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
+ emit_dst(as, MIPSI_SUBU, dest, left, right);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
+ emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
+}
+#endif
+
+static void asm_bnot(ASMState *as, IRIns *ir)
+{
+ Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irl = IR(ir->op1);
+ if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
+ left = ra_alloc2(as, irl, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ } else {
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = RID_ZERO;
+ }
+ emit_dst(as, MIPSI_NOR, dest, left, right);
+}
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+#if LJ_32
+ if ((as->flags & JIT_F_MIPSXXR2)) {
+ emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
+ emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
+ } else {
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
+ emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
+ emit_dta(as, MIPSI_SRL, dest, left, 8);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
+ emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
+ emit_dta(as, MIPSI_SRL, tmp, left, 24);
+ emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
+ }
+#else
+ if (irt_is64(ir->t)) {
+ emit_dst(as, MIPSI_DSHD, dest, 0, RID_TMP);
+ emit_dst(as, MIPSI_DSBH, RID_TMP, 0, left);
+ } else {
+ emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
+ emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
+ }
+#endif
+}
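+
+/* The pre-R2 fallback above computes the classic shift/mask swap
+** (illustrative C):
+**   dest = (x << 24) | ((x & 0xff00) << 8) |
+**          ((x >> 8) & 0xff00) | (x >> 24);
+** With R2 (WSBH+ROTR) or on MIPS64 (DSBH+DSHD) it's two instructions.
+*/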
+
+static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ intptr_t k = get_kval(as, ir->op2);
+ if (checku16(k)) {
+ emit_tsi(as, mik, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_dst(as, mi, dest, left, right);
+}
+
+#define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI)
+#define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI)
+#define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI)
+
+static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)IR(ir->op2)->i;
+ if (LJ_64 && irt_is64(ir->t)) mik |= (shift & 32) ? MIPSI_D32 : MIPSI_D;
+ emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR),
+ (shift & 31));
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (LJ_64 && irt_is64(ir->t)) mi |= MIPSI_DV;
+ emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
+ }
+}
+
+#define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL)
+#define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL)
+#define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA)
+#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
+
+static void asm_bror(ASMState *as, IRIns *ir)
+{
+ if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
+ asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_rotr(as, dest, left, RID_TMP, shift);
+ } else {
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ emit_dst(as, MIPSI_SRLV, dest, right, left);
+ emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
+ emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
+ }
+ }
+}
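+
+/* Rotate-right fallback above, as plain C (illustrative):
+**   dest = (x >> n) | (x << (32 - n));
+** needed because pre-R2 MIPS lacks the ROTR/ROTRV instructions.
+*/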
+
+#if LJ_SOFTFP
+static void asm_sfpmin_max(ASMState *as, IRIns *ir)
+{
+ CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ? IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax];
+#if LJ_64
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+#else
+ IRRef args[4];
+ args[0^LJ_BE] = ir->op1;
+ args[1^LJ_BE] = (ir+1)->op1;
+ args[2^LJ_BE] = ir->op2;
+ args[3^LJ_BE] = (ir+1)->op2;
+#endif
+ asm_setupresult(as, ir, &ci);
+ emit_call(as, (void *)ci.func, 0);
+ ci.func = NULL;
+ asm_gencall(as, &ci, args);
+}
+#endif
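+
+/* Note on the cleared ci.func above: the call is emitted first
+** (backwards emission), then asm_gencall() runs with ci.func set to
+** NULL so it only marshals the arguments and doesn't emit a second
+** call.
+*/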
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ asm_sfpmin_max(as, ir);
+#else
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+#if !LJ_TARGET_MIPSR6
+ if (dest == left) {
+ emit_fg(as, MIPSI_MOVF_D, dest, right);
+ } else {
+ emit_fg(as, MIPSI_MOVT_D, dest, left);
+ if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
+ }
+ emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? right : left, ismax ? left : right);
+#else
+ emit_fgh(as, ismax ? MIPSI_MAX_D : MIPSI_MIN_D, dest, left, right);
+#endif
+#endif
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (left == right) {
+ if (dest != left) emit_move(as, dest, left);
+ } else {
+#if !LJ_TARGET_MIPSR6
+ if (dest == left) {
+ emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
+ } else {
+ emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
+ if (dest != right) emit_move(as, dest, right);
+ }
+#else
+ emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
+ if (dest != right) {
+ emit_dst(as, MIPSI_SELNEZ, RID_TMP, right, RID_TMP);
+ emit_dst(as, MIPSI_SELEQZ, dest, left, RID_TMP);
+ } else {
+ emit_dst(as, MIPSI_SELEQZ, RID_TMP, left, RID_TMP);
+ emit_dst(as, MIPSI_SELNEZ, dest, right, RID_TMP);
+ }
+#endif
+ emit_dst(as, MIPSI_SLT, RID_TMP,
+ ismax ? left : right, ismax ? right : left);
+ }
+ }
+}
+
+#define asm_min(as, ir) asm_min_max(as, ir, 0)
+#define asm_max(as, ir) asm_min_max(as, ir, 1)
+
+/* -- Comparisons --------------------------------------------------------- */
+
+#if LJ_SOFTFP
+/* SFP comparisons. */
+static void asm_sfpcomp(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+#if LJ_64
+ IRRef args[2];
+ args[0] = ir->op1;
+ args[1] = ir->op2;
+#else
+ IRRef args[4];
+ args[LJ_LE ? 0 : 1] = ir->op1; args[LJ_LE ? 1 : 0] = (ir+1)->op1;
+ args[LJ_LE ? 2 : 3] = ir->op2; args[LJ_LE ? 3 : 2] = (ir+1)->op2;
+#endif
+
+ for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+(LJ_64?1:3); r++) {
+ if (!rset_test(as->freeset, r) &&
+ regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
+ rset_clear(drop, r);
+ }
+ ra_evictset(as, drop);
+
+ asm_setupresult(as, ir, ci);
+
+ switch ((IROp)ir->o) {
+ case IR_LT:
+ asm_guard(as, MIPSI_BGEZ, RID_RET, 0);
+ break;
+ case IR_ULT:
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
+ emit_loadi(as, RID_TMP, 1);
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);
+ break;
+ case IR_GE:
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
+ emit_loadi(as, RID_TMP, 2);
+ asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
+ break;
+ case IR_LE:
+ asm_guard(as, MIPSI_BGTZ, RID_RET, 0);
+ break;
+ case IR_GT:
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
+ emit_loadi(as, RID_TMP, 2);
+ asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
+ break;
+ case IR_UGE:
+ asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
+ break;
+ case IR_ULE:
+ asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
+ emit_loadi(as, RID_TMP, 1);
+ break;
+ case IR_UGT: case IR_ABC:
+ asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
+ break;
+ case IR_EQ: case IR_NE:
+ asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_RET, RID_ZERO);
+ default:
+ break;
+ }
+ asm_gencall(as, ci, args);
+}
+#endif
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = ir->o;
+ if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ asm_sfpcomp(as, ir);
+#else
+#if !LJ_TARGET_MIPSR6
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
+#else
+ Reg tmp, right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
+ asm_guard(as, (op&1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
+ emit_fgh(as, MIPSI_CMP_LT_D + ((op&3) ^ ((op>>2)&1)), tmp, left, right);
+#endif
+#endif
+ } else {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (op == IR_ABC) op = IR_UGT;
+ if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) {
+ MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
+ ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
+ asm_guard(as, mi, left, 0);
+ } else {
+ if (irref_isk(ir->op2)) {
+ intptr_t k = get_kval(as, ir->op2);
+ if ((op&2)) k++;
+ if (checki16(k)) {
+ asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
+ RID_TMP, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
+ RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
+ }
+ }
+}
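+
+/* The bit tricks above rely on the IR opcode order
+** LT GE LE GT ULT UGE ULE UGT (illustrative):
+**   op & 4  -- unsigned compare: SLTU/SLTIU instead of SLT/SLTI
+**   op & 2  -- "<="/">" forms: swap operands, or add 1 to a constant
+**   op & 1  -- inverted sense: guard with the opposite branch
+*/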
+
+static void asm_equal(ASMState *as, IRIns *ir)
+{
+ Reg right, left = ra_alloc2(as, ir, (!LJ_SOFTFP && irt_isnum(ir->t)) ?
+ RSET_FPR : RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ asm_sfpcomp(as, ir);
+#elif !LJ_TARGET_MIPSR6
+ asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
+ emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
+#else
+ Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
+ asm_guard(as, (ir->o & 1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
+ emit_fgh(as, MIPSI_CMP_EQ_D, tmp, left, right);
+#endif
+ } else {
+ asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
+ }
+}
+
+#if LJ_32 && LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
+ IROp op = (ir-1)->o;
+ MCLabel l_end;
+ Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
+ righthi = (lefthi >> 8); lefthi &= 255;
+ leftlo = ra_alloc2(as, ir-1,
+ rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
+ rightlo = (leftlo >> 8); leftlo &= 255;
+ asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
+ l_end = emit_label(as);
+ if (lefthi != righthi)
+ emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
+ (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
+ emit_dst(as, MIPSI_SLTU, RID_TMP,
+ (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
+ if (lefthi != righthi)
+ emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
+}
+
+static void asm_comp64eq(ASMState *as, IRIns *ir)
+{
+ Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
+ emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
+ emit_dst(as, MIPSI_XOR, tmp, left, right);
+ left = ra_alloc2(as, ir-1, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
+}
+#endif
+
+/* -- Split register ops -------------------------------------------------- */
+
+/* Hiword op of a split 32/32 or 64/64 bit op. Previous op is the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+#if LJ_32 && (LJ_HASFFI || LJ_SOFTFP)
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+#if LJ_HASFFI && !LJ_SOFTFP
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+#endif
+ } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+#if LJ_SOFTFP
+ if (!irt_isint(ir->t)) {
+ asm_sfpcomp(as, ir-1);
+ return;
+ }
+#endif
+#if LJ_HASFFI
+ asm_comp64(as, ir);
+#endif
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+#if LJ_SOFTFP
+ if (!irt_isint(ir->t)) {
+ asm_sfpcomp(as, ir-1);
+ return;
+ }
+#endif
+#if LJ_HASFFI
+ asm_comp64eq(as, ir);
+#endif
+ return;
+#if LJ_SOFTFP
+ } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
+ as->curins--; /* Always skip the loword min/max. */
+ if (uselo || usehi)
+ asm_sfpmin_max(as, ir-1);
+ return;
+#endif
+ } else if ((ir-1)->o == IR_XSTORE) {
+ as->curins--; /* Handle both stores here. */
+ if ((ir-1)->r != RID_SINK) {
+ asm_xstore_(as, ir, LJ_LE ? 4 : 0);
+ asm_xstore_(as, ir-1, LJ_LE ? 0 : 4);
+ }
+ return;
+ }
+#endif
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_32 && LJ_HASFFI
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+#if LJ_32 && LJ_SOFTFP
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
+ }
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
+static void asm_prof(ASMState *as, IRIns *ir)
+{
+ UNUSED(ir);
+ asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
+ emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE);
+ emit_lsglptr(as, MIPSI_LBU, RID_TMP,
+ (int32_t)offsetof(global_State, hookmask));
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ ExitNo oldsnap = as->snapno;
+ rset_clear(allow, pbase);
+#if LJ_32
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+#else
+ tmp = allow ? rset_pickbot(allow) : RID_RET;
+#endif
+ as->snapno = exitno;
+ asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
+ as->snapno = oldsnap;
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tsi(as, MIPSI_AL, tmp, RID_SP, 0);
+ else
+ ra_modified(as, tmp);
+ emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
+ emit_dst(as, MIPSI_ASUBU, RID_TMP, tmp, pbase);
+ emit_tsi(as, MIPSI_AL, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, cur_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tsi(as, MIPSI_AS, tmp, RID_SP, 0);
+}
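+
+/* Equivalent check, as plain C (illustrative):
+**   if ((char *)L->maxstack - (char *)base < 8*topslot) exit;
+** with 8 bytes per stack slot; on overflow the trace exits and the
+** exit handler deals with growing the stack.
+*/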
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+#if LJ_32 || defined(LUA_USE_ASSERT)
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
+#endif
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+#if LJ_SOFTFP32
+ Reg tmp;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ /* LJ_SOFTFP: must be a number constant. */
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
+ emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
+ if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
+ emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
+#elif LJ_SOFTFP /* && LJ_64 */
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
+ emit_tsi(as, MIPSI_SD, src, RID_BASE, ofs);
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
+#endif
+ } else {
+#if LJ_32
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ Reg type;
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+ "restore of IR type %d", irt_type(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+#if LJ_SOFTFP
+ } else if ((sn & SNAP_SOFTFPNUM)) {
+ type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
+#endif
+ } else if ((sn & SNAP_KEYINDEX)) {
+ type = ra_allock(as, (int32_t)LJ_KEYINDEX, allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
+#else
+ if ((sn & SNAP_KEYINDEX)) {
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ int64_t kki = (int64_t)LJ_KEYINDEX << 32;
+ if (irref_isk(ref)) {
+ emit_tsi(as, MIPSI_SD,
+ ra_allock(as, kki | (int64_t)(uint32_t)ir->i, allow),
+ RID_BASE, ofs);
+ } else {
+ Reg src = ra_alloc1(as, ref, allow);
+ Reg rki = ra_allock(as, kki, rset_exclude(allow, src));
+ emit_tsi(as, MIPSI_SD, RID_TMP, RID_BASE, ofs);
+ emit_dst(as, MIPSI_DADDU, RID_TMP, src, rki);
+ }
+ } else {
+ asm_tvstore64(as, RID_BASE, ofs, ref);
+ }
+#endif
+ }
+ checkmclim(as);
+ }
+ lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Marker to prevent patching the GC check exit. */
+#define MIPS_NOPATCH_GC_CHECK MIPSI_OR
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ /* Assumes asm_snap_prep() already done. */
+ asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ l_end[-3] = MIPS_NOPATCH_GC_CHECK; /* Replace the nop after the call. */
+ emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
+ emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
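+
+/* Equivalent logic, as plain C (illustrative):
+**   if (g->gc.total >= g->gc.threshold) {
+**     if (lj_gc_step_jit(g, steps)) -- nonzero in GCSatomic/GCSfinalize
+**       exit;                       -- leave the trace, see above
+**   }
+*/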
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ p[-1] = MIPSI_NOP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guard already inverted the cond branch. Only patch the target. */
+ p[-3] |= ((target-p+2) & 0x0000ffffu);
+ } else {
+ p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ }
+}
+
+/* Fixup the tail of the loop. */
+static void asm_loop_tail_fixup(ASMState *as)
+{
+ if (as->loopinv) as->mctop--;
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_move(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_move(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ int32_t spadj = as->T->spadjust;
+ MCode *p = as->mctop-1;
+ *p = spadj ? (MIPSI_AADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
+ p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */
+ as->invmcp = as->loopref ? as->mcp : NULL;
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = CCI_XNARGS(ci);
+#if LJ_32
+ int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+#else
+ int nslots = 0, ngpr = REGARG_NUMGPR;
+#endif
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++) {
+#if LJ_32
+ if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t) &&
+ nfpr > 0 && !(ci->flags & CCI_VARARG)) {
+ nfpr--;
+ ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) {
+ nfpr = 0;
+ ngpr = ngpr & ~1;
+ if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
+ } else {
+ nfpr = 0;
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+#else
+ if (ngpr > 0) ngpr--; else nslots += 2;
+#endif
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_sparejump_setup(as);
+ asm_exitstub_setup(as);
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL, *cstop = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
+ MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
+ for (p++; p < pe; p++) {
+ if (*p == exitload) { /* Look for load of exit number. */
+ /* Look for exitstub branch. Yes, this covers all used branch variants. */
+ if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
+ ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
+ (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
+#if !LJ_TARGET_MIPSR6
+ (p[-1] & 0xffe00000u) == MIPSI_BC1F
+#else
+ (p[-1] & 0xff600000u) == MIPSI_BC1EQZ
+#endif
+ ) && p[-2] != MIPS_NOPATCH_GC_CHECK) {
+ ptrdiff_t delta = target - p;
+ if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
+ patchbranch:
+ p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p+1;
+ if (!cstart) cstart = p-1;
+ } else { /* Branch out of range. Use spare jump slot in mcarea. */
+ MCode *mcjump = asm_sparejump_use(mcarea, tjump);
+ if (mcjump) {
+ lj_mcode_sync(mcjump, mcjump+1);
+ delta = mcjump - p;
+ if (((delta + 0x8000) >> 16) == 0) {
+ goto patchbranch;
+ } else {
+ lj_assertJ(0, "spare jump out of range: -Osizemcode too big");
+ }
+ }
+ /* Ignore jump slot overflow. Child trace is simply not attached. */
+ }
+ } else if (p+1 == pe) {
+ /* Patch NOP after code for inverted loop branch. Use of J is ok. */
+ lj_assertJ(p[1] == MIPSI_NOP, "expected NOP");
+ p[1] = tjump;
+ *p = MIPSI_NOP; /* Replace the load of the exit number. */
+ cstop = p+2;
+ if (!cstart) cstart = p+1;
+ }
+ }
+ }
+ if (cstart) lj_mcode_sync(cstart, cstop);
+ lj_mcode_patch(J, mcarea, 1);
+}
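+
+/* Patching sketch: each exit site is a branch to its exit stub with
+** "li RID_TMP, exitno" in the delay slot. Redirecting an exit rewrites
+** the 16 bit branch offset in place when the new target is in range,
+** otherwise it bounces through a spare jump slot in the machine code
+** area (see asm_sparejump_use).
+*/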
+
diff --git a/libs/luajit-cmake/luajit/src/lj_asm_ppc.h b/libs/luajit-cmake/luajit/src/lj_asm_ppc.h
new file mode 100644
index 0000000..546b8e5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm_ppc.h
@@ -0,0 +1,2325 @@
+/*
+** PPC IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Register allocator extensions --------------------------------------- */
+
+/* Allocate a register with a hint. */
+static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
+{
+ Reg r = IR(ref)->r;
+ if (ra_noreg(r)) {
+ if (!ra_hashint(r) && !iscrossref(as, ref))
+ ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
+ r = ra_allocref(as, ref, allow);
+ }
+ ra_noweak(as, r);
+ return r;
+}
+
+/* Allocate two source registers for three-operand instructions. */
+static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ Reg left = irl->r, right = irr->r;
+ if (ra_hasreg(left)) {
+ ra_noweak(as, left);
+ if (ra_noreg(right))
+ right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
+ else
+ ra_noweak(as, right);
+ } else if (ra_hasreg(right)) {
+ ra_noweak(as, right);
+ left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
+ } else if (ra_hashint(right)) {
+ right = ra_allocref(as, ir->op2, allow);
+ left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
+ } else {
+ left = ra_allocref(as, ir->op1, allow);
+ right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
+ }
+ return left | (right << 8);
+}
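+
+/* Note: ra_alloc2 packs both registers into one return value; callers
+** unpack it with the recurring idiom
+**   right = (left >> 8); left &= 255;
+*/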
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Setup exit stubs after the end of each trace. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ MCode *mxp = as->mctop;
+ if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
+ asm_mclimit(as);
+ /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
+ for (i = nexits-1; (int32_t)i >= 0; i--)
+ *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
+ *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
+ mxp--;
+ *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
+ *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
+ as->mctop = mxp;
+}
+
+static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
+{
+ /* Keep this in-sync with exitstub_trace_addr(). */
+ return as->mctop + exitno + 3;
+}
+
+/* Emit conditional branch to exit for guard. */
+static void asm_guardcc(ASMState *as, PPCCC cc)
+{
+ MCode *target = asm_exitstub_addr(as, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
+ emit_condbranch(as, PPCI_BC, cc^4, p);
+ return;
+ }
+ emit_condbranch(as, PPCI_BC, cc, target);
+}
+
+/* -- Operand fusion ------------------------------------------------------ */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if there's no conflicting instruction between curins and ref. */
+static int noconflict(ASMState *as, IRRef ref, IROp conflict)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref)
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ return 1; /* Ok, no conflict. */
+}
+
+/* Fuse the array base of colocated arrays. */
+static int32_t asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
+ return (int32_t)sizeof(GCtab);
+ return 0;
+}
+
+/* Indicates load/store indexed is ok. */
+#define AHUREF_LSX ((int32_t)0x80000000)
+
+/* Fuse array/hash/upvalue reference into register+offset operand. */
+static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ if (ir->o == IR_AREF) {
+ if (mayfuse(as, ref)) {
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, refa, allow);
+ }
+ }
+ if (*ofsp == AHUREF_LSX) {
+ Reg base = ra_alloc1(as, ir->op1, allow);
+ Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ return base | (idx << 8);
+ }
+ }
+ } else if (ir->o == IR_HREFK) {
+ if (mayfuse(as, ref)) {
+ int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ if (checki16(ofs)) {
+ *ofsp = ofs;
+ return ra_alloc1(as, ir->op1, allow);
+ }
+ }
+ } else if (ir->o == IR_UREFC) {
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
+ int32_t jgl = (int32_t)J2G(as->J);
+ if ((uint32_t)(ofs-jgl) < 65536) {
+ *ofsp = ofs-jgl-32768;
+ return RID_JGL;
+ } else {
+ *ofsp = (int16_t)ofs;
+ return ra_allock(as, ofs-(int16_t)ofs, allow);
+ }
+ }
+ } else if (ir->o == IR_TMPREF) {
+ *ofsp = (int32_t)(offsetof(global_State, tmptv)-32768);
+ return RID_JGL;
+ }
+ }
+ *ofsp = 0;
+ return ra_alloc1(as, ref, allow);
+}
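+
+/* RID_JGL permanently holds J2G(as->J) + 32768, so the surrounding
+** 64KB, in particular all of global_State, is reachable with a signed
+** 16 bit displacement. That is why the offsets computed above
+** subtract 32768.
+*/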
+
+/* Fuse XLOAD/XSTORE reference into load/store operand. */
+static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow, int32_t ofs)
+{
+ IRIns *ir = IR(ref);
+ Reg base;
+ if (ra_noreg(ir->r) && canfuse(as, ir)) {
+ if (ir->o == IR_ADD) {
+ int32_t ofs2;
+ if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
+ ofs = ofs2;
+ ref = ir->op1;
+ } else if (ofs == 0) {
+ Reg right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ } else if (ir->o == IR_STRREF) {
+ lj_assertA(ofs == 0, "bad usage");
+ ofs = (int32_t)sizeof(GCstr);
+ if (irref_isk(ir->op2)) {
+ ofs += IR(ir->op2)->i;
+ ref = ir->op1;
+ } else if (irref_isk(ir->op1)) {
+ ofs += IR(ir->op1)->i;
+ ref = ir->op2;
+ } else {
+ /* NYI: Fuse ADD with constant. */
+ Reg tmp, right, left = ra_alloc2(as, ir, allow);
+ right = (left >> 8); left &= 255;
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
+ emit_fai(as, pi, rt, tmp, ofs);
+ emit_tab(as, PPCI_ADD, tmp, left, right);
+ return;
+ }
+ if (!checki16(ofs)) {
+ Reg left = ra_alloc1(as, ref, allow);
+ Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
+ emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
+ return;
+ }
+ }
+ }
+ base = ra_alloc1(as, ref, allow);
+ emit_fai(as, pi, rt, base, ofs);
+}
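+
+/* The ((pi >> 20) & 0x780) expression above maps a D-form load/store
+** opcode onto the minor opcode bits of its indexed X-form sibling,
+** e.g. lwz -> lwzx, lbz -> lbzx, stw -> stwx, lfd -> lfdx. OR-ing it
+** into PPCI_LWZX thus yields the matching indexed instruction without
+** a lookup table.
+*/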
+
+/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
+static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
+ RegSet allow)
+{
+ IRIns *ira = IR(ref);
+ Reg right, left;
+ if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
+ left = ra_alloc2(as, ira, allow);
+ right = (left >> 8); left &= 255;
+ } else {
+ right = ra_alloc1(as, ref, allow);
+ left = RID_R0;
+ }
+ emit_tab(as, pi, rt, left, right);
+}
+
+#if !LJ_SOFTFP
+/* Fuse to multiply-add/sub instruction. */
+static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
+{
+ IRRef lref = ir->op1, rref = ir->op2;
+ IRIns *irm;
+ if (lref != rref &&
+ ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
+ ra_noreg(irm->r)) ||
+ (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
+ (rref = lref, pi = pir, ra_noreg(irm->r))))) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg add = ra_alloc1(as, rref, RSET_FPR);
+ Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
+ right = (left >> 8); left &= 255;
+ emit_facb(as, pi, dest, left, right, add);
+ return 1;
+ }
+ return 0;
+}
+#endif
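+
+/* A sketch of the fusion above: an FADD/FSUB whose operand is a not
+** yet materialized FP multiply collapses into one fused op:
+**
+**   a*b + c  ->  fmadd  d, a, b, c
+**   a*b - c  ->  fmsub  d, a, b, c
+**   c - a*b  ->  fnmsub d, a, b, c   ; -(a*b - c) == c - a*b
+**
+** pir replaces pi when the multiply is the right-hand operand, which
+** is how asm_sub() below gets the fnmsub form.
+*/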
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_XNARGS(ci);
+ int32_t ofs = 8;
+ Reg gpr = REGARG_FIRSTGPR;
+#if !LJ_SOFTFP
+ Reg fpr = REGARG_FIRSTFPR;
+#endif
+ if ((void *)ci->func)
+ emit_call(as, (void *)ci->func);
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ if (ref) {
+ IRIns *ir = IR(ref);
+#if !LJ_SOFTFP
+ if (irt_isfp(ir->t)) {
+ if (fpr <= REGARG_LASTFPR) {
+ lj_assertA(rset_test(as->freeset, fpr),
+ "reg %d not free", fpr); /* Already evicted. */
+ ra_leftov(as, fpr, ref);
+ fpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_FPR);
+ if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
+ emit_spstore(as, ir, r, ofs);
+ ofs += irt_isnum(ir->t) ? 8 : 4;
+ }
+ } else
+#endif
+ {
+ if (gpr <= REGARG_LASTGPR) {
+ lj_assertA(rset_test(as->freeset, gpr),
+ "reg %d not free", gpr); /* Already evicted. */
+ ra_leftov(as, gpr, ref);
+ gpr++;
+ } else {
+ Reg r = ra_alloc1(as, ref, RSET_GPR);
+ emit_spstore(as, ir, r, ofs);
+ ofs += 4;
+ }
+ }
+ } else {
+ if (gpr <= REGARG_LASTGPR)
+ gpr++;
+ else
+ ofs += 4;
+ }
+ checkmclim(as);
+ }
+#if !LJ_SOFTFP
+ if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
+ emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
+#endif
+}
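+
+/* The loop above follows the 32 bit PowerPC calling convention: the
+** first few arguments travel in REGARG_*GPR/REGARG_*FPR registers, the
+** rest go to the stack starting at offset 8, with doubles rounded up
+** to an 8 byte boundary by (ofs + 4) & ~4. The trailing crxor/creqv
+** on CR bit 6 tells vararg callees whether any FPRs were used, as the
+** SVR4 ABI requires.
+*/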
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
+#if !LJ_SOFTFP
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+#endif
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ lj_assertA(!irt_ispri(ir->t), "PRI dest");
+ if (!LJ_SOFTFP && irt_isfp(ir->t)) {
+ if ((ci->flags & CCI_CASTU64)) {
+ /* Use spill slot or temp slots. */
+ int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
+ }
+ emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
+ emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ ra_destreg(as, ir, RID_RET);
+ }
+ }
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ if (irref_isk(func)) { /* Call to constant address. */
+ ci.func = (ASMFunction)(void *)(intptr_t)(irf->i);
+ } else { /* Need a non-argument register for indirect calls. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg freg = ra_alloc1(as, func, allow);
+ *--as->mcp = PPCI_BCTRL;
+ *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
+ ci.func = (ASMFunction)(void *)0;
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+ emit_ab(as, PPCI_CMPW, RID_TMP,
+ ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
+ emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irgc;
+ irgc.ot = IRT(0, IRT_PGC); /* GC type. */
+ emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
+ emit_rot(as, PPCI_RLWIMI, RID_TMP, tmp, 0, 31-lj_fls(SBUF_MASK_FLAG), 31);
+ emit_getgl(as, RID_TMP, cur_L);
+ emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+}
+#endif
+
+/* -- Type conversions ---------------------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ RegSet allow = RSET_FPR;
+ Reg tmp = ra_scratch(as, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_fab(as, PPCI_FCMPU, 0, tmp, left);
+ emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
+ emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)&as->J->k32[LJ_K32_2P52_2P31], RSET_GPR);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+}
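+
+/* How the checked conversion above works (read the emits bottom-up for
+** execution order): fctiwz truncates the number to an integer n; n is
+** rebuilt as a double by storing the bit pattern (hi = 0x43300000,
+** lo = n ^ 0x80000000), which is exactly 2^52 + 2^31 + n, and then
+** subtracting the 2^52+2^31 bias. If the result differs from the
+** original operand, the conversion was inexact and the guard exits.
+*/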
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_FPR;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, allow);
+ Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
+ Reg tmp = ra_scratch(as, rset_clear(allow, right));
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fab(as, PPCI_FADD, tmp, left, right);
+}
+#endif
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if !LJ_SOFTFP
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+#endif
+ IRRef lref = ir->op1;
+ /* 64 bit integer conversions are handled by SPLIT. */
+ lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
+ "IR %04d has unsplit 64 bit type",
+ (int)(ir - as->ir) - REF_BIAS);
+#if LJ_SOFTFP
+ /* FP conversions are handled by SPLIT. */
+ lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
+ "IR %04d has FP type",
+ (int)(ir - as->ir) - REF_BIAS);
+ /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
+#else
+ lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ if (st == IRT_NUM) /* double -> float conversion. */
+ emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
+ else /* float -> double conversion is a no-op on PPC. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* Integer to FP conversion. */
+ /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
+ /* IRT_U32: Bias with 2^52, subtract 2^52. */
+ RegSet allow = RSET_GPR;
+ Reg left = ra_alloc1(as, lref, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ &as->J->k32[st == IRT_U32 ? LJ_K32_2P52 : LJ_K32_2P52_2P31],
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
+ RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
+ }
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, lref, RSET_FPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ if (irt_isu32(ir->t)) {
+ /* Convert both x and x-2^31 to int and merge results. */
+ Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
+ emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
+ emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
+ emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
+ emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
+ emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_tai(as, PPCI_LWZ, dest,
+ RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
+ emit_fab(as, PPCI_FSUB, tmp, left, tmp);
+ emit_lsptr(as, PPCI_LFS, (tmp & 31),
+ (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, tmp, left);
+ }
+ }
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
+ if ((ir->op2 & IRCONV_SEXT))
+ emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
+ else
+ emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
+ } else { /* 32/64 bit integer conversions. */
+ /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
+ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+}
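+
+/* The integer-to-FP path above is the same bias trick in reverse. For
+** a signed 32 bit integer n, as a worked sketch:
+**
+**   bits(hi = 0x43300000, lo = n ^ 0x80000000) == 2^52 + 2^31 + n
+**   (2^52 + 2^31 + n) - (2^52 + 2^31)          == (double)n
+**
+** For IRT_U32 the sign-bit flip is skipped and only 2^52 is
+** subtracted. The unsigned FP-to-integer case has no direct
+** instruction either, so both x and x-2^31 are converted with fctiwz
+** and merged under a sign mask, as the inline comments note.
+*/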
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ int32_t ofs = SPOFS_TMP;
+#if LJ_SOFTFP
+ ra_evictset(as, RSET_SCRATCH);
+ if (ra_used(ir)) {
+ if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
+ (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ Reg r = (ir+i)->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ ra_modified(as, r);
+ emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
+ }
+ }
+ ofs = sps_scale(ir->s & ~1);
+ } else {
+ Reg rhi = ra_dest(as, ir+1, RSET_GPR);
+ Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
+ emit_tai(as, PPCI_LWZ, rhi, RID_SP, ofs);
+ emit_tai(as, PPCI_LWZ, rlo, RID_SP, ofs+4);
+ }
+ }
+#else
+ RegSet drop = RSET_SCRATCH;
+ if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
+ ra_evictset(as, drop);
+ if (ir->s) ofs = sps_scale(ir->s);
+#endif
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
+{
+ int32_t tmpofs = (int32_t)(offsetof(global_State, tmptv)-32768);
+ if ((mode & IRTMPREF_IN1)) {
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if ((mode & IRTMPREF_OUT1)) {
+#if LJ_SOFTFP
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
+ emit_setgl(as,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
+ tmptv.u32.lo);
+ emit_setgl(as,
+ ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
+ tmptv.u32.hi);
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
+ emit_fai(as, PPCI_STFD, src, RID_JGL, tmpofs);
+#endif
+ } else if (irref_isk(ref)) {
+ /* Use the number constant itself as a TValue. */
+ ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
+ } else {
+#if LJ_SOFTFP
+ lj_assertA(0, "unsplit FP op");
+#else
+ /* Otherwise force a spill and use the spill slot. */
+ emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
+#endif
+ }
+ } else {
+ /* Otherwise use g->tmptv to hold the TValue. */
+ Reg type;
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_GPR);
+ emit_setgl(as, src, tmptv.gcr);
+ }
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
+ type = ra_alloc1(as, ref+1, RSET_GPR);
+ else
+ type = ra_allock(as, irt_toitype(ir->t), RSET_GPR);
+ emit_setgl(as, type, tmptv.it);
+ }
+ } else {
+ emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
+ }
+}
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg idx, base;
+ if (irref_isk(ir->op2)) {
+ IRRef tab = IR(ir->op1)->op1;
+ int32_t ofs = asm_fuseabase(as, tab);
+ IRRef refa = ofs ? tab : ir->op1;
+ ofs += 8*IR(ir->op2)->i;
+ if (checki16(ofs)) {
+ base = ra_alloc1(as, refa, RSET_GPR);
+ emit_tai(as, PPCI_ADDI, dest, base, ofs);
+ return;
+ }
+ }
+ base = ra_alloc1(as, ir->op1, RSET_GPR);
+ idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
+ emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
+ emit_slwi(as, RID_TMP, idx, 3);
+}
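+
+/* Array slots are 8 byte TValues, hence the scaling shift by 3 above.
+** Constant indexes fold into a 16 bit addi offset instead, including
+** the base of colocated arrays via asm_fuseabase().
+*/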
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
+ Reg tisnum = RID_NONE, tmpnum = RID_NONE;
+ IRRef refkey = ir->op2;
+ IRIns *irkey = IR(refkey);
+ int isk = irref_isk(refkey);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ rset_clear(allow, tab);
+#if LJ_SOFTFP
+ if (!isk) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ if (irkey[1].o == IR_HIOP) {
+ if (ra_hasreg((irkey+1)->r)) {
+ tmpnum = (irkey+1)->r;
+ ra_noweak(as, tmpnum);
+ } else {
+ tmpnum = ra_allocref(as, refkey+1, allow);
+ }
+ rset_clear(allow, tmpnum);
+ }
+ }
+#else
+ if (irt_isnum(kt)) {
+ key = ra_alloc1(as, refkey, RSET_FPR);
+ tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
+ tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ rset_clear(allow, tisnum);
+ } else if (!irt_ispri(kt)) {
+ key = ra_alloc1(as, refkey, allow);
+ rset_clear(allow, key);
+ }
+#endif
+ tmp2 = ra_scratch(as, allow);
+ rset_clear(allow, tmp2);
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ as->invmcp = NULL;
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_EQ);
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = --as->mcp;
+ emit_ai(as, PPCI_CMPWI, dest, 0);
+ emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_EQ);
+ else
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ if (!LJ_SOFTFP && irt_isnum(kt)) {
+ emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
+ emit_condbranch(as, PPCI_BC, CC_GE, l_next);
+ emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
+ emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
+ } else {
+ if (!irt_ispri(kt)) {
+ emit_ab(as, PPCI_CMPW, tmp2, key);
+ emit_condbranch(as, PPCI_BC, CC_NE, l_next);
+ }
+ if (LJ_SOFTFP && ra_hasreg(tmpnum))
+ emit_ab(as, PPCI_CMPW, tmp1, tmpnum);
+ else
+ emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
+ if (!irt_ispri(kt))
+ emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
+ }
+ emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
+ *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
+ (((char *)as->mcp-(char *)l_loop) & 0xffffu);
+
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(as, irkey) : 1;
+ if (khash == 0) {
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ } else {
+ Reg tmphash = tmp1;
+ if (isk)
+ tmphash = ra_allock(as, khash, allow);
+ emit_tab(as, PPCI_ADD, dest, dest, tmp1);
+ emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
+ emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
+ emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
+ emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
+ if (isk) {
+ /* Nothing to do. */
+ } else if (irt_isstr(kt)) {
+ emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, sid));
+ } else { /* Must match with hash*() in lj_tab.c. */
+ emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
+ emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
+ emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
+ emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
+ emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
+ if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
+#if LJ_SOFTFP
+ emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tab(as, PPCI_ADD, tmp1, tmpnum, tmpnum);
+#else
+ int32_t ofs = ra_spill(as, irkey);
+ emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
+ emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
+ emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
+#endif
+ } else {
+ emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
+ emit_rotlwi(as, dest, tmp1, HASH_ROT1);
+ emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
+ emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
+ }
+ }
+ }
+}
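+
+/* Note on the dispatch above: for a constant key the hash is computed
+** at assembly time by ir_khash(); khash == 0 means the main position
+** is node[0], so the and/mulli address arithmetic is dropped entirely.
+** For variable keys khash is merely set to a nonzero dummy value to
+** force the generic path.
+*/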
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ int32_t kofs = ofs + (int32_t)offsetof(Node, key);
+ Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg key = RID_NONE, type = RID_TMP, idx = node;
+ RegSet allow = rset_exclude(RSET_GPR, node);
+ lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
+ if (ofs > 32736) {
+ idx = dest;
+ rset_clear(allow, dest);
+ kofs = (int32_t)offsetof(Node, key);
+ } else if (ra_hasreg(dest)) {
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+ asm_guardcc(as, CC_NE);
+ if (!irt_ispri(irkey->t)) {
+ key = ra_scratch(as, allow);
+ rset_clear(allow, key);
+ }
+ rset_clear(allow, type);
+ if (irt_isnum(irkey->t)) {
+ emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
+ asm_guardcc(as, CC_NE);
+ emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (ra_hasreg(key)) {
+ emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
+ asm_guardcc(as, CC_NE);
+ }
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
+ }
+ if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
+ emit_tai(as, PPCI_LWZ, type, idx, kofs);
+ if (ofs > 32736) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, node, ofs);
+ }
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
+ emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
+ } else {
+ emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
+ }
+ emit_tai(as, PPCI_LWZ, uv, func,
+ (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ UNUSED(as); UNUSED(ir);
+ lj_assertA(!ra_used(ir), "unfused FREF");
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRRef ref = ir->op2, refk = ir->op1;
+ int32_t ofs = (int32_t)sizeof(GCstr);
+ Reg r;
+ if (irref_isk(ref)) {
+ IRRef tmp = refk; refk = ref; ref = tmp;
+ } else if (!irref_isk(refk)) {
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ IRIns *irr = IR(ir->op2);
+ if (ra_hasreg(irr->r)) {
+ ra_noweak(as, irr->r);
+ right = irr->r;
+ } else if (mayfuse(as, irr->op2) &&
+ irr->o == IR_ADD && irref_isk(irr->op2) &&
+ checki16(ofs + IR(irr->op2)->i)) {
+ ofs += IR(irr->op2)->i;
+ right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
+ } else {
+ right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tai(as, PPCI_ADDI, dest, dest, ofs);
+ emit_tab(as, PPCI_ADD, dest, left, right);
+ return;
+ }
+ r = ra_alloc1(as, ref, RSET_GPR);
+ ofs += IR(refk)->i;
+ if (checki16(ofs))
+ emit_tai(as, PPCI_ADDI, dest, r, ofs);
+ else
+ emit_tab(as, PPCI_ADD, dest, r,
+ ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static PPCIns asm_fxloadins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
+ case IRT_U8: return PPCI_LBZ;
+ case IRT_I16: return PPCI_LHA;
+ case IRT_U16: return PPCI_LHZ;
+ case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_LFD;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS;
+ default: return PPCI_LWZ;
+ }
+}
+
+static PPCIns asm_fxstoreins(ASMState *as, IRIns *ir)
+{
+ UNUSED(as);
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: return PPCI_STB;
+ case IRT_I16: case IRT_U16: return PPCI_STH;
+ case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_STFD;
+ case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS;
+ default: return PPCI_STW;
+ }
+}
+
+static void asm_fload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ PPCIns pi = asm_fxloadins(as, ir);
+ Reg idx;
+ int32_t ofs;
+ if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
+ idx = RID_JGL;
+ ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
+ } else {
+ idx = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->op2 == IRFL_TAB_ARRAY) {
+ ofs = asm_fuseabase(as, ir->op1);
+ if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
+ emit_tai(as, PPCI_ADDI, dest, idx, ofs);
+ return;
+ }
+ }
+ ofs = field_ofs[ir->op2];
+ }
+ lj_assertA(!irt_isi8(ir->t), "unsupported FLOAD I8");
+ emit_tai(as, pi, dest, idx, ofs);
+}
+
+static void asm_fstore(ASMState *as, IRIns *ir)
+{
+ if (ir->r != RID_SINK) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ IRIns *irf = IR(ir->op1);
+ Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
+ int32_t ofs = field_ofs[irf->op2];
+ PPCIns pi = asm_fxstoreins(as, ir);
+ emit_tai(as, pi, src, idx, ofs);
+ }
+}
+
+static void asm_xload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
+ if (irt_isi8(ir->t))
+ emit_as(as, PPCI_EXTSB, dest, dest);
+ asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
+}
+
+static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
+{
+ IRIns *irb;
+ if (ir->r == RID_SINK)
+ return;
+ if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
+ ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
+ /* Fuse BSWAP with XSTORE to stwbrx. */
+ Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
+ asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
+ } else {
+ Reg src = ra_alloc1(as, ir->op2,
+ (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
+ asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
+ rset_exclude(RSET_GPR, src), ofs);
+ }
+}
+
+#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
+ RegSet allow = RSET_GPR;
+ int32_t ofs = AHUREF_LSX;
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) {
+ t.irt = IRT_NUM;
+ if (ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+ ofs = 0;
+ }
+ if (ra_used(ir)) {
+ lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
+ irt_isint(ir->t) || irt_isaddr(ir->t),
+ "bad load type %d", irt_type(ir->t));
+ if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0;
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (ir->o == IR_VLOAD) {
+ ofs = ofs != AHUREF_LSX ? ofs + 8 * ir->op2 :
+ ir->op2 ? 8 * ir->op2 : AHUREF_LSX;
+ }
+ if (irt_isnum(t)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
+ asm_guardcc(as, CC_GE);
+ emit_ab(as, PPCI_CMPLW, type, tisnum);
+ if (ra_hasreg(dest)) {
+ if (!LJ_SOFTFP && ofs == AHUREF_LSX) {
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
+ (idx&255)), (idx>>8)));
+ emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
+ } else {
+ emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest, idx,
+ ofs+4*LJ_SOFTFP);
+ }
+ }
+ } else {
+ asm_guardcc(as, CC_NE);
+ emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
+ }
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
+ emit_slwi(as, tmp, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_LWZ, type, idx, ofs);
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg idx, src = RID_NONE, type = RID_NONE;
+ int32_t ofs = AHUREF_LSX;
+ if (ir->r == RID_SINK)
+ return;
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+ src = ra_alloc1(as, ir->op2, RSET_FPR);
+ } else {
+ if (!irt_ispri(ir->t)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ ofs = 0;
+ }
+ if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
+ type = ra_alloc1(as, (ir+1)->op2, allow);
+ else
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ rset_clear(allow, type);
+ }
+ idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+ if (ofs == AHUREF_LSX) {
+ emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_fai(as, PPCI_STFD, src, idx, ofs);
+ }
+ } else {
+ if (ra_hasreg(src))
+ emit_tai(as, PPCI_STW, src, idx, ofs+4);
+ if (ofs == AHUREF_LSX) {
+ emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
+ emit_slwi(as, RID_TMP, (idx>>8), 3);
+ } else {
+ emit_tai(as, PPCI_STW, type, idx, ofs);
+ }
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
+ IRType1 t = ir->t;
+ Reg dest = RID_NONE, type = RID_NONE, base;
+ RegSet allow = RSET_GPR;
+ int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
+ if (hiop)
+ t.irt = IRT_NUM;
+ lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+ "bad parent SLOAD"); /* Handled by asm_head_side(). */
+ lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+ "inconsistent SLOAD variant");
+ lj_assertA(LJ_DUALNUM ||
+ !irt_isint(t) ||
+ (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME|IRSLOAD_KEYINDEX)),
+ "bad SLOAD type");
+#if LJ_SOFTFP
+ lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
+ "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
+ if (hiop && ra_used(ir+1)) {
+ type = ra_dest(as, ir+1, allow);
+ rset_clear(allow, type);
+ }
+#else
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ dest = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, dest);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+ } else
+#endif
+ if (ra_used(ir)) {
+ lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
+ "bad SLOAD type %d", irt_type(ir->t));
+ dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
+ rset_clear(allow, dest);
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+ if (!LJ_SOFTFP && (ir->op2 & IRSLOAD_CONVERT)) {
+ if (irt_isint(t)) {
+ emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
+ dest = ra_scratch(as, RSET_FPR);
+ emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
+ emit_fb(as, PPCI_FCTIWZ, dest, dest);
+ t.irt = IRT_NUM; /* Check for original type. */
+ } else {
+ Reg tmp = ra_scratch(as, allow);
+ Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
+ Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ emit_fab(as, PPCI_FSUB, dest, dest, fbias);
+ emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
+ emit_lsptr(as, PPCI_LFS, (fbias & 31),
+ (void *)&as->J->k32[LJ_K32_2P52_2P31],
+ rset_clear(allow, hibias));
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
+ emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
+ emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
+ dest = tmp;
+ t.irt = IRT_INT; /* Check for original type. */
+ }
+ }
+ goto dotypecheck;
+ }
+ base = ra_alloc1(as, REF_BASE, allow);
+ rset_clear(allow, base);
+dotypecheck:
+ if (irt_isnum(t)) {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
+ asm_guardcc(as, CC_GE);
+#if !LJ_SOFTFP
+ type = RID_TMP;
+#endif
+ emit_ab(as, PPCI_CMPLW, type, tisnum);
+ }
+ if (ra_hasreg(dest)) emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest,
+ base, ofs-(LJ_SOFTFP?0:4));
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guardcc(as, CC_NE);
+ if ((ir->op2 & IRSLOAD_KEYINDEX)) {
+ emit_ai(as, PPCI_CMPWI, RID_TMP, (LJ_KEYINDEX & 0xffff));
+ emit_asi(as, PPCI_XORIS, RID_TMP, RID_TMP, (LJ_KEYINDEX >> 16));
+ } else {
+ emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
+ }
+ type = RID_TMP;
+ }
+ if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
+ }
+ if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID id = (CTypeID)IR(ir->op1)->i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[4];
+ RegSet drop = RSET_SCRATCH;
+ lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+ "bad CNEW/CNEWI operands");
+
+ as->gcsteps++;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ if (ra_used(ir))
+ ra_destreg(as, ir, RID_RET); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ int32_t ofs = sizeof(GCcdata);
+ lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
+ if (sz == 8) {
+ ofs += 4;
+ lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
+ }
+ for (;;) {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_tai(as, PPCI_STW, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir++;
+ }
+ } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
+ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* CTypeID id */
+ args[2] = ir->op2; /* CTSize sz */
+ args[3] = ASMREF_TMP1; /* CTSize align */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+ return;
+ }
+
+ /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
+ emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
+ emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
+ emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
+ emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ asm_gencall(as, ci, args);
+ ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
+ ra_releasetmp(as, ASMREF_TMP1));
+}
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ Reg link = RID_TMP;
+ MCLabel l_end = emit_label(as);
+ emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
+ emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
+ emit_setgl(as, tab, gc.grayagain);
+ lj_assertA(LJ_GC_BLACK == 0x04, "bad LJ_GC_BLACK");
+ emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
+ emit_getgl(as, link, gc.grayagain);
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
+ emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj, val, tmp;
+ /* No need for other object barriers (yet). */
+ lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ obj = IR(ir->op1)->r;
+ tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
+ emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
+ val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
+ emit_tai(as, PPCI_LBZ, tmp, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+ emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
+}
+
+/* -- Arithmetic and logic operations ------------------------------------- */
+
+#if !LJ_SOFTFP
+static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_FMUL)
+ emit_fac(as, pi, dest, left, right);
+ else
+ emit_fab(as, pi, dest, left, right);
+}
+
+static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
+ emit_fb(as, pi, dest, left);
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
+ asm_fpunary(as, ir, PPCI_FSQRT);
+ else
+ asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
+}
+#endif
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
+ asm_fparith(as, ir, PPCI_FADD);
+ } else
+#endif
+ {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ PPCIns pi;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ pi = PPCI_ADDI;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi = PPCI_ADDICDOT;
+ }
+ emit_tai(as, pi, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
+ return;
+ } else if (!as->sectref) {
+ emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
+ emit_tai(as, PPCI_ADDI, dest, left, k);
+ return;
+ }
+ }
+ pi = PPCI_ADD;
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
+ asm_fparith(as, ir, PPCI_FSUB);
+ } else
+#endif
+ {
+ PPCIns pi = PPCI_SUBF;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ }
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fparith(as, ir, PPCI_FMUL);
+ } else
+#endif
+ {
+ PPCIns pi = PPCI_MULLW;
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_MULLI, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, pi, dest, left, right);
+ }
+}
+
+#define asm_fpdiv(as, ir) asm_fparith(as, ir, PPCI_FDIV)
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+#if !LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ asm_fpunary(as, ir, PPCI_FNEG);
+ } else
+#endif
+ {
+ Reg dest, left;
+ PPCIns pi = PPCI_NEG;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ emit_tab(as, pi, dest, left, 0);
+ }
+}
+
+#define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS)
+
+static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
+{
+ Reg dest, left, right;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ }
+ asm_guardcc(as, CC_SO);
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
+ emit_tab(as, pi|PPCF_DOT, dest, left, right);
+}
+
+#define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO)
+#define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO)
+#define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO)
+
+#if LJ_HASFFI
+static void asm_add64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
+ PPCIns pi = PPCI_ADDE;
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k == 0)
+ pi = PPCI_ADDZE;
+ else if (k == -1)
+ pi = PPCI_ADDME;
+ else
+ goto needright;
+ right = 0;
+ } else {
+ needright:
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ }
+ emit_tab(as, pi, dest, left, right);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_ADDIC, dest, left, k);
+ return;
+ }
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_tab(as, PPCI_ADDC, dest, left, right);
+}
+
+static void asm_sub64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
+ PPCIns pi = PPCI_SUBFE;
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (k == 0)
+ pi = PPCI_SUBFZE;
+ else if (k == -1)
+ pi = PPCI_SUBFME;
+ else
+ goto needleft;
+ left = 0;
+ } else {
+ needleft:
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ }
+ emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ right = ra_alloc1(as, ir->op2, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ int32_t k = IR(ir->op1)->i;
+ if (checki16(k)) {
+ emit_tai(as, PPCI_SUBFIC, dest, right, k);
+ return;
+ }
+ }
+ left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
+ emit_tab(as, PPCI_SUBFC, dest, right, left);
+}
+
+static void asm_neg64(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tab(as, PPCI_SUBFZE, dest, left, 0);
+ ir--;
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ emit_tai(as, PPCI_SUBFIC, dest, left, 0);
+}
+#endif
+
+static void asm_bnot(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ PPCIns pi = PPCI_NOR;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (mayfuse(as, ir->op1)) {
+ IRIns *irl = IR(ir->op1);
+ if (irl->o == IR_BAND)
+ pi ^= (PPCI_NOR ^ PPCI_NAND);
+ else if (irl->o == IR_BXOR)
+ pi ^= (PPCI_NOR ^ PPCI_EQV);
+ else if (irl->o != IR_BOR)
+ goto nofuse;
+ left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
+ right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
+ } else {
+nofuse:
+ left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ }
+ emit_asb(as, pi, dest, left, right);
+}
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ IRIns *irx;
+ if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
+ ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
+ /* Fuse BSWAP with XLOAD to lwbrx. */
+ asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
+ } else {
+ Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = dest;
+ if (tmp == left) {
+ tmp = RID_TMP;
+ emit_mr(as, dest, RID_TMP);
+ }
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
+ emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
+ emit_rotlwi(as, tmp, left, 8);
+ }
+}
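+
+/* The non-fused path above is the classic three-instruction PPC byte
+** swap, shown in execution order (i.e. reading the emits bottom-up):
+**
+**   rotlwi tmp, left, 8           ; ABCD -> BCDA
+**   rlwimi tmp, left, 24, 0, 7    ; insert D into bits 0-7:   DCDA
+**   rlwimi tmp, left, 24, 16, 23  ; insert B into bits 16-23: DCBA
+**
+** With a fuseable XLOAD operand the swap becomes a single
+** byte-reversed load (lwbrx) instead.
+*/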
+
+/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
+static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
+{
+ IRIns *ir;
+ Reg left;
+ if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
+ irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
+ int32_t sh = (IR(ir->op2)->i & 31);
+ switch (ir->o) {
+ case IR_BSHL:
+ if ((mask & ((1u<<sh)-1))) goto nofuse;
+ break;
+ case IR_BSHR:
+ if ((mask & ~((~0u)>>sh))) goto nofuse;
+ sh = ((32-sh)&31);
+ break;
+ case IR_BROL:
+ break;
+ default:
+ goto nofuse;
+ }
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
+ return;
+ }
+nofuse:
+ left = ra_alloc1(as, ref, RSET_GPR);
+ *--as->mcp = pi | PPCF_T(left);
+}
+
+static void asm_band(ASMState *as, IRIns *ir)
+{
+ Reg dest, left, right;
+ IRRef lref = ir->op1;
+ PPCIns dot = 0;
+ IRRef op2;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ if (k) {
+ /* First check for a contiguous bitmask as used by rlwinm. */
+ uint32_t s1 = lj_ffs((uint32_t)k);
+ uint32_t k1 = ((uint32_t)k >> s1);
+ if ((k1 & (k1+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
+ k, lref);
+ return;
+ }
+ if (~(uint32_t)k) {
+ uint32_t s2 = lj_ffs(~(uint32_t)k);
+ uint32_t k2 = (~(uint32_t)k >> s2);
+ if ((k2 & (k2+1)) == 0) {
+ asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
+ PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
+ k, lref);
+ return;
+ }
+ }
+ }
+ if (checku16(k)) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDIDOT, dest, left, k);
+ return;
+ } else if ((k & 0xffff) == 0) {
+ left = ra_alloc1(as, lref, RSET_GPR);
+ emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
+ return;
+ }
+ }
+ op2 = ir->op2;
+ if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
+ dot ^= (PPCI_AND ^ PPCI_ANDC);
+ op2 = IR(op2)->op1;
+ }
+ left = ra_hintalloc(as, lref, dest, RSET_GPR);
+ right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, PPCI_AND ^ dot, dest, left, right);
+}
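+
+/* A worked example for the rlwinm fusion above: k = 0x00000ff0 has
+** contiguous set bits (s1 = 4, k1 = 0xff, k1 & (k1+1) == 0), giving
+**
+**   rlwinm dest, left, 0, 20, 27   ; MB = 31-lj_fls(k), ME = 31-s1
+**
+** in the IBM convention where bit 0 is the MSB. The second test on ~k
+** catches masks whose ones wrap around from bit 31 to bit 0, which
+** rlwinm can express as well; asm_fuseandsh() folds a preceding shift
+** into the same instruction's rotate amount.
+*/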
+
+static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
+ if (irref_isk(ir->op2)) {
+ int32_t k = IR(ir->op2)->i;
+ Reg tmp = left;
+ if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
+ if (!checku16(k)) {
+ emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
+ if ((k & 0xffff) == 0) return;
+ }
+ emit_asi(as, pik, dest, left, k);
+ return;
+ }
+ }
+ /* May fail due to spills/restores above, but simplifies the logic. */
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ pi |= PPCF_DOT;
+ }
+ right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi, dest, left, right);
+}
+
+#define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI)
+#define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI)
+
+static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
+{
+ Reg dest, left;
+ Reg dot = 0;
+ if (as->flagmcp == as->mcp) {
+ as->flagmcp = NULL;
+ as->mcp++;
+ dot = PPCF_DOT;
+ }
+ dest = ra_dest(as, ir, RSET_GPR);
+ left = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (irref_isk(ir->op2)) { /* Constant shifts. */
+ int32_t shift = (IR(ir->op2)->i & 31);
+ if (pik == 0) /* SLWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
+ else if (pik == 1) /* SRWI */
+ emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
+ else
+ emit_asb(as, pik|dot, dest, left, shift);
+ } else {
+ Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
+ emit_asb(as, pi|dot, dest, left, right);
+ }
+}
+
+#define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0)
+#define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1)
+#define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI)
+#define asm_brol(as, ir) \
+ asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \
+ PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31))
+#define asm_bror(as, ir) lj_assertA(0, "unexpected BROR")
+
+#if LJ_SOFTFP
+static void asm_sfpmin_max(ASMState *as, IRIns *ir)
+{
+ CCallInfo ci = lj_ir_callinfo[IRCALL_softfp_cmp];
+ IRRef args[4];
+ MCLabel l_right, l_end;
+ Reg desthi = ra_dest(as, ir, RSET_GPR), destlo = ra_dest(as, ir+1, RSET_GPR);
+ Reg righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
+ Reg rightlo, leftlo = ra_alloc2(as, ir+1, RSET_GPR);
+ PPCCC cond = (IROp)ir->o == IR_MIN ? CC_EQ : CC_NE;
+ righthi = (lefthi >> 8); lefthi &= 255;
+ rightlo = (leftlo >> 8); leftlo &= 255;
+ args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
+ args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
+ l_end = emit_label(as);
+ if (desthi != righthi) emit_mr(as, desthi, righthi);
+ if (destlo != rightlo) emit_mr(as, destlo, rightlo);
+ l_right = emit_label(as);
+ if (l_end != l_right) emit_jmp(as, l_end);
+ if (desthi != lefthi) emit_mr(as, desthi, lefthi);
+ if (destlo != leftlo) emit_mr(as, destlo, leftlo);
+ if (l_right == as->mcp+1) {
+ cond ^= 4; l_right = l_end; ++as->mcp;
+ }
+ emit_condbranch(as, PPCI_BC, cond, l_right);
+ ra_evictset(as, RSET_SCRATCH);
+ emit_cmpi(as, RID_RET, 1);
+ asm_gencall(as, &ci, args);
+}
+#endif
+
+static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
+{
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg tmp = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ if (tmp == left || tmp == right)
+ tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
+ dest), left), right));
+ emit_facb(as, PPCI_FSEL, dest, tmp, left, right);
+ emit_fab(as, PPCI_FSUB, tmp, ismax ? left : right, ismax ? right : left);
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp1 = RID_TMP, tmp2 = dest;
+ Reg right, left = ra_alloc2(as, ir, RSET_GPR);
+ right = (left >> 8); left &= 255;
+ if (tmp2 == left || tmp2 == right)
+ tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
+ dest), left), right));
+ emit_tab(as, PPCI_ADD, dest, tmp2, right);
+ emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
+ emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
+ emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
+ emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
+ emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
+ }
+}
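+
+/* A sketch of the branchless GPR min/max above, in execution order:
+**
+**   xoris t1, left, 0x8000    ; flip sign bits so that signed order
+**   xoris t2, right, 0x8000   ; matches unsigned order
+**   subfc t2, t2, t1          ; t2 = left-right, CA = (left >= right)
+**   subfe t1, t1, t1          ; t1 = CA-1: 0 or an all-ones mask
+**   and/andc t2, t2, t1       ; keep left-right only if left wins
+**   add dest, t2, right       ; right, or right + (left-right) == left
+**
+** The FP variant computes the difference once and selects an operand
+** branchlessly with fsel.
+*/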
+
+#define asm_min(as, ir) asm_min_max(as, ir, 0)
+#define asm_max(as, ir) asm_min_max(as, ir, 1)
+
+/* -- Comparisons --------------------------------------------------------- */
+
+#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
+#define CC_TWO 0x80 /* Check two flags for FP comparison. */
+
+/* Map of comparisons to flags. ORDER IR. */
+static const uint8_t asm_compmap[IR_ABC+1] = {
+ /* op int cc FP cc */
+ /* LT */ CC_GE + (CC_GE<<4),
+ /* GE */ CC_LT + (CC_LE<<4) + CC_TWO,
+ /* LE */ CC_GT + (CC_GE<<4) + CC_TWO,
+ /* GT */ CC_LE + (CC_LE<<4),
+ /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
+ /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
+ /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
+ /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
+ /* EQ */ CC_NE + (CC_NE<<4),
+ /* NE */ CC_EQ + (CC_EQ<<4),
+ /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */
+};
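+
+/* Each entry above packs the integer condition in the low nibble and
+** the FP condition in bits 4-7. Both are the inverted conditions,
+** since guards branch to the exit when the comparison fails (IR_LT
+** exits on CC_GE). CC_UNSIGNED switches the integer compare to
+** cmplw/cmplwi. CC_TWO makes asm_comp() first cror the EQ bit into
+** the tested CR bit, for exit conditions like "less than or
+** unordered" that a single fcmpu flag cannot express.
+*/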
+
+static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
+{
+ Reg right, left = ra_alloc1(as, lref, RSET_GPR);
+ if (irref_isk(rref)) {
+ int32_t k = IR(rref)->i;
+ if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
+ if (checki16(k)) {
+ emit_tai(as, PPCI_CMPWI, cr, left, k);
+ /* Signed comparison with zero and referencing previous ins? */
+ if (k == 0 && lref == as->curins-1)
+ as->flagmcp = as->mcp; /* Allow elimination of the compare. */
+ return;
+ } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
+ emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
+ return;
+ }
+ }
+ } else { /* Unsigned comparison with constant. */
+ if (checku16(k)) {
+ emit_tai(as, PPCI_CMPLWI, cr, left, k);
+ return;
+ }
+ }
+ }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
+ emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
+}
+
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[ir->o];
+ if (!LJ_SOFTFP && irt_isnum(ir->t)) {
+ Reg right, left = ra_alloc2(as, ir, RSET_FPR);
+ right = (left >> 8); left &= 255;
+ asm_guardcc(as, (cc >> 4));
+ if ((cc & CC_TWO))
+ emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
+ emit_fab(as, PPCI_FCMPU, 0, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(lref) && !irref_isk(rref)) {
+ /* Swap constants to the right (only for ABC). */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
+ }
+ asm_guardcc(as, cc);
+ asm_intcomp_(as, lref, rref, 0, cc);
+ }
+}
+
+#define asm_equal(as, ir) asm_comp(as, ir)
+
+#if LJ_SOFTFP
+/* SFP comparisons. */
+static void asm_sfpcomp(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
+ RegSet drop = RSET_SCRATCH;
+ Reg r;
+ IRRef args[4];
+ args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
+ args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
+
+ for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+3; r++) {
+ if (!rset_test(as->freeset, r) &&
+ regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
+ rset_clear(drop, r);
+ }
+ ra_evictset(as, drop);
+ asm_setupresult(as, ir, ci);
+ switch ((IROp)ir->o) {
+ case IR_ULT:
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0);
+ case IR_ULE:
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 1);
+ break;
+ case IR_GE: case IR_GT:
+ asm_guardcc(as, CC_EQ);
+ emit_ai(as, PPCI_CMPWI, RID_RET, 2);
+ default:
+ asm_guardcc(as, (asm_compmap[ir->o] & 0xf));
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0);
+ break;
+ }
+ asm_gencall(as, ci, args);
+}
+#endif
+
+#if LJ_HASFFI
+/* 64 bit integer comparisons. */
+static void asm_comp64(ASMState *as, IRIns *ir)
+{
+ PPCCC cc = asm_compmap[(ir-1)->o];
+ if ((cc&3) == (CC_EQ&3)) {
+ asm_guardcc(as, cc);
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
+ (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
+ } else {
+ asm_guardcc(as, CC_EQ);
+ emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
+ emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
+ (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
+ }
+ /* Loword comparison sets cr1 and is unsigned, except for equality. */
+ asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
+ cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
+ /* Hiword comparison sets cr0. */
+ asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
+ as->flagmcp = NULL; /* Doesn't work here. */
+}
+#endif
+
+/* -- Split register ops -------------------------------------------------- */
+
+/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+#if LJ_HASFFI || LJ_SOFTFP
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+#if LJ_HASFFI && !LJ_SOFTFP
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+#endif
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ as->curins--; /* Always skip the loword comparison. */
+#if LJ_SOFTFP
+ if (!irt_isint(ir->t)) {
+ asm_sfpcomp(as, ir-1);
+ return;
+ }
+#endif
+#if LJ_HASFFI
+ asm_comp64(as, ir);
+#endif
+ return;
+#if LJ_SOFTFP
+ } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
+ as->curins--; /* Always skip the loword min/max. */
+ if (uselo || usehi)
+ asm_sfpmin_max(as, ir-1);
+ return;
+#endif
+ } else if ((ir-1)->o == IR_XSTORE) {
+ as->curins--; /* Handle both stores here. */
+ if ((ir-1)->r != RID_SINK) {
+ asm_xstore_(as, ir, 0);
+ asm_xstore_(as, ir-1, 4);
+ }
+ return;
+ }
+#endif
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_HASFFI
+ case IR_ADD: as->curins--; asm_add64(as, ir); break;
+ case IR_SUB: as->curins--; asm_sub64(as, ir); break;
+ case IR_NEG: as->curins--; asm_neg64(as, ir); break;
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+#if LJ_SOFTFP
+ case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
+ /* Nothing to do here. Handled by lo op itself. */
+ break;
+#endif
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
+ }
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
+static void asm_prof(ASMState *as, IRIns *ir)
+{
+ UNUSED(ir);
+ asm_guardcc(as, CC_NE);
+ emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE);
+ emit_lsglptr(as, PPCI_LBZ, RID_TMP,
+ (int32_t)offsetof(global_State, hookmask));
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
+ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
+ rset_clear(allow, pbase);
+ tmp = allow ? rset_pickbot(allow) :
+ (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
+ emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
+ else
+ ra_modified(as, tmp);
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
+ emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
+ emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
+ if (pbase == RID_TMP)
+ emit_getgl(as, RID_TMP, jit_base);
+ emit_getgl(as, tmp, cur_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
+}
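+
+/* Editor's sketch of the check above (illustrative; 8 byte stack slots):
+**
+**   lua_State *cl = g->cur_L;
+**   if ((uint32_t)((char *)cl->maxstack - (char *)base) < 8*topslot)
+**     goto exit;  // CMPLWI is unsigned; the exit handler reports the error.
+*/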
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if (irt_isnum(ir->t)) {
+#if LJ_SOFTFP
+ Reg tmp;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ /* LJ_SOFTFP: must be a number constant. */
+ lj_assertA(irref_isk(ref), "unsplit FP op");
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
+ emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
+ if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
+ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
+ emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
+#else
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
+#endif
+ } else {
+ Reg type;
+ RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
+ "restore of IR type %d", irt_type(ir->t));
+ if (!irt_ispri(ir->t)) {
+ Reg src = ra_alloc1(as, ref, allow);
+ rset_clear(allow, src);
+ emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ if (s == 0) continue; /* Do not overwrite link to previous frame. */
+ type = ra_allock(as, (int32_t)(*flinks--), allow);
+#if LJ_SOFTFP
+ } else if ((sn & SNAP_SOFTFPNUM)) {
+ type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
+#endif
+ } else if ((sn & SNAP_KEYINDEX)) {
+ type = ra_allock(as, (int32_t)LJ_KEYINDEX, allow);
+ } else {
+ type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
+ }
+ emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
+ }
+ checkmclim(as);
+ }
+ lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Marker to prevent patching the GC check exit. */
+#define PPC_NOPATCH_GC_CHECK PPCI_ORIS
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ *--as->mcp = PPC_NOPATCH_GC_CHECK;
+ emit_ai(as, PPCI_CMPWI, RID_RET, 0);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
+ tmp = ra_releasetmp(as, ASMREF_TMP2);
+ emit_loadi(as, tmp, as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
+ emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
+ emit_getgl(as, tmp, gc.threshold);
+ emit_getgl(as, RID_TMP, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
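+
+/* Editor's sketch (illustrative, shown in execution order -- the
+** assembler emits backwards):
+**
+**   if (g->gc.total >= g->gc.threshold) {
+**     if (lj_gc_step_jit(g, steps))  // steps = as->gcsteps
+**       goto exit;  // GCSatomic/GCSfinalize: leave the trace.
+**   }
+*/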
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the cond branch and patched the final b. */
+ p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
+ } else {
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+ }
+}
+
+/* Fixup the tail of the loop. */
+static void asm_loop_tail_fixup(ASMState *as)
+{
+ UNUSED(as); /* Nothing to do. */
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_mr(as, r, RID_BASE);
+ }
+}
+
+/* Coalesce BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ rset_clear(allow, irp->r);
+ emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ MCode *p = as->mctop;
+ MCode *target;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ *--p = PPCI_NOP;
+ *--p = PPCI_NOP;
+ as->mctop = p;
+ } else {
+ /* Patch stack adjustment. */
+ lj_assertA(checki16(CFRAME_SIZE+spadj), "stack adjustment out of range");
+ p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
+ p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop - 1; /* Leave room for exit branch. */
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ as->mcp = p-2; /* Leave room for stack pointer adjustment. */
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ uint32_t i, nargs = CCI_XNARGS(ci);
+ int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ asm_collectargs(as, ir, ci, args);
+ for (i = 0; i < nargs; i++)
+ if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+ return (!LJ_SOFTFP && irt_isfp(ir->t)) ? REGSP_HINT(RID_FPRET) :
+ REGSP_HINT(RID_RET);
+}
+
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *pe = (MCode *)((char *)p + T->szmcode);
+ MCode *px = exitstub_trace_addr(T, exitno);
+ MCode *cstart = NULL;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ int clearso = 0, patchlong = 1;
+ for (; p < pe; p++) {
+ /* Look for exitstub branch, try to replace with branch to target. */
+ uint32_t ins = *p;
+ if ((ins & 0xfc000000u) == 0x40000000u &&
+ ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if (((ins >> 16) & 3) == (CC_SO&3)) {
+ clearso = sizeof(MCode);
+ delta -= sizeof(MCode);
+ }
+ /* Many, but not all, short-range branches can be patched directly. */
+ if (p[-1] == PPC_NOPATCH_GC_CHECK) {
+ patchlong = 0;
+ } else if (((delta + 0x8000) >> 16) == 0) {
+ *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
+ ((delta & 0x8000) * (PPCF_Y/0x8000));
+ if (!cstart) cstart = p;
+ }
+ } else if ((ins & 0xfc000000u) == PPCI_B &&
+ ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lj_assertJ(((delta + 0x02000000) >> 26) == 0,
+ "branch target out of range");
+ *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ if (!cstart) cstart = p;
+ }
+ }
+ /* Always patch long-range branch in exit stub itself. Except if we can't. */
+ if (patchlong) {
+ ptrdiff_t delta = (char *)target - (char *)px - clearso;
+ lj_assertJ(((delta + 0x02000000) >> 26) == 0,
+ "branch target out of range");
+ *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
+ }
+ if (!cstart) cstart = px;
+ lj_mcode_sync(cstart, px+1);
+ if (clearso) { /* Extend the current trace. Ugly workaround. */
+ MCode *pp = J->cur.mcode;
+ J->cur.szmcode += sizeof(MCode);
+ *--pp = PPCI_MCRXR; /* Clear SO flag. */
+ J->cur.mcode = pp;
+ lj_mcode_sync(pp, pp+1);
+ }
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_asm_x86.h b/libs/luajit-cmake/luajit/src/lj_asm_x86.h
new file mode 100644
index 0000000..2bf9d93
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_asm_x86.h
@@ -0,0 +1,3125 @@
+/*
+** x86/x64 IR assembler (SSA IR -> machine code).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Guard handling ------------------------------------------------------ */
+
+/* Generate an exit stub group at the bottom of the reserved MCode memory. */
+static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
+{
+ ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
+ MCode *mxp = as->mcbot;
+ MCode *mxpstart = mxp;
+ if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
+ asm_mclimit(as);
+ /* Push low byte of exitno for each exit stub. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
+ for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
+ *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
+ }
+ /* Push the high byte of the exitno for each exit stub group. */
+ *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
+#if !LJ_GC64
+ /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
+ *mxp++ = XI_MOVmi;
+ *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
+ *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ *mxp++ = 2*sizeof(void *);
+ *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
+#endif
+ /* Jump to exit handler which fills in the ExitState. */
+ *mxp++ = XI_JMP; mxp += 4;
+ *((int32_t *)(mxp-4)) = jmprel(as->J, mxp, (MCode *)(void *)lj_vm_exit_handler);
+ /* Commit the code for this group (even if assembly fails later on). */
+ lj_mcode_commitbot(as->J, mxp);
+ as->mcbot = mxp;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ return mxpstart;
+}
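+
+/* Editor's note (illustrative): each exit enters the group at its own
+** "push low-byte-of-exitno"; the short jmp that follows skips the rest
+** of the group to the common tail, which pushes the high byte, re-stores
+** DISPATCH (non-GC64 only) and jumps to lj_vm_exit_handler. The handler
+** thus pops a full 16 bit exit number off the stack.
+*/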
+
+/* Setup all needed exit stubs. */
+static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
+{
+ ExitNo i;
+ if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
+ lj_trace_err(as->J, LJ_TRERR_SNAPOV);
+ for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
+ if (as->J->exitstubgroup[i] == NULL)
+ as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
+}
+
+/* Emit conditional branch to exit for guard.
+** It's important to emit this *after* all registers have been allocated,
+** because rematerializations may invalidate the flags.
+*/
+static void asm_guardcc(ASMState *as, int cc)
+{
+ MCode *target = exitstub_addr(as->J, as->snapno);
+ MCode *p = as->mcp;
+ if (LJ_UNLIKELY(p == as->invmcp)) {
+ as->loopinv = 1;
+ *(int32_t *)(p+1) = jmprel(as->J, p+5, target);
+ target = p;
+ cc ^= 1;
+ if (as->realign) {
+ if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
+ as->mrm.ofs += 2; /* Fixup RIP offset for pending fused load. */
+ emit_sjcc(as, cc, target);
+ return;
+ }
+ }
+ if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
+ as->mrm.ofs += 6; /* Fixup RIP offset for pending fused load. */
+ emit_jcc(as, cc, target);
+}
+
+/* -- Memory operand fusion ----------------------------------------------- */
+
+/* Limit linear search to this distance. Avoids O(n^2) behavior. */
+#define CONFLICT_SEARCH_LIM 31
+
+/* Check if a reference is a signed 32 bit constant. */
+static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
+{
+ if (irref_isk(ref)) {
+ IRIns *ir = IR(ref);
+#if LJ_GC64
+ if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
+ *k = ir->i;
+ return 1;
+ } else if (checki32((int64_t)ir_k64(ir)->u64)) {
+ *k = (int32_t)ir_k64(ir)->u64;
+ return 1;
+ }
+#else
+ if (ir->o != IR_KINT64) {
+ *k = ir->i;
+ return 1;
+ } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
+ *k = (int32_t)ir_kint64(ir)->u64;
+ return 1;
+ }
+#endif
+ }
+ return 0;
+}
+
+/* Check if there's no conflicting instruction between curins and ref.
+** Also avoid fusing loads if there are multiple references.
+*/
+static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
+{
+ IRIns *ir = as->ir;
+ IRRef i = as->curins;
+ if (i > ref + CONFLICT_SEARCH_LIM)
+ return 0; /* Give up, ref is too far away. */
+ while (--i > ref) {
+ if (ir[i].o == conflict)
+ return 0; /* Conflict found. */
+ else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
+ return 0;
+ }
+ return 1; /* Ok, no conflict. */
+}
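+
+/* Editor's note (illustrative): fusing a load means folding it into its
+** use, e.g. turning
+**
+**   mov ecx, [rbx+0x18]
+**   add eax, ecx
+**
+** into "add eax, [rbx+0x18]". That moves the load to the use site, which
+** is only safe if no aliasing store (the 'conflict' opcode) and no other
+** use of the load intervenes -- exactly what noconflict() verifies.
+*/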
+
+/* Fuse array base into memory operand. */
+static IRRef asm_fuseabase(ASMState *as, IRRef ref)
+{
+ IRIns *irb = IR(ref);
+ as->mrm.ofs = 0;
+ if (irb->o == IR_FLOAD) {
+ IRIns *ira = IR(irb->op1);
+ lj_assertA(irb->op2 == IRFL_TAB_ARRAY, "expected FLOAD TAB_ARRAY");
+ /* We can avoid the FLOAD of t->array for colocated arrays. */
+ if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
+ !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
+ as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */
+ return irb->op1; /* Table obj. */
+ }
+ } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
+ /* Fuse base offset (vararg load). */
+ as->mrm.ofs = IR(irb->op2)->i;
+ return irb->op1;
+ }
+ return ref; /* Otherwise use the given array base. */
+}
+
+/* Fuse array reference into memory operand. */
+static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irx;
+ lj_assertA(ir->o == IR_AREF, "expected AREF");
+ as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
+ irx = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += 8*irx->i;
+ as->mrm.idx = RID_NONE;
+ } else {
+ rset_clear(allow, as->mrm.base);
+ as->mrm.scale = XM_SCALE8;
+ /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
+ ** Doesn't help much without ABCelim, but reduces register pressure.
+ */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
+ irx->o == IR_ADD && irref_isk(irx->op2)) {
+ as->mrm.ofs += 8*IR(irx->op2)->i;
+ as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
+ } else {
+ as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
+ }
+ }
+}
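+
+/* Editor's sketch: for t[i+1] the index is ADD(i, +1); the constant is
+** folded into the displacement, e.g. (hypothetical registers)
+**
+**   movsd xmm0, [rax+rcx*8+8]   ; base = t->array, idx = i, ofs = 8*1
+**
+** instead of first materializing i+1 in a register.
+*/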
+
+/* Fuse array/hash/upvalue reference into memory operand.
+** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
+** pass the final allow mask, excluding any GPRs used for other inputs.
+** In particular: 2-operand GPR instructions need to call ra_dest() first!
+*/
+static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_noreg(ir->r)) {
+ switch ((IROp)ir->o) {
+ case IR_AREF:
+ if (mayfuse(as, ref)) {
+ asm_fusearef(as, ir, allow);
+ return;
+ }
+ break;
+ case IR_HREFK:
+ if (mayfuse(as, ref)) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+ as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
+ as->mrm.idx = RID_NONE;
+ return;
+ }
+ break;
+ case IR_UREFC:
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
+#if LJ_GC64
+ int64_t ofs = dispofs(as, &uv->tv);
+ if (checki32(ofs) && checki32(ofs+4)) {
+ as->mrm.ofs = (int32_t)ofs;
+ as->mrm.base = RID_DISPATCH;
+ as->mrm.idx = RID_NONE;
+ return;
+ }
+#else
+ as->mrm.ofs = ptr2addr(&uv->tv);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ return;
+#endif
+ }
+ break;
+ case IR_TMPREF:
+#if LJ_GC64
+ as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->tmptv);
+ as->mrm.base = RID_DISPATCH;
+ as->mrm.idx = RID_NONE;
+#else
+ as->mrm.ofs = igcptr(&J2G(as->J)->tmptv);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+#endif
+ return;
+ default:
+ break;
+ }
+ }
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ as->mrm.ofs = 0;
+ as->mrm.idx = RID_NONE;
+}
+
+/* Fuse FLOAD/FREF reference into memory operand. */
+static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ lj_assertA(ir->o == IR_FLOAD || ir->o == IR_FREF,
+ "bad IR op %d", ir->o);
+ as->mrm.idx = RID_NONE;
+ if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
+#if LJ_GC64
+ as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch);
+ as->mrm.base = RID_DISPATCH;
+#else
+ as->mrm.ofs = (int32_t)(ir->op2 << 2) + ptr2addr(J2GG(as->J));
+ as->mrm.base = RID_NONE;
+#endif
+ return;
+ }
+ as->mrm.ofs = field_ofs[ir->op2];
+ if (irref_isk(ir->op1)) {
+ IRIns *op1 = IR(ir->op1);
+#if LJ_GC64
+ if (ir->op1 == REF_NIL) {
+ as->mrm.ofs -= GG_OFS(dispatch);
+ as->mrm.base = RID_DISPATCH;
+ return;
+ } else if (op1->o == IR_KPTR || op1->o == IR_KKPTR) {
+ intptr_t ofs = dispofs(as, ir_kptr(op1));
+ if (checki32(as->mrm.ofs + ofs)) {
+ as->mrm.ofs += (int32_t)ofs;
+ as->mrm.base = RID_DISPATCH;
+ return;
+ }
+ }
+#else
+ as->mrm.ofs += op1->i;
+ as->mrm.base = RID_NONE;
+ return;
+#endif
+ }
+ as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
+}
+
+/* Fuse string reference into memory operand. */
+static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
+{
+ IRIns *irr;
+ lj_assertA(ir->o == IR_STRREF, "bad IR op %d", ir->o);
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = sizeof(GCstr);
+ if (!LJ_GC64 && irref_isk(ir->op1)) {
+ as->mrm.ofs += IR(ir->op1)->i;
+ } else {
+ Reg r = ra_alloc1(as, ir->op1, allow);
+ rset_clear(allow, r);
+ as->mrm.base = (uint8_t)r;
+ }
+ irr = IR(ir->op2);
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs += irr->i;
+ } else {
+ Reg r;
+ /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
+ if (!LJ_64 && /* Has bad effects with negative index on x64. */
+ mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
+ as->mrm.ofs += IR(irr->op2)->i;
+ r = ra_alloc1(as, irr->op1, allow);
+ } else {
+ r = ra_alloc1(as, ir->op2, allow);
+ }
+ if (as->mrm.base == RID_NONE)
+ as->mrm.base = (uint8_t)r;
+ else
+ as->mrm.idx = (uint8_t)r;
+ }
+}
+
+static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ as->mrm.idx = RID_NONE;
+ if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+#if LJ_GC64
+ intptr_t ofs = dispofs(as, ir_kptr(ir));
+ if (checki32(ofs)) {
+ as->mrm.ofs = (int32_t)ofs;
+ as->mrm.base = RID_DISPATCH;
+ return;
+ }
+ } if (0) {
+#else
+ as->mrm.ofs = ir->i;
+ as->mrm.base = RID_NONE;
+ } else if (ir->o == IR_STRREF) {
+ asm_fusestrref(as, ir, allow);
+#endif
+ } else {
+ as->mrm.ofs = 0;
+ if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
+ /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
+ IRIns *irx;
+ IRRef idx;
+ Reg r;
+ if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */
+ ref = ir->op1;
+ ir = IR(ref);
+ if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
+ goto noadd;
+ }
+ as->mrm.scale = XM_SCALE1;
+ idx = ir->op1;
+ ref = ir->op2;
+ irx = IR(idx);
+ if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */
+ idx = ir->op2;
+ ref = ir->op1;
+ irx = IR(idx);
+ }
+ if (canfuse(as, irx) && ra_noreg(irx->r)) {
+ if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
+ /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
+ idx = irx->op1;
+ as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
+ } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
+ /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
+ idx = irx->op1;
+ as->mrm.scale = XM_SCALE2;
+ }
+ }
+ r = ra_alloc1(as, idx, allow);
+ rset_clear(allow, r);
+ as->mrm.idx = (uint8_t)r;
+ }
+ noadd:
+ as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
+ }
+}
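+
+/* Editor's sketch: the gather above turns IR like
+**   ADD(ADD(ptr, BSHL(idx, 3)), +16)
+** into one SIB operand, e.g. "mov rax, [rbx+rcx*8+16]" (hypothetical
+** registers), matching what cdata pointer/array indexing produces.
+*/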
+
+/* Fuse load of 64 bit IR constant into memory operand. */
+static Reg asm_fuseloadk64(ASMState *as, IRIns *ir)
+{
+ const uint64_t *k = &ir_k64(ir)->u64;
+ if (!LJ_GC64 || checki32((intptr_t)k)) {
+ as->mrm.ofs = ptr2addr(k);
+ as->mrm.base = RID_NONE;
+#if LJ_GC64
+ } else if (checki32(dispofs(as, k))) {
+ as->mrm.ofs = (int32_t)dispofs(as, k);
+ as->mrm.base = RID_DISPATCH;
+ } else if (checki32(mcpofs(as, k)) && checki32(mcpofs(as, k+1)) &&
+ checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) {
+ as->mrm.ofs = (int32_t)mcpofs(as, k);
+ as->mrm.base = RID_RIP;
+ } else { /* Intern 64 bit constant at bottom of mcode. */
+ if (ir->i) {
+ lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
+ "bad interned 64 bit constant");
+ } else {
+ while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
+ *(uint64_t*)as->mcbot = *k;
+ ir->i = (int32_t)(as->mctop - as->mcbot);
+ as->mcbot += 8;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ lj_mcode_commitbot(as->J, as->mcbot);
+ }
+ as->mrm.ofs = (int32_t)mcpofs(as, as->mctop - ir->i);
+ as->mrm.base = RID_RIP;
+#endif
+ }
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+}
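+
+/* Editor's note: the interned constant's distance from as->mctop is
+** cached in ir->i, so fusing the same 64 bit constant again reuses the
+** already-emitted 8 byte slot (the assert above cross-checks this).
+*/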
+
+/* Fuse load into memory operand.
+**
+** Important caveat: this may emit RIP-relative loads! So don't place any
+** code emitters between this function and the use of its result.
+** The only permitted exception is asm_guardcc().
+*/
+static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
+{
+ IRIns *ir = IR(ref);
+ if (ra_hasreg(ir->r)) {
+ if (allow != RSET_EMPTY) { /* Fast path. */
+ ra_noweak(as, ir->r);
+ return ir->r;
+ }
+ fusespill:
+ /* Force a spill if only memory operands are allowed (asm_x87load). */
+ as->mrm.base = RID_ESP;
+ as->mrm.ofs = ra_spill(as, ir);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ if (ir->o == IR_KNUM) {
+ RegSet avail = as->freeset & ~as->modset & RSET_FPR;
+ lj_assertA(allow != RSET_EMPTY, "no register allowed");
+ if (!(avail & (avail-1))) /* Fuse if less than two regs available. */
+ return asm_fuseloadk64(as, ir);
+ } else if (ref == REF_BASE || ir->o == IR_KINT64) {
+ RegSet avail = as->freeset & ~as->modset & RSET_GPR;
+ lj_assertA(allow != RSET_EMPTY, "no register allowed");
+ if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
+ if (ref == REF_BASE) {
+#if LJ_GC64
+ as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->jit_base);
+ as->mrm.base = RID_DISPATCH;
+#else
+ as->mrm.ofs = ptr2addr(&J2G(as->J)->jit_base);
+ as->mrm.base = RID_NONE;
+#endif
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ } else {
+ return asm_fuseloadk64(as, ir);
+ }
+ }
+ } else if (mayfuse(as, ref)) {
+ RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
+ if (ir->o == IR_SLOAD) {
+ if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
+ noconflict(as, ref, IR_RETF, 0) &&
+ !(LJ_GC64 && irt_isaddr(ir->t))) {
+ as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
+ as->mrm.ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
+ (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ as->mrm.idx = RID_NONE;
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_FLOAD) {
+ /* Generic fusion is only ok for 32 bit operands (but see asm_comp). */
+ if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
+ noconflict(as, ref, IR_FSTORE, 0)) {
+ asm_fusefref(as, ir, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
+ if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0) &&
+ !(LJ_GC64 && irt_isaddr(ir->t))) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_XLOAD) {
+ /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
+ ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
+ */
+ if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
+ noconflict(as, ref, IR_XSTORE, 0)) {
+ asm_fusexref(as, ir->op1, xallow);
+ return RID_MRM;
+ }
+ } else if (ir->o == IR_VLOAD && IR(ir->op1)->o == IR_AREF &&
+ !(LJ_GC64 && irt_isaddr(ir->t))) {
+ asm_fuseahuref(as, ir->op1, xallow);
+ as->mrm.ofs += 8 * ir->op2;
+ return RID_MRM;
+ }
+ }
+ if (ir->o == IR_FLOAD && ir->op1 == REF_NIL) {
+ asm_fusefref(as, ir, RSET_EMPTY);
+ return RID_MRM;
+ }
+ if (!(as->freeset & allow) && !emit_canremat(ref) &&
+ (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
+ goto fusespill;
+ return ra_allocref(as, ref, allow);
+}
+
+#if LJ_64
+/* Don't fuse a 32 bit load into a 64 bit operation. */
+static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
+{
+ if (is64 && !irt_is64(IR(ref)->t))
+ return ra_alloc1(as, ref, allow);
+ return asm_fuseload(as, ref, allow);
+}
+#else
+#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow))
+#endif
+
+/* -- Calls --------------------------------------------------------------- */
+
+/* Count the required number of stack slots for a call. */
+static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t i, nargs = CCI_XNARGS(ci);
+ int nslots = 0;
+#if LJ_64
+ if (LJ_ABI_WIN) {
+ nslots = (int)(nargs*2); /* Only matters for more than four args. */
+ } else {
+ int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ if (nfpr > 0) nfpr--; else nslots += 2;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots += 2;
+ }
+ }
+#else
+ int ngpr = 0;
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ ngpr = 2;
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ ngpr = 1;
+ for (i = 0; i < nargs; i++)
+ if (args[i] && irt_isfp(IR(args[i])->t)) {
+ nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
+ } else {
+ if (ngpr > 0) ngpr--; else nslots++;
+ }
+#endif
+ return nslots;
+}
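+
+/* Editor's note (illustrative): on Windows/x64 every argument owns an
+** 8 byte home slot (two 4 byte spill slots here), including the ones
+** passed in registers -- hence the unconditional nargs*2 above.
+*/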
+
+/* Generate a call to a C function. */
+static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
+{
+ uint32_t n, nargs = CCI_XNARGS(ci);
+ int32_t ofs = STACKARG_OFS;
+#if LJ_64
+ uint32_t gprs = REGARG_GPRS;
+ Reg fpr = REGARG_FIRSTFPR;
+#if !LJ_ABI_WIN
+ MCode *patchnfpr = NULL;
+#endif
+#else
+ uint32_t gprs = 0;
+ if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
+ if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
+ gprs = (REGARG_GPRS & 31);
+ else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
+ gprs = REGARG_GPRS;
+ }
+#endif
+ if ((void *)ci->func)
+ emit_call(as, ci->func);
+#if LJ_64
+ if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */
+#if LJ_ABI_WIN
+ for (n = 0; n < 4 && n < nargs; n++) {
+ IRIns *ir = IR(args[n]);
+ if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */
+ emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
+ ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */
+ }
+#else
+ patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */
+ *--as->mcp = XI_MOVrib | RID_EAX;
+#endif
+ }
+#endif
+ for (n = 0; n < nargs; n++) { /* Setup args. */
+ IRRef ref = args[n];
+ IRIns *ir = IR(ref);
+ Reg r;
+#if LJ_64 && LJ_ABI_WIN
+ /* Windows/x64 argument registers are strictly positional. */
+ r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
+ fpr++; gprs >>= 5;
+#elif LJ_64
+ /* POSIX/x64 argument registers are used in order of appearance. */
+ if (irt_isfp(ir->t)) {
+ r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ }
+#else
+ if (ref && irt_isfp(ir->t)) {
+ r = 0;
+ } else {
+ r = gprs & 31; gprs >>= 5;
+ if (!ref) continue;
+ }
+#endif
+ if (r) { /* Argument is in a register. */
+ if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
+#if LJ_64
+ if (LJ_GC64 ? !(ir->o == IR_KINT || ir->o == IR_KNULL) : ir->o == IR_KINT64)
+ emit_loadu64(as, r, ir_k64(ir)->u64);
+ else
+#endif
+ emit_loadi(as, r, ir->i);
+ } else {
+ /* Must have been evicted. */
+ lj_assertA(rset_test(as->freeset, r), "reg %d not free", r);
+ if (ra_hasreg(ir->r)) {
+ ra_noweak(as, ir->r);
+ emit_movrr(as, ir, r, ir->r);
+ } else {
+ ra_allocref(as, ref, RID2RSET(r));
+ }
+ }
+ } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
+ lj_assertA(!(irt_isfloat(ir->t) && irref_isk(ref)),
+ "unexpected float constant");
+ if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
+ /* Split stores for unaligned FP consts. */
+ emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
+ emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
+ } else {
+ r = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
+ r, RID_ESP, ofs);
+ }
+ ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
+ } else { /* Non-FP argument is on stack. */
+ if (LJ_32 && ref < ASMREF_TMP1) {
+ emit_movmroi(as, RID_ESP, ofs, ir->i);
+ } else {
+ r = ra_alloc1(as, ref, RSET_GPR);
+ emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
+ }
+ ofs += sizeof(intptr_t);
+ }
+ checkmclim(as);
+ }
+#if LJ_64 && !LJ_ABI_WIN
+ if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
+#endif
+}
+
+/* Setup result reg/sp for call. Evict scratch regs. */
+static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ RegSet drop = RSET_SCRATCH;
+ int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
+ if ((ci->flags & CCI_NOFPRCLOBBER))
+ drop &= ~RSET_FPR;
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ if (hiop && ra_hasreg((ir+1)->r))
+ rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
+ ra_evictset(as, drop); /* Evictions must be performed first. */
+ if (ra_used(ir)) {
+ if (irt_isfp(ir->t)) {
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+#if LJ_64
+ if ((ci->flags & CCI_CASTU64)) {
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */
+ }
+ if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
+ } else {
+ ra_destreg(as, ir, RID_FPRET);
+ }
+#else
+ /* Number result is in x87 st0 for x86 calling convention. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS,
+ dest, RID_ESP, ofs);
+ }
+ if ((ci->flags & CCI_CASTU64)) {
+ emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
+ emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
+ } else {
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ }
+#endif
+ } else if (hiop) {
+ ra_destpair(as, ir);
+ } else {
+ lj_assertA(!irt_ispri(ir->t), "PRI dest");
+ ra_destreg(as, ir, RID_RET);
+ }
+ } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
+ emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */
+ }
+}
+
+/* Return a constant function pointer or NULL for indirect calls. */
+static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
+{
+#if LJ_32
+ UNUSED(as);
+ if (irref_isk(func))
+ return (void *)irf->i;
+#else
+ if (irref_isk(func)) {
+ MCode *p;
+ if (irf->o == IR_KINT64)
+ p = (MCode *)(void *)ir_k64(irf)->u64;
+ else
+ p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
+ if (p - as->mcp == (int32_t)(p - as->mcp))
+ return p; /* Call target is still in +-2GB range. */
+ /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
+ }
+#endif
+ return NULL;
+}
+
+static void asm_callx(ASMState *as, IRIns *ir)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ CCallInfo ci;
+ IRRef func;
+ IRIns *irf;
+ int32_t spadj = 0;
+ ci.flags = asm_callx_flags(as, ir);
+ asm_collectargs(as, ir, &ci, args);
+ asm_setupresult(as, ir, &ci);
+#if LJ_32
+ /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
+ if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
+ spadj = 4 * asm_count_call_slots(as, &ci, args);
+#endif
+ func = ir->op2; irf = IR(func);
+ if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
+ ci.func = (ASMFunction)asm_callx_func(as, irf, func);
+ if (!(void *)ci.func) {
+ /* Use a (hoistable) non-scratch register for indirect calls. */
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+ Reg r = ra_alloc1(as, func, allow);
+ if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */
+ emit_rr(as, XO_GROUP5, XOg_CALL, r);
+ } else if (LJ_32) {
+ emit_spsub(as, spadj);
+ }
+ asm_gencall(as, &ci, args);
+}
+
+/* -- Returns ------------------------------------------------------------- */
+
+/* Return to lower frame. Guard that it goes to the right spot. */
+static void asm_retf(ASMState *as, IRIns *ir)
+{
+ Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
+#if LJ_FR2
+ Reg rpc = ra_scratch(as, rset_exclude(RSET_GPR, base));
+#endif
+ void *pc = ir_kptr(IR(ir->op2));
+ int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
+ as->topslot -= (BCReg)delta;
+ if ((int32_t)as->topslot < 0) as->topslot = 0;
+ irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
+ emit_setgl(as, base, jit_base);
+ emit_addptr(as, base, -8*delta);
+ asm_guardcc(as, CC_NE);
+#if LJ_FR2
+ emit_rmro(as, XO_CMP, rpc|REX_GC64, base, -8);
+ emit_loadu64(as, rpc, u64ptr(pc));
+#else
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
+#endif
+}
+
+/* -- Buffer operations --------------------------------------------------- */
+
+#if LJ_HASBUFFER
+static void asm_bufhdr_write(ASMState *as, Reg sb)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
+ IRIns irgc;
+ irgc.ot = IRT(0, IRT_PGC); /* GC type. */
+ emit_storeofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+ emit_opgl(as, XO_ARITH(XOg_OR), tmp|REX_GC64, cur_L);
+ emit_gri(as, XG_ARITHi(XOg_AND), tmp, SBUF_MASK_FLAG);
+ emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
+}
+#endif
+
+/* -- Type conversions ---------------------------------------------------- */
+
+static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
+{
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_guardcc(as, CC_P);
+ asm_guardcc(as, CC_NE);
+ emit_rr(as, XO_UCOMISD, left, tmp);
+ emit_rr(as, XO_CVTSI2SD, tmp, dest);
+ emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
+ emit_rr(as, XO_CVTTSD2SI, dest, left);
+ /* Can't fuse since left is needed twice. */
+}
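+
+/* Editor's sketch of the checked conversion above, in C terms:
+**
+**   int32_t i = (int32_t)d;        // CVTTSD2SI
+**   if ((double)i != d) goto exit; // CVTSI2SD + UCOMISD; CC_P traps NaN
+*/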
+
+static void asm_tobit(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ Reg tmp = ra_noreg(IR(ir->op1)->r) ?
+ ra_alloc1(as, ir->op1, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ Reg right;
+ emit_rr(as, XO_MOVDto, tmp, dest);
+ right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
+ emit_mrm(as, XO_ADDSD, tmp, right);
+ ra_left(as, tmp, ir->op1);
+}
+
+static void asm_conv(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
+ int stfp = (st == IRT_NUM || st == IRT_FLOAT);
+ IRRef lref = ir->op1;
+ lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
+ lj_assertA(!(LJ_32 && (irt_isint64(ir->t) || st64)),
+ "IR %04d has unsplit 64 bit type",
+ (int)(ir - as->ir) - REF_BIAS);
+ if (irt_isfp(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ if (stfp) { /* FP to FP conversion. */
+ Reg left = asm_fuseload(as, lref, RSET_FPR);
+ emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
+ if (left == dest) return; /* Avoid the XO_XORPS. */
+ } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
+ /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
+ cTValue *k = &as->J->k64[LJ_K64_TOBIT];
+ Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
+ if (irt_isfloat(ir->t))
+ emit_rr(as, XO_CVTSD2SS, dest, dest);
+ emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
+ emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
+ emit_rma(as, XO_MOVSD, bias, k);
+ emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
+ return;
+ } else { /* Integer to FP conversion. */
+ Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
+ ra_alloc1(as, lref, RSET_GPR) :
+ asm_fuseloadm(as, lref, RSET_GPR, st64);
+ if (LJ_64 && st == IRT_U64) {
+ MCLabel l_end = emit_label(as);
+ cTValue *k = &as->J->k64[LJ_K64_2P64];
+ emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
+ }
+ emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
+ dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
+ }
+ emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
+ } else if (stfp) { /* FP to integer conversion. */
+ if (irt_isguard(ir->t)) {
+ /* Checked conversions are only supported from number to int. */
+ lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
+ "bad type for checked CONV");
+ asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI;
+ if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
+ /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
+ /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
+ Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
+ ra_scratch(as, RSET_FPR);
+ MCLabel l_end = emit_label(as);
+ if (LJ_32)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
+ emit_rr(as, op, dest|REX_64, tmp);
+ if (st == IRT_NUM)
+ emit_rma(as, XO_ADDSD, tmp, &as->J->k64[LJ_K64_M2P64_31]);
+ else
+ emit_rma(as, XO_ADDSS, tmp, &as->J->k32[LJ_K32_M2P64_31]);
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
+ emit_rr(as, op, dest|REX_64, tmp);
+ ra_left(as, tmp, lref);
+ } else {
+ if (LJ_64 && irt_isu32(ir->t))
+ emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
+ emit_mrm(as, op,
+ dest|((LJ_64 &&
+ (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
+ asm_fuseload(as, lref, RSET_FPR));
+ }
+ }
+ } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
+ Reg left, dest = ra_dest(as, ir, RSET_GPR);
+ RegSet allow = RSET_GPR;
+ x86Op op;
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
+ if (st == IRT_I8) {
+ op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_U8) {
+ op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
+ } else if (st == IRT_I16) {
+ op = XO_MOVSXw;
+ } else {
+ op = XO_MOVZXw;
+ }
+ left = asm_fuseload(as, lref, allow);
+ /* Add extra MOV if source is already in wrong register. */
+ if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
+ Reg tmp = ra_scratch(as, allow);
+ emit_rr(as, op, dest, tmp);
+ emit_rr(as, XO_MOV, tmp, left);
+ } else {
+ emit_mrm(as, op, dest, left);
+ }
+ } else { /* 32/64 bit integer conversions. */
+ if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else if (irt_is64(ir->t)) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64 || !(ir->op2 & IRCONV_SEXT)) {
+ /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ } else { /* 32 to 64 bit sign extension. */
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
+ }
+ } else {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (st64 && !(ir->op2 & IRCONV_NONE)) {
+ Reg left = asm_fuseload(as, lref, RSET_GPR);
+ /* This is either a 32 bit reg/reg mov which zeroes the hiword
+ ** or a load of the loword from a 64 bit address.
+ */
+ emit_mrm(as, XO_MOV, dest, left);
+ } else { /* 32/32 bit no-op (cast). */
+ ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
+ }
+ }
+ }
+}
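+
+/* Editor's sketch (standalone C, illustrative; not part of this file)
+** of the 2^52+2^51 bias trick used in the x86 U32-to-FP path above:
+**
+**   #include <stdint.h>
+**   static double u32_to_num(uint32_t u)
+**   {
+**     union { double d; uint64_t u64; } bias, x;
+**     bias.d = 6755399441055744.0;  // 2^52+2^51: low 32 mantissa bits 0
+**     x.u64 = bias.u64 | u;         // XORPS merges u into the mantissa
+**     return x.d - bias.d;          // SUBSD strips the bias: exact u32
+**   }
+**
+** Every u32 fits in the free low mantissa bits, so this is exact.
+*/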
+
+#if LJ_32 && LJ_HASFFI
+/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */
+
+/* 64 bit integer to FP conversion in 32 bit mode. */
+static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
+{
+ Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
+ irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
+ if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
+ MCLabel l_end = emit_label(as);
+ emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_2P64]);
+ emit_sjcc(as, CC_NS, l_end);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
+ } else {
+ lj_assertA(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64, "bad type for CONV");
+ }
+ emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
+ /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
+ emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
+}
+
+/* FP to 64 bit integer conversion in 32 bit mode. */
+static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
+{
+ IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
+ IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
+ Reg lo, hi;
+ lj_assertA(st == IRT_NUM || st == IRT_FLOAT, "bad type for CONV");
+ lj_assertA(dt == IRT_I64 || dt == IRT_U64, "bad type for CONV");
+ hi = ra_dest(as, ir, RSET_GPR);
+ lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
+ if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
+ /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
+ if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
+ emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
+ emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
+ }
+ if (dt == IRT_U64) {
+ /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
+ MCLabel l_pop, l_end = emit_label(as);
+ emit_x87op(as, XI_FPOP);
+ l_pop = emit_label(as);
+ emit_sjmp(as, l_end);
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3))
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ else
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_M2P64]);
+ emit_sjcc(as, CC_NS, l_pop);
+ emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
+ }
+ emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
+ if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */
+ emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
+ } else { /* Otherwise set FPU rounding mode to truncate before the store. */
+ emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
+ emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
+ emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
+ emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
+ emit_loadi(as, lo, 0xc00);
+ emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
+ }
+ if (dt == IRT_U64)
+ emit_x87op(as, XI_FDUP);
+ emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
+ st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
+ asm_fuseload(as, ir->op1, RSET_EMPTY));
+}
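+
+/* Editor's note (illustrative, in execution order): without SSE3's
+** FISTTP the truncation needs the x87 rounding control (CW bits 11:10)
+** forced to round-toward-zero, roughly:
+**
+**   fnstcw [esp]              ; save control word
+**   (OR the saved word with 0xc00 via a GPR, store back to [esp])
+**   fldcw  [esp]              ; RC = truncate
+**   fistp  qword [esp]        ; now behaves like a C (int64_t) cast
+**   (AND with 0xf3ff, fldcw)  ; reset RC to round-to-nearest afterwards
+*/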
+
+static void asm_conv64(ASMState *as, IRIns *ir)
+{
+ if (irt_isfp(ir->t))
+ asm_conv_fp_int64(as, ir);
+ else
+ asm_conv_int64_fp(as, ir);
+}
+#endif
+
+static void asm_strto(ASMState *as, IRIns *ir)
+{
+ /* Force a spill slot for the destination register (if any). */
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
+ IRRef args[2];
+ RegSet drop = RSET_SCRATCH;
+ if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
+ rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */
+ ra_evictset(as, drop);
+ asm_guardcc(as, CC_E);
+ emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */
+ args[0] = ir->op1; /* GCstr *str */
+ args[1] = ASMREF_TMP1; /* TValue *n */
+ asm_gencall(as, ci, args);
+ /* Store the result to the spill slot or temp slots. */
+ emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
+ RID_ESP, sps_scale(ir->s));
+}
+
+/* -- Memory references --------------------------------------------------- */
+
+/* Get pointer to TValue. */
+static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
+{
+ if ((mode & IRTMPREF_IN1)) {
+ IRIns *ir = IR(ref);
+ if (irt_isnum(ir->t)) {
+ if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
+ /* Use the number constant itself as a TValue. */
+ emit_loada(as, dest, ir_knum(ir));
+ return;
+ }
+ emit_rmro(as, XO_MOVSDto, ra_alloc1(as, ref, RSET_FPR), dest, 0);
+ } else {
+#if LJ_GC64
+ if (irref_isk(ref)) {
+ TValue k;
+ lj_ir_kvalue(as->J->L, &k, ir);
+ emit_movmroi(as, dest, 4, k.u32.hi);
+ emit_movmroi(as, dest, 0, k.u32.lo);
+ } else {
+ /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
+ if (irt_is64(ir->t)) {
+ emit_u32(as, irt_toitype(ir->t) << 15);
+ emit_rmro(as, XO_ARITHi, XOg_OR, dest, 4);
+ } else {
+ emit_movmroi(as, dest, 4, (irt_toitype(ir->t) << 15));
+ }
+ emit_movtomro(as, REX_64IR(ir, src), dest, 0);
+ }
+#else
+ if (!irref_isk(ref)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
+ emit_movtomro(as, REX_64IR(ir, src), dest, 0);
+ } else if (!irt_ispri(ir->t)) {
+ emit_movmroi(as, dest, 0, ir->i);
+ }
+ if (!(LJ_64 && irt_islightud(ir->t)))
+ emit_movmroi(as, dest, 4, irt_toitype(ir->t));
+#endif
+ }
+ }
+ emit_loada(as, dest, &J2G(as->J)->tmptv); /* g->tmptv holds the TValue(s). */
+}
+
+static void asm_aref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusearef(as, ir, RSET_GPR);
+ if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
+ emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
+ else if (as->mrm.base != dest)
+ emit_rr(as, XO_MOV, dest|REX_GC64, as->mrm.base);
+}
+
+/* Inlined hash lookup. Specialized for key type and for const keys.
+** The equivalent C code is:
+** Node *n = hashkey(t, key);
+** do {
+** if (lj_obj_equal(&n->key, key)) return &n->val;
+** } while ((n = nextnode(n)));
+** return niltv(L);
+*/
+static void asm_href(ASMState *as, IRIns *ir, IROp merge)
+{
+ RegSet allow = RSET_GPR;
+ int destused = ra_used(ir);
+ Reg dest = ra_dest(as, ir, allow);
+ Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
+ Reg key = RID_NONE, tmp = RID_NONE;
+ IRIns *irkey = IR(ir->op2);
+ int isk = irref_isk(ir->op2);
+ IRType1 kt = irkey->t;
+ uint32_t khash;
+ MCLabel l_end, l_loop, l_next;
+
+ if (!isk) {
+ rset_clear(allow, tab);
+ key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
+ if (LJ_GC64 || !irt_isstr(kt))
+ tmp = ra_scratch(as, rset_exclude(allow, key));
+ }
+
+ /* Key not found in chain: jump to exit (if merged) or load niltv. */
+ l_end = emit_label(as);
+ if (merge == IR_NE)
+ asm_guardcc(as, CC_E); /* XI_JMP is not found by lj_asm_patchexit. */
+ else if (destused)
+ emit_loada(as, dest, niltvg(J2G(as->J)));
+
+ /* Follow hash chain until the end. */
+ l_loop = emit_sjcc_label(as, CC_NZ);
+ emit_rr(as, XO_TEST, dest|REX_GC64, dest);
+ emit_rmro(as, XO_MOV, dest|REX_GC64, dest, offsetof(Node, next));
+ l_next = emit_label(as);
+
+ /* Type and value comparison. */
+ if (merge == IR_EQ)
+ asm_guardcc(as, CC_E);
+ else
+ emit_sjcc(as, CC_E, l_end);
+ if (irt_isnum(kt)) {
+ if (isk) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_next);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ emit_sjcc(as, CC_P, l_next);
+ emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
+ emit_sjcc(as, CC_AE, l_next);
+ /* The type check avoids NaN penalties and complaints from Valgrind. */
+#if LJ_64 && !LJ_GC64
+ emit_u32(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
+#else
+ emit_i8(as, LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+#endif
+ }
+#if LJ_64 && !LJ_GC64
+ } else if (irt_islightud(kt)) {
+ emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
+#endif
+#if LJ_GC64
+ } else if (irt_isaddr(kt)) {
+ if (isk) {
+ TValue k;
+ k.u64 = ((uint64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
+ k.u32.lo);
+ emit_sjcc(as, CC_NE, l_next);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
+ k.u32.hi);
+ } else {
+ emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64));
+ }
+ } else {
+ lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
+ emit_u32(as, (irt_toitype(kt)<<15)|0x7fff);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
+#else
+ } else {
+ if (!irt_ispri(kt)) {
+ lj_assertA(irt_isaddr(kt), "bad HREF key type");
+ if (isk)
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ else
+ emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
+ emit_sjcc(as, CC_NE, l_next);
+ }
+ lj_assertA(!irt_isnil(kt), "bad HREF key type");
+ emit_i8(as, irt_toitype(kt));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
+#endif
+ }
+ emit_sfixup(as, l_loop);
+ checkmclim(as);
+#if LJ_GC64
+ if (!isk && irt_isaddr(kt)) {
+ emit_rr(as, XO_OR, tmp|REX_64, key);
+ emit_loadu64(as, tmp, (uint64_t)irt_toitype(kt) << 47);
+ }
+#endif
+
+ /* Load main position relative to tab->node into dest. */
+ khash = isk ? ir_khash(as, irkey) : 1;
+ if (khash == 0) {
+ emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node));
+ } else {
+ emit_rmro(as, XO_ARITH(XOg_ADD), dest|REX_GC64, tab, offsetof(GCtab,node));
+ emit_shifti(as, XOg_SHL, dest, 3);
+ emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
+ if (isk) {
+ emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else if (irt_isstr(kt)) {
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, sid));
+ emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
+ } else { /* Must match with hashrot() in lj_tab.c. */
+ emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
+ emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
+ emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
+ emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
+ emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
+ emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
+ emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
+ if (irt_isnum(kt)) {
+ emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
+#if LJ_64
+ emit_shifti(as, XOg_SHR|REX_64, dest, 32);
+ emit_rr(as, XO_MOV, tmp, dest);
+ emit_rr(as, XO_MOVDto, key|REX_64, dest);
+#else
+ emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
+ emit_rr(as, XO_MOVDto, key, tmp);
+#endif
+ } else {
+ emit_rr(as, XO_MOV, tmp, key);
+#if LJ_GC64
+ checkmclim(as);
+ emit_gri(as, XG_ARITHi(XOg_XOR), dest, irt_toitype(kt) << 15);
+ if ((as->flags & JIT_F_BMI2)) {
+ emit_i8(as, 32);
+ emit_mrm(as, XV_RORX|VEX_64, dest, key);
+ } else {
+ emit_shifti(as, XOg_SHR|REX_64, dest, 32);
+ emit_rr(as, XO_MOV, dest|REX_64, key|REX_64);
+ }
+#else
+ emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
+#endif
+ }
+ }
+ }
+}
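+
+/* Editor's note: the open-coded mix above must agree bit-for-bit with
+** hashrot() in lj_tab.c, which is (paraphrased sketch; see lj_tab.c for
+** the authoritative definition):
+**
+**   lo ^= hi; hi = rol(hi, HASH_ROT1);
+**   lo -= hi; hi = rol(hi, HASH_ROT2);
+**   hi ^= lo; hi -= rol(lo, HASH_ROT3);
+**   n = &t->node[hi & t->hmask];
+*/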
+
+static void asm_hrefk(ASMState *as, IRIns *ir)
+{
+ IRIns *kslot = IR(ir->op2);
+ IRIns *irkey = IR(kslot->op1);
+ int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
+ Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
+ Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
+#if !LJ_64
+ MCLabel l_exit;
+#endif
+ lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
+ if (ra_hasreg(dest)) {
+ if (ofs != 0) {
+ if (dest == node)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, ofs);
+ else
+ emit_rmro(as, XO_LEA, dest|REX_GC64, node, ofs);
+ } else if (dest != node) {
+ emit_rr(as, XO_MOV, dest|REX_GC64, node);
+ }
+ }
+ asm_guardcc(as, CC_NE);
+#if LJ_64
+ if (!irt_ispri(irkey->t)) {
+ Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
+ emit_rmro(as, XO_CMP, key|REX_64, node,
+ ofs + (int32_t)offsetof(Node, key.u64));
+ lj_assertA(irt_isnum(irkey->t) || irt_isgcv(irkey->t),
+ "bad HREFK key type");
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
+#if LJ_GC64
+ ((uint64_t)irt_toitype(irkey->t) << 47) |
+ (uint64_t)ir_kgc(irkey));
+#else
+ ((uint64_t)irt_toitype(irkey->t) << 32) |
+ (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
+#endif
+ } else {
+ lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
+#if LJ_GC64
+ emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+#else
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+#endif
+ }
+#else
+ l_exit = emit_label(as);
+ if (irt_isnum(irkey->t)) {
+ /* Assumes -0.0 is already canonicalized to +0.0. */
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.lo),
+ (int32_t)ir_knum(irkey)->u32.lo);
+ emit_sjcc(as, CC_NE, l_exit);
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.u32.hi),
+ (int32_t)ir_knum(irkey)->u32.hi);
+ } else {
+ if (!irt_ispri(irkey->t)) {
+ lj_assertA(irt_isgcv(irkey->t), "bad HREFK key type");
+ emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
+ ofs + (int32_t)offsetof(Node, key.gcr),
+ ptr2addr(ir_kgc(irkey)));
+ emit_sjcc(as, CC_NE, l_exit);
+ }
+ lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
+ emit_i8(as, irt_toitype(irkey->t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
+ ofs + (int32_t)offsetof(Node, key.it));
+ }
+#endif
+}
+
+static void asm_uref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn = ir_kfunc(IR(ir->op1));
+ MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
+ emit_rma(as, XO_MOV, dest|REX_GC64, v);
+ } else {
+ Reg uv = ra_scratch(as, RSET_GPR);
+ Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_UREFC) {
+ emit_rmro(as, XO_LEA, dest|REX_GC64, uv, offsetof(GCupval, tv));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, 1);
+ emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
+ } else {
+ emit_rmro(as, XO_MOV, dest|REX_GC64, uv, offsetof(GCupval, v));
+ }
+ emit_rmro(as, XO_MOV, uv|REX_GC64, func,
+ (int32_t)offsetof(GCfuncL, uvptr) +
+ (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
+ }
+}
+
+static void asm_fref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusefref(as, ir, RSET_GPR);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+}
+
+static void asm_strref(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ asm_fusestrref(as, ir, RSET_GPR);
+ if (as->mrm.base == RID_NONE)
+ emit_loadi(as, dest, as->mrm.ofs);
+ else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
+ emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, as->mrm.ofs);
+ else
+ emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
+}
+
+/* -- Loads and stores ---------------------------------------------------- */
+
+static void asm_fxload(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
+ x86Op xo;
+ if (ir->o == IR_FLOAD)
+ asm_fusefref(as, ir, RSET_GPR);
+ else
+ asm_fusexref(as, ir->op1, RSET_GPR);
+ /* ir->op2 is ignored -- unaligned loads are ok on x86. */
+ switch (irt_type(ir->t)) {
+ case IRT_I8: xo = XO_MOVSXb; break;
+ case IRT_U8: xo = XO_MOVZXb; break;
+ case IRT_I16: xo = XO_MOVSXw; break;
+ case IRT_U16: xo = XO_MOVZXw; break;
+ case IRT_NUM: xo = XO_MOVSD; break;
+ case IRT_FLOAT: xo = XO_MOVSS; break;
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ dest |= REX_64;
+ else
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
+ "unsplit 64 bit load");
+ xo = XO_MOV;
+ break;
+ }
+ emit_mrm(as, xo, dest, RID_MRM);
+}
+
+#define asm_fload(as, ir) asm_fxload(as, ir)
+#define asm_xload(as, ir) asm_fxload(as, ir)
+
+static void asm_fxstore(ASMState *as, IRIns *ir)
+{
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE, osrc = RID_NONE;
+ int32_t k = 0;
+ if (ir->r == RID_SINK)
+ return;
+ /* The IRT_I16/IRT_U16 stores should never be simplified for constant
+ ** values since mov word [mem], imm16 has a length-changing prefix.
+ */
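+ /* A sketch of why (assuming Intel's LCP terminology applies here):
+ ** mov word [eax], 0x1234 encodes as 66 C7 00 34 12. The 0x66 operand
+ ** size prefix shrinks the immediate from 4 to 2 bytes, a length-changing
+ ** prefix that stalls the pre-decoder on many x86 cores.
+ */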
+ if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
+ !asm_isk32(as, ir->op2, &k)) {
+ RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
+ (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
+ src = osrc = ra_alloc1(as, ir->op2, allow8);
+ if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
+ rset_clear(allow, osrc);
+ src = ra_scratch(as, allow8);
+ }
+ rset_clear(allow, src);
+ }
+ if (ir->o == IR_FSTORE) {
+ asm_fusefref(as, IR(ir->op1), allow);
+ } else {
+ asm_fusexref(as, ir->op1, allow);
+ if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
+ }
+ if (ra_hasreg(src)) {
+ x86Op xo;
+ switch (irt_type(ir->t)) {
+ case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
+ case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
+ case IRT_NUM: xo = XO_MOVSDto; break;
+ case IRT_FLOAT: xo = XO_MOVSSto; break;
+#if LJ_64 && !LJ_GC64
+ case IRT_LIGHTUD:
+ /* NYI: mask 64 bit lightuserdata. */
+ lj_assertA(0, "store of lightuserdata");
+#endif
+ default:
+ if (LJ_64 && irt_is64(ir->t))
+ src |= REX_64;
+ else
+ lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
+ "unsplit 64 bit store");
+ xo = XO_MOVto;
+ break;
+ }
+ emit_mrm(as, xo, src, RID_MRM);
+ if (!LJ_64 && src != osrc) {
+ ra_noweak(as, osrc);
+ emit_rr(as, XO_MOV, src, osrc);
+ }
+ } else {
+ if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
+ emit_i8(as, k);
+ emit_mrm(as, XO_MOVmib, 0, RID_MRM);
+ } else {
+ lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
+ irt_isaddr(ir->t), "bad store type");
+ emit_i32(as, k);
+ emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
+ }
+ }
+}
+
+#define asm_fstore(as, ir) asm_fxstore(as, ir)
+#define asm_xstore(as, ir) asm_fxstore(as, ir)
+
+#if LJ_64 && !LJ_GC64
+static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
+{
+ if (ra_used(ir) || typecheck) {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ if (typecheck) {
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, -2);
+ emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
+ emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
+ emit_rr(as, XO_MOV, tmp|REX_64, dest);
+ }
+ return dest;
+ } else {
+ return RID_NONE;
+ }
+}
+#endif
+
+static void asm_ahuvload(ASMState *as, IRIns *ir)
+{
+#if LJ_GC64
+ Reg tmp = RID_NONE;
+#endif
+ lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isint(ir->t)),
+ "bad load type %d", irt_type(ir->t));
+#if LJ_64 && !LJ_GC64
+ if (irt_islightud(ir->t)) {
+ Reg dest = asm_load_lightud64(as, ir, 1);
+ if (ra_hasreg(dest)) {
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
+ emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
+ }
+ return;
+ } else
+#endif
+ if (ra_used(ir)) {
+ RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
+#if LJ_GC64
+ if (irt_isaddr(ir->t)) {
+ emit_shifti(as, XOg_SHR|REX_64, dest, 17);
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, irt_toitype(ir->t));
+ emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
+ emit_i8(as, XI_O16);
+ if ((as->flags & JIT_F_BMI2)) {
+ emit_i8(as, 47);
+ emit_mrm(as, XV_RORX|VEX_64, dest, RID_MRM);
+ } else {
+ emit_shifti(as, XOg_ROR|REX_64, dest, 47);
+ emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
+ }
+ return;
+ } else
+#endif
+ emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM);
+ } else {
+ RegSet gpr = RSET_GPR;
+#if LJ_GC64
+ if (irt_isaddr(ir->t)) {
+ tmp = ra_scratch(as, RSET_GPR);
+ gpr = rset_exclude(gpr, tmp);
+ }
+#endif
+ asm_fuseahuref(as, ir->op1, gpr);
+ if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
+ }
+ /* Always do the type check, even if the load result is unused. */
+ as->mrm.ofs += 4;
+ asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
+ if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
+ lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
+ "bad load type %d", irt_type(ir->t));
+#if LJ_GC64
+ emit_u32(as, LJ_TISNUM << 15);
+#else
+ emit_u32(as, LJ_TISNUM);
+#endif
+ emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
+#if LJ_GC64
+ } else if (irt_isaddr(ir->t)) {
+ as->mrm.ofs -= 4;
+ emit_i8(as, irt_toitype(ir->t));
+ emit_mrm(as, XO_ARITHi8, XOg_CMP, tmp);
+ emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
+ emit_mrm(as, XO_MOV, tmp|REX_64, RID_MRM);
+ } else if (irt_isnil(ir->t)) {
+ as->mrm.ofs -= 4;
+ emit_i8(as, -1);
+ emit_mrm(as, XO_ARITHi8, XOg_CMP|REX_64, RID_MRM);
+ } else {
+ emit_u32(as, (irt_toitype(ir->t) << 15) | 0x7fff);
+ emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
+#else
+ } else {
+ emit_i8(as, irt_toitype(ir->t));
+ emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
+#endif
+ }
+}
+
+static void asm_ahustore(ASMState *as, IRIns *ir)
+{
+ if (ir->r == RID_SINK)
+ return;
+ if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ emit_mrm(as, XO_MOVSDto, src, RID_MRM);
+#if LJ_64 && !LJ_GC64
+ } else if (irt_islightud(ir->t)) {
+ Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
+ asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
+ emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
+#endif
+#if LJ_GC64
+ } else if (irref_isk(ir->op2)) {
+ TValue k;
+ lj_ir_kvalue(as->J->L, &k, IR(ir->op2));
+ asm_fuseahuref(as, ir->op1, RSET_GPR);
+ if (tvisnil(&k)) {
+ emit_i32(as, -1);
+ emit_mrm(as, XO_MOVmi, REX_64, RID_MRM);
+ } else {
+ emit_u32(as, k.u32.lo);
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ as->mrm.ofs += 4;
+ emit_u32(as, k.u32.hi);
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+#endif
+ } else {
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg src = RID_NONE;
+ if (!irref_isk(ir->op2)) {
+ src = ra_alloc1(as, ir->op2, allow);
+ rset_clear(allow, src);
+ }
+ asm_fuseahuref(as, ir->op1, allow);
+ if (ra_hasreg(src)) {
+#if LJ_GC64
+ if (!(LJ_DUALNUM && irt_isinteger(ir->t))) {
+ /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
+ as->mrm.ofs += 4;
+ emit_u32(as, irt_toitype(ir->t) << 15);
+ emit_mrm(as, XO_ARITHi, XOg_OR, RID_MRM);
+ as->mrm.ofs -= 4;
+ emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
+ return;
+ }
+#endif
+ emit_mrm(as, XO_MOVto, src, RID_MRM);
+ } else if (!irt_ispri(irr->t)) {
+ lj_assertA(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)),
+ "bad store type");
+ emit_i32(as, irr->i);
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+ as->mrm.ofs += 4;
+#if LJ_GC64
+ lj_assertA(LJ_DUALNUM && irt_isinteger(ir->t), "bad store type");
+ emit_i32(as, LJ_TNUMX << 15);
+#else
+ emit_i32(as, (int32_t)irt_toitype(ir->t));
+#endif
+ emit_mrm(as, XO_MOVmi, 0, RID_MRM);
+ }
+}
+
+static void asm_sload(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
+ (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
+ IRType1 t = ir->t;
+ Reg base;
+ lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
+ "bad parent SLOAD"); /* Handled by asm_head_side(). */
+ lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
+ "inconsistent SLOAD variant");
+ lj_assertA(LJ_DUALNUM ||
+ !irt_isint(t) ||
+ (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME|IRSLOAD_KEYINDEX)),
+ "bad SLOAD type");
+ if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
+ Reg left = ra_scratch(as, RSET_FPR);
+ asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XO_MOVSD, left, base, ofs);
+ t.irt = IRT_NUM; /* Continue with a regular number type check. */
+#if LJ_64 && !LJ_GC64
+ } else if (irt_islightud(t)) {
+ Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
+ if (ra_hasreg(dest)) {
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
+ }
+ return;
+#endif
+ } else if (ra_used(ir)) {
+ RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
+ Reg dest = ra_dest(as, ir, allow);
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
+ "bad SLOAD type %d", irt_type(t));
+ if ((ir->op2 & IRSLOAD_CONVERT)) {
+ t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
+ emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs);
+ } else {
+#if LJ_GC64
+ if (irt_isaddr(t)) {
+ /* LJ_GC64 type check + tag removal without BMI2 and with BMI2:
+ **
+ **  mov r64, [addr]     rorx r64, [addr], 47
+ **  ror r64, 47
+ **  cmp r16, itype      cmp r16, itype
+ **  jne ->exit          jne ->exit
+ **  shr r64, 16         shr r64, 16
+ */
+ emit_shifti(as, XOg_SHR|REX_64, dest, 17);
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, irt_toitype(t));
+ emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
+ emit_i8(as, XI_O16);
+ }
+ if ((as->flags & JIT_F_BMI2)) {
+ emit_i8(as, 47);
+ emit_rmro(as, XV_RORX|VEX_64, dest, base, ofs);
+ } else {
+ if ((ir->op2 & IRSLOAD_TYPECHECK))
+ emit_shifti(as, XOg_ROR|REX_64, dest, 47);
+ else
+ emit_shifti(as, XOg_SHL|REX_64, dest, 17);
+ emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
+ }
+ return;
+ } else
+#endif
+ emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs);
+ }
+ } else {
+ if (!(ir->op2 & IRSLOAD_TYPECHECK))
+ return; /* No type check: avoid base alloc. */
+ base = ra_alloc1(as, REF_BASE, RSET_GPR);
+ }
+ if ((ir->op2 & IRSLOAD_TYPECHECK)) {
+ /* Need type check, even if the load result is unused. */
+ asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
+ if ((LJ_64 && irt_type(t) >= IRT_NUM) || (ir->op2 & IRSLOAD_KEYINDEX)) {
+ lj_assertA(irt_isinteger(t) || irt_isnum(t),
+ "bad SLOAD type %d", irt_type(t));
+ emit_u32(as, (ir->op2 & IRSLOAD_KEYINDEX) ? LJ_KEYINDEX :
+ LJ_GC64 ? (LJ_TISNUM << 15) : LJ_TISNUM);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
+#if LJ_GC64
+ } else if (irt_isnil(t)) {
+ /* LJ_GC64 type check for nil:
+ **
+ ** cmp qword [addr], -1
+ ** jne ->exit
+ */
+ emit_i8(as, -1);
+ emit_rmro(as, XO_ARITHi8, XOg_CMP|REX_64, base, ofs);
+ } else if (irt_ispri(t)) {
+ emit_u32(as, (irt_toitype(t) << 15) | 0x7fff);
+ emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
+ } else {
+ /* LJ_GC64 type check only:
+ **
+ ** mov r64, [addr]
+ ** sar r64, 47
+ ** cmp r32, itype
+ ** jne ->exit
+ */
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, base));
+ emit_i8(as, irt_toitype(t));
+ emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
+ emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
+ emit_rmro(as, XO_MOV, tmp|REX_64, base, ofs);
+#else
+ } else {
+ emit_i8(as, irt_toitype(t));
+ emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
+#endif
+ }
+ }
+}
+
+/* -- Allocations --------------------------------------------------------- */
+
+#if LJ_HASFFI
+static void asm_cnew(ASMState *as, IRIns *ir)
+{
+ CTState *cts = ctype_ctsG(J2G(as->J));
+ CTypeID id = (CTypeID)IR(ir->op1)->i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
+ IRRef args[4];
+ lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
+ "bad CNEW/CNEWI operands");
+
+ as->gcsteps++;
+ asm_setupresult(as, ir, ci); /* GCcdata * */
+
+ /* Initialize immutable cdata object. */
+ if (ir->o == IR_CNEWI) {
+ RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
+#if LJ_64
+ Reg r64 = sz == 8 ? REX_64 : 0;
+ if (irref_isk(ir->op2)) {
+ IRIns *irk = IR(ir->op2);
+ uint64_t k = (irk->o == IR_KINT64 ||
+ (LJ_GC64 && (irk->o == IR_KPTR || irk->o == IR_KKPTR))) ?
+ ir_k64(irk)->u64 : (uint64_t)(uint32_t)irk->i;
+ if (sz == 4 || checki32((int64_t)k)) {
+ emit_i32(as, (int32_t)k);
+ emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
+ } else {
+ emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
+ emit_loadu64(as, RID_ECX, k);
+ }
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
+ }
+#else
+ int32_t ofs = sizeof(GCcdata);
+ if (sz == 8) {
+ ofs += 4; ir++;
+ lj_assertA(ir->o == IR_HIOP, "missing CNEWI HIOP");
+ }
+ do {
+ if (irref_isk(ir->op2)) {
+ emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
+ } else {
+ Reg r = ra_alloc1(as, ir->op2, allow);
+ emit_movtomro(as, r, RID_RET, ofs);
+ rset_clear(allow, r);
+ }
+ if (ofs == sizeof(GCcdata)) break;
+ ofs -= 4; ir--;
+ } while (1);
+#endif
+ lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
+ } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
+ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ir->op1; /* CTypeID id */
+ args[2] = ir->op2; /* CTSize sz */
+ args[3] = ASMREF_TMP1; /* CTSize align */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
+ return;
+ }
+
+ /* Combine initialization of marked, gct and ctypeid. */
+ emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
+ emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
+ (int32_t)((~LJ_TCDATA<<8)+(id<<16)));
+ emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
+ emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
+
+ args[0] = ASMREF_L; /* lua_State *L */
+ args[1] = ASMREF_TMP1; /* MSize size */
+ asm_gencall(as, ci, args);
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
+}
+#endif
+
+/* -- Write barriers ------------------------------------------------------ */
+
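+/* Note for the barrier code below (true of this assembler generally):
+** machine code is emitted backwards, i.e. as->mcp grows downwards. So the
+** emit_* calls appear in reverse of execution order and the label from an
+** initial emit_label() marks the *end* of the emitted sequence.
+*/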
+static void asm_tbar(ASMState *as, IRIns *ir)
+{
+ Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
+ Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
+ MCLabel l_end = emit_label(as);
+ emit_movtomro(as, tmp|REX_GC64, tab, offsetof(GCtab, gclist));
+ emit_setgl(as, tab, gc.grayagain);
+ emit_getgl(as, tmp, gc.grayagain);
+ emit_i8(as, ~LJ_GC_BLACK);
+ emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
+}
+
+static void asm_obar(ASMState *as, IRIns *ir)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg obj;
+ /* No need for other object barriers (yet). */
+ lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ir->op1; /* TValue *tv */
+ asm_gencall(as, ci, args);
+ emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
+ obj = IR(ir->op1)->r;
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_WHITES);
+ if (irref_isk(ir->op2)) {
+ GCobj *vp = ir_kgc(IR(ir->op2));
+ emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
+ } else {
+ Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
+ }
+ emit_sjcc(as, CC_Z, l_end);
+ emit_i8(as, LJ_GC_BLACK);
+ emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
+ (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
+}
+
+/* -- FP/int arithmetic and logic operations ------------------------------ */
+
+/* Load reference onto x87 stack. Force a spill to memory if needed. */
+static void asm_x87load(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KNUM) {
+ cTValue *tv = ir_knum(ir);
+ if (tvispzero(tv)) /* Use fldz only for +0. */
+ emit_x87op(as, XI_FLDZ);
+ else if (tvispone(tv))
+ emit_x87op(as, XI_FLD1);
+ else
+ emit_rma(as, XO_FLDq, XOg_FLDq, tv);
+ } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
+ !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
+ IRIns *iri = IR(ir->op1);
+ emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
+ } else {
+ emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
+ }
+}
+
+static void asm_fpmath(ASMState *as, IRIns *ir)
+{
+ IRFPMathOp fpm = (IRFPMathOp)ir->op2;
+ if (fpm == IRFPM_SQRT) {
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ emit_mrm(as, XO_SQRTSD, dest, left);
+ } else if (fpm <= IRFPM_TRUNC) {
+ if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
+ Reg dest = ra_dest(as, ir, RSET_FPR);
+ Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
+ /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
+ ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
+ ** This is atrocious, but the alternatives are much worse.
+ */
+ /* Round down/up/trunc == 1001/1010/1011. */
+ emit_i8(as, 0x09 + fpm);
+ emit_mrm(as, XO_ROUNDSD, dest, left);
+ if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
+ as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
+ }
+ *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
+ } else { /* Call helper functions for SSE2 variant. */
+ /* The modified regs must match with the *.dasc implementation. */
+ RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
+ if (ra_hasreg(ir->r))
+ rset_clear(drop, ir->r); /* Dest reg handled below. */
+ ra_evictset(as, drop);
+ ra_destreg(as, ir, RID_XMM0);
+ emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
+ fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
+ ra_left(as, RID_XMM0, ir->op1);
+ }
+ } else {
+ asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
+ }
+}
+
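+/* Sketch of the execution order for ldexp(x, e) below (the reverse of
+** the emission order): push e, push x, then fscale yields
+** st0 = x * 2^trunc(e); pop the exponent, store st0 to the stack slot
+** and reload it into the SSE destination register, if any.
+*/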
+static void asm_ldexp(ASMState *as, IRIns *ir)
+{
+ int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
+ Reg dest = ir->r;
+ if (ra_hasreg(dest)) {
+ ra_free(as, dest);
+ ra_modified(as, dest);
+ emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs);
+ }
+ emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
+ emit_x87op(as, XI_FPOP1);
+ emit_x87op(as, XI_FSCALE);
+ asm_x87load(as, ir->op1);
+ asm_x87load(as, ir->op2);
+}
+
+static int asm_swapops(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ lj_assertA(ra_noreg(irr->r), "bad usage");
+ if (!irm_iscomm(lj_ir_mode[ir->o]))
+ return 0; /* Can't swap non-commutative operations. */
+ if (irref_isk(ir->op2))
+ return 0; /* Don't swap constants to the left. */
+ if (ra_hasreg(irl->r))
+ return 1; /* Swap if left already has a register. */
+ if (ra_samehint(ir->r, irr->r))
+ return 1; /* Swap if dest and right have matching hints. */
+ if (as->curins > as->loopref) { /* In variant part? */
+ if (ir->op2 < as->loopref && !irt_isphi(irr->t))
+ return 0; /* Keep invariants on the right. */
+ if (ir->op1 < as->loopref && !irt_isphi(irl->t))
+ return 1; /* Swap invariants to the right. */
+ }
+ if (opisfusableload(irl->o))
+ return 1; /* Swap fusable loads to the right. */
+ return 0; /* Otherwise don't swap. */
+}
+
+static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_FPR;
+ Reg dest;
+ Reg right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseload(as, rref, rset_clear(allow, dest));
+ }
+ emit_mrm(as, xo, dest, right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
+{
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ RegSet allow = RSET_GPR;
+ Reg dest, right;
+ int32_t k = 0;
+ if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
+ MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
+ MCode *q = p[0] == 0x0f ? p+1 : p;
+ if ((*q & 15) < 14) {
+ if ((*q & 15) >= 12) *q -= 4; /* L <-> S, NL <-> NS */
+ as->flagmcp = NULL;
+ as->mcp = p;
+ } /* else: cannot transform LE/NLE to cc without use of OF. */
+ }
+ right = IR(rref)->r;
+ if (ra_hasreg(right)) {
+ rset_clear(allow, right);
+ ra_noweak(as, right);
+ }
+ dest = ra_dest(as, ir, allow);
+ if (lref == rref) {
+ right = dest;
+ } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
+ if (asm_swapops(as, ir)) {
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ }
+ right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
+ }
+ if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
+ asm_guardcc(as, CC_O);
+ if (xa != XOg_X_IMUL) {
+ if (ra_hasreg(right))
+ emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
+ else
+ emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
+ } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
+ emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
+ } else { /* IMUL r, r, k. */
+ /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
+ Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
+ x86Op xo;
+ if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
+ } else { emit_i32(as, k); xo = XO_IMULi; }
+ emit_mrm(as, xo, REX_64IR(ir, dest), left);
+ return;
+ }
+ ra_left(as, dest, lref);
+}
+
+/* LEA is really a 4-operand ADD with an independent destination register,
+** up to two source registers and an immediate. One register can be scaled
+** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
+** instructions.
+**
+** Currently only a few common cases are supported:
+** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
+** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
+** - Right ADD fusion: y = a+(b+k)
+** The omitted variants have already been reduced by FOLD.
+**
+** There are more fusion opportunities, like gathering shifts or joining
+** common references. But these are probably not worth the trouble, since
+** array indexing is not decomposed and already makes use of all fields
+** of the ModRM operand.
+*/
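+/* A sketch of the supported shapes (a, b in registers, k a constant):
+**
+**   y = a+b        lea y, [a+b]
+**   y = a+k        lea y, [a+k]
+**   y = (a+b)+k    lea y, [a+b+k]
+*/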
+static int asm_lea(ASMState *as, IRIns *ir)
+{
+ IRIns *irl = IR(ir->op1);
+ IRIns *irr = IR(ir->op2);
+ RegSet allow = RSET_GPR;
+ Reg dest;
+ as->mrm.base = as->mrm.idx = RID_NONE;
+ as->mrm.scale = XM_SCALE1;
+ as->mrm.ofs = 0;
+ if (ra_hasreg(irl->r)) {
+ rset_clear(allow, irl->r);
+ ra_noweak(as, irl->r);
+ as->mrm.base = irl->r;
+ if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
+ /* The PHI renaming logic does a better job in some cases. */
+ if (ra_hasreg(ir->r) &&
+ ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
+ (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
+ return 0;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ } else {
+ rset_clear(allow, irr->r);
+ ra_noweak(as, irr->r);
+ as->mrm.idx = irr->r;
+ }
+ } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
+ irref_isk(irr->op2)) {
+ Reg idx = ra_alloc1(as, irr->op1, allow);
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ as->mrm.ofs = IR(irr->op2)->i;
+ } else {
+ return 0;
+ }
+ } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
+ (irref_isk(ir->op2) || irref_isk(irl->op2))) {
+ Reg idx, base = ra_alloc1(as, irl->op1, allow);
+ rset_clear(allow, base);
+ as->mrm.base = (uint8_t)base;
+ if (irref_isk(ir->op2)) {
+ as->mrm.ofs = irr->i;
+ idx = ra_alloc1(as, irl->op2, allow);
+ } else {
+ as->mrm.ofs = IR(irl->op2)->i;
+ idx = ra_alloc1(as, ir->op2, allow);
+ }
+ rset_clear(allow, idx);
+ as->mrm.idx = (uint8_t)idx;
+ } else {
+ return 0;
+ }
+ dest = ra_dest(as, ir, allow);
+ emit_mrm(as, XO_LEA, dest, RID_MRM);
+ return 1; /* Success. */
+}
+
+static void asm_add(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_ADDSD);
+ else if (as->flagmcp == as->mcp || irt_is64(ir->t) || !asm_lea(as, ir))
+ asm_intarith(as, ir, XOg_ADD);
+}
+
+static void asm_sub(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_SUBSD);
+ else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
+ asm_intarith(as, ir, XOg_SUB);
+}
+
+static void asm_mul(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MULSD);
+ else
+ asm_intarith(as, ir, XOg_X_IMUL);
+}
+
+#define asm_fpdiv(as, ir) asm_fparith(as, ir, XO_DIVSD)
+
+static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
+ ra_left(as, dest, ir->op1);
+}
+
+static void asm_neg(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_XORPS);
+ else
+ asm_neg_not(as, ir, XOg_NEG);
+}
+
+#define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS)
+
+static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
+{
+ Reg right, dest = ra_dest(as, ir, RSET_GPR);
+ IRRef lref = ir->op1, rref = ir->op2;
+ if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
+ right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
+ emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
+ emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
+ ra_left(as, dest, lref);
+}
+
+static void asm_min(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MINSD);
+ else
+ asm_intmin_max(as, ir, CC_G);
+}
+
+static void asm_max(ASMState *as, IRIns *ir)
+{
+ if (irt_isnum(ir->t))
+ asm_fparith(as, ir, XO_MAXSD);
+ else
+ asm_intmin_max(as, ir, CC_L);
+}
+
+/* Note: don't use LEA for overflow-checking arithmetic, since LEA
+** does not set the overflow flag!
+*/
+#define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD)
+#define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB)
+#define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL)
+
+#define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT)
+
+static void asm_bswap(ASMState *as, IRIns *ir)
+{
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
+ REX_64IR(ir, 0), dest, 0, as->mcp, 1);
+ ra_left(as, dest, ir->op1);
+}
+
+#define asm_band(as, ir) asm_intarith(as, ir, XOg_AND)
+#define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR)
+#define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR)
+
+static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs, x86Op xv)
+{
+ IRRef rref = ir->op2;
+ IRIns *irr = IR(rref);
+ Reg dest;
+ if (irref_isk(rref)) { /* Constant shifts. */
+ int shift;
+ dest = ra_dest(as, ir, RSET_GPR);
+ shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
+ if (!xv && shift && (as->flags & JIT_F_BMI2)) {
+ Reg left = asm_fuseloadm(as, ir->op1, RSET_GPR, irt_is64(ir->t));
+ if (left != dest) { /* BMI2 rotate right by constant. */
+ emit_i8(as, xs == XOg_ROL ? -shift : shift);
+ emit_mrm(as, VEX_64IR(ir, XV_RORX), dest, left);
+ return;
+ }
+ }
+ switch (shift) {
+ case 0: break;
+ case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
+ default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
+ }
+ } else if ((as->flags & JIT_F_BMI2) && xv) { /* BMI2 variable shifts. */
+ Reg left, right;
+ dest = ra_dest(as, ir, RSET_GPR);
+ right = ra_alloc1(as, rref, RSET_GPR);
+ left = asm_fuseloadm(as, ir->op1, rset_exclude(RSET_GPR, right),
+ irt_is64(ir->t));
+ emit_mrm(as, VEX_64IR(ir, xv) ^ (right << 19), dest, left);
+ return;
+ } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
+ Reg right;
+ dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
+ if (dest == RID_ECX) {
+ dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
+ emit_rr(as, XO_MOV, REX_64IR(ir, RID_ECX), dest);
+ }
+ right = irr->r;
+ if (ra_noreg(right))
+ right = ra_allocref(as, rref, RID2RSET(RID_ECX));
+ else if (right != RID_ECX)
+ ra_scratch(as, RID2RSET(RID_ECX));
+ emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
+ ra_noweak(as, right);
+ if (right != RID_ECX)
+ emit_rr(as, XO_MOV, RID_ECX, right);
+ }
+ ra_left(as, dest, ir->op1);
+ /*
+ ** Note: avoid using the flags resulting from a shift or rotate!
+ ** All of them cause a partial flag stall, except for r,1 shifts
+ ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
+ */
+}
+
+#define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL, XV_SHLX)
+#define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR, XV_SHRX)
+#define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR, XV_SARX)
+#define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL, 0)
+#define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR, 0)
+
+/* -- Comparisons --------------------------------------------------------- */
+
+/* Virtual flags for unordered FP comparisons. */
+#define VCC_U 0x1000 /* Unordered. */
+#define VCC_P 0x2000 /* Needs extra CC_P branch. */
+#define VCC_S 0x4000 /* Swap avoids CC_P branch. */
+#define VCC_PS (VCC_P|VCC_S)
+
+/* Map of comparisons to flags. ORDER IR. */
+#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
+static const uint16_t asm_compmap[IR_ABC+1] = {
+ /*                signed non-eq unsigned flags */
+ /* LT  */ COMPFLAGS(CC_GE, CC_G,  CC_AE, VCC_PS),
+ /* GE  */ COMPFLAGS(CC_L,  CC_L,  CC_B,  0),
+ /* LE  */ COMPFLAGS(CC_G,  CC_G,  CC_A,  VCC_PS),
+ /* GT  */ COMPFLAGS(CC_LE, CC_L,  CC_BE, 0),
+ /* ULT */ COMPFLAGS(CC_AE, CC_A,  CC_AE, VCC_U),
+ /* UGE */ COMPFLAGS(CC_B,  CC_B,  CC_B,  VCC_U|VCC_PS),
+ /* ULE */ COMPFLAGS(CC_A,  CC_A,  CC_A,  VCC_U),
+ /* UGT */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS),
+ /* EQ  */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
+ /* NE  */ COMPFLAGS(CC_E,  CC_E,  CC_E,  VCC_U|VCC_P),
+ /* ABC */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
+};
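+/* Decoding sketch for an asm_compmap entry (see COMPFLAGS above):
+** bits 0-3 hold the inverted condition for the guard branch, bits 4-7
+** the unsigned variant, bits 8-11 the variant without the equality check
+** and bits 12-14 the VCC_* flags for unordered FP handling.
+*/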
+
+/* FP and integer comparisons. */
+static void asm_comp(ASMState *as, IRIns *ir)
+{
+ uint32_t cc = asm_compmap[ir->o];
+ if (irt_isnum(ir->t)) {
+ IRRef lref = ir->op1;
+ IRRef rref = ir->op2;
+ Reg left, right;
+ MCLabel l_around;
+ /*
+ ** An extra CC_P branch is required to preserve ordered/unordered
+ ** semantics for FP comparisons. This can be avoided by swapping
+ ** the operands and inverting the condition (except for EQ and UNE).
+ ** So always try to swap if possible.
+ **
+ ** Another option would be to swap operands to achieve better memory
+ ** operand fusion. But it's unlikely that this outweighs the cost
+ ** of the extra branches.
+ */
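+ /* Example: a < b via ucomisd a, b + jb would also be taken when
+ ** unordered, since a NaN operand sets ZF, PF and CF. Swapping to
+ ** ucomisd b, a + ja is false for NaN with no extra PF branch needed.
+ */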
+ if (cc & VCC_S) { /* Swap? */
+ IRRef tmp = lref; lref = rref; rref = tmp;
+ cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
+ }
+ left = ra_alloc1(as, lref, RSET_FPR);
+ l_around = emit_label(as);
+ asm_guardcc(as, cc >> 4);
+ if (cc & VCC_P) { /* Extra CC_P branch required? */
+ if (!(cc & VCC_U)) {
+ asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
+ } else if (l_around != as->invmcp) {
+ emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
+ } else {
+ /* Patched to mcloop by asm_loop_fixup. */
+ as->loopinv = 2;
+ if (as->realign)
+ emit_sjcc(as, CC_P, as->mcp);
+ else
+ emit_jcc(as, CC_P, as->mcp);
+ }
+ }
+ right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
+ emit_mrm(as, XO_UCOMISD, left, right);
+ } else {
+ IRRef lref = ir->op1, rref = ir->op2;
+ IROp leftop = (IROp)(IR(lref)->o);
+ Reg r64 = REX_64IR(ir, 0);
+ int32_t imm = 0;
+ lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
+ irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
+ "bad comparison data type %d", irt_type(ir->t));
+ /* Swap constants (only for ABC) and fusable loads to the right. */
+ if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
+ if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
+ else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
+ lref = ir->op2; rref = ir->op1;
+ }
+ if (asm_isk32(as, rref, &imm)) {
+ IRIns *irl = IR(lref);
+ /* Check whether we can use the test instruction. Not for unsigned, since CF=0. */
+ int usetest = (imm == 0 && (cc & 0xa) != 0x2);
+ if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
+ /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
+ Reg right, left = RID_NONE;
+ RegSet allow = RSET_GPR;
+ if (!asm_isk32(as, irl->op2, &imm)) {
+ left = ra_alloc1(as, irl->op2, allow);
+ rset_clear(allow, left);
+ } else { /* Try to Fuse IRT_I8/IRT_U8 loads, too. See below. */
+ IRIns *irll = IR(irl->op1);
+ if (opisfusableload((IROp)irll->o) &&
+ (irt_isi8(irll->t) || irt_isu8(irll->t))) {
+ IRType1 origt = irll->t; /* Temporarily flip types. */
+ irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseload(as, irl->op1, RSET_GPR);
+ as->curins++;
+ irll->t = origt;
+ if (right != RID_MRM) goto test_nofuse;
+ /* Fusion succeeded, emit test byte mrm, imm8. */
+ asm_guardcc(as, cc);
+ emit_i8(as, (imm & 0xff));
+ emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
+ return;
+ }
+ }
+ as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
+ right = asm_fuseloadm(as, irl->op1, allow, r64);
+ as->curins++; /* Undo the above. */
+ test_nofuse:
+ asm_guardcc(as, cc);
+ if (ra_noreg(left)) {
+ emit_i32(as, imm);
+ emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
+ } else {
+ emit_mrm(as, XO_TEST, r64 + left, right);
+ }
+ } else {
+ Reg left;
+ if (opisfusableload((IROp)irl->o) &&
+ ((irt_isu8(irl->t) && checku8(imm)) ||
+ ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
+ (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
+ /* Only the IRT_INT case is fused by asm_fuseload.
+ ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
+ ** are handled here.
+ ** Note that cmp word [mem], imm16 should not be generated,
+ ** since it has a length-changing prefix. Compares of a word
+ ** against a sign-extended imm8 are ok, however.
+ */
+ IRType1 origt = irl->t; /* Temporarily flip types. */
+ irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
+ left = asm_fuseload(as, lref, RSET_GPR);
+ irl->t = origt;
+ if (left == RID_MRM) { /* Fusion succeeded? */
+ if (irt_isu8(irl->t) || irt_isu16(irl->t))
+ cc >>= 4; /* Need unsigned compare. */
+ asm_guardcc(as, cc);
+ emit_i8(as, imm);
+ emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
+ XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
+ return;
+ } /* Otherwise handle register case as usual. */
+ } else {
+ left = asm_fuseloadm(as, lref,
+ irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
+ }
+ asm_guardcc(as, cc);
+ if (usetest && left != RID_MRM) {
+ /* Use test r,r instead of cmp r,0. */
+ x86Op xo = XO_TEST;
+ if (irt_isu8(ir->t)) {
+ lj_assertA(ir->o == IR_EQ || ir->o == IR_NE, "bad usage");
+ xo = XO_TESTb;
+ if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
+ if (LJ_64) {
+ left |= FORCE_REX;
+ } else {
+ emit_i32(as, 0xff);
+ emit_mrm(as, XO_GROUP3, XOg_TEST, left);
+ return;
+ }
+ }
+ }
+ emit_rr(as, xo, r64 + left, left);
+ if (irl+1 == ir) /* Referencing previous ins? */
+ as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
+ } else {
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
+ }
+ }
+ } else {
+ Reg left = ra_alloc1(as, lref, RSET_GPR);
+ Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
+ asm_guardcc(as, cc);
+ emit_mrm(as, XO_CMP, r64 + left, right);
+ }
+ }
+}
+
+#define asm_equal(as, ir) asm_comp(as, ir)
+
+#if LJ_32 && LJ_HASFFI
+/* 64 bit integer comparisons in 32 bit mode. */
+static void asm_comp_int64(ASMState *as, IRIns *ir)
+{
+ uint32_t cc = asm_compmap[(ir-1)->o];
+ RegSet allow = RSET_GPR;
+ Reg lefthi = RID_NONE, leftlo = RID_NONE;
+ Reg righthi = RID_NONE, rightlo = RID_NONE;
+ MCLabel l_around;
+ x86ModRM mrm;
+
+ as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
+
+ /* Allocate/fuse hiword operands. */
+ if (irref_isk(ir->op2)) {
+ lefthi = asm_fuseload(as, ir->op1, allow);
+ } else {
+ lefthi = ra_alloc1(as, ir->op1, allow);
+ rset_clear(allow, lefthi);
+ righthi = asm_fuseload(as, ir->op2, allow);
+ if (righthi == RID_MRM) {
+ if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
+ if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
+ } else {
+ rset_clear(allow, righthi);
+ }
+ }
+ mrm = as->mrm; /* Save state for hiword instruction. */
+
+ /* Allocate/fuse loword operands. */
+ if (irref_isk((ir-1)->op2)) {
+ leftlo = asm_fuseload(as, (ir-1)->op1, allow);
+ } else {
+ leftlo = ra_alloc1(as, (ir-1)->op1, allow);
+ rset_clear(allow, leftlo);
+ rightlo = asm_fuseload(as, (ir-1)->op2, allow);
+ }
+
+ /* All register allocations must be performed _before_ this point. */
+ l_around = emit_label(as);
+ as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
+
+ /* Loword comparison and branch. */
+ asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
+ if (ra_noreg(rightlo)) {
+ int32_t imm = IR((ir-1)->op2)->i;
+ if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
+ emit_rr(as, XO_TEST, leftlo, leftlo);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
+ } else {
+ emit_mrm(as, XO_CMP, leftlo, rightlo);
+ }
+
+ /* Hiword comparison and branches. */
+ if ((cc & 15) != CC_NE)
+ emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
+ if ((cc & 15) != CC_E)
+ asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
+ as->mrm = mrm; /* Restore state. */
+ if (ra_noreg(righthi)) {
+ int32_t imm = IR(ir->op2)->i;
+ if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
+ emit_rr(as, XO_TEST, lefthi, lefthi);
+ else
+ emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
+ } else {
+ emit_mrm(as, XO_CMP, lefthi, righthi);
+ }
+}
+#endif
+
+/* -- Split register ops -------------------------------------------------- */
+
+/* Hiword op of a split 32/32 or 64/64 bit op. Previous op is the loword op. */
+static void asm_hiop(ASMState *as, IRIns *ir)
+{
+ /* HIOP is marked as a store because it needs its own DCE logic. */
+ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
+ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
+#if LJ_32 && LJ_HASFFI
+ if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
+ as->curins--; /* Always skip the CONV. */
+ if (usehi || uselo)
+ asm_conv64(as, ir);
+ return;
+ } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
+ asm_comp_int64(as, ir);
+ return;
+ } else if ((ir-1)->o == IR_XSTORE) {
+ if ((ir-1)->r != RID_SINK)
+ asm_fxstore(as, ir);
+ return;
+ }
+#endif
+ if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
+ switch ((ir-1)->o) {
+#if LJ_32 && LJ_HASFFI
+ case IR_ADD:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_ADC);
+ asm_intarith(as, ir-1, XOg_ADD);
+ break;
+ case IR_SUB:
+ as->flagmcp = NULL;
+ as->curins--;
+ asm_intarith(as, ir, XOg_SBB);
+ asm_intarith(as, ir-1, XOg_SUB);
+ break;
+ case IR_NEG: {
+ Reg dest = ra_dest(as, ir, RSET_GPR);
+ emit_rr(as, XO_GROUP3, XOg_NEG, dest);
+ emit_i8(as, 0);
+ emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
+ ra_left(as, dest, ir->op1);
+ as->curins--;
+ asm_neg_not(as, ir-1, XOg_NEG);
+ break;
+ }
+ case IR_CNEWI:
+ /* Nothing to do here. Handled by CNEWI itself. */
+ break;
+#endif
+ case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
+ if (!uselo)
+ ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
+ break;
+ default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
+ }
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
+static void asm_prof(ASMState *as, IRIns *ir)
+{
+ UNUSED(ir);
+ asm_guardcc(as, CC_NE);
+ emit_i8(as, HOOK_PROFILE);
+ emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask);
+}
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Check Lua stack size for overflow. Use exit handler as fallback. */
+static void asm_stack_check(ASMState *as, BCReg topslot,
+ IRIns *irp, RegSet allow, ExitNo exitno)
+{
+ /* Try to get an unused temp. register, otherwise spill/restore eax. */
+ Reg pbase = irp ? irp->r : RID_BASE;
+ Reg r = allow ? rset_pickbot(allow) : RID_EAX;
+ emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
+ if (allow == RSET_EMPTY) /* Restore temp. register. */
+ emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
+ else
+ ra_modified(as, r);
+ emit_gri(as, XG_ARITHi(XOg_CMP), r|REX_GC64, (int32_t)(8*topslot));
+ if (ra_hasreg(pbase) && pbase != r)
+ emit_rr(as, XO_ARITH(XOg_SUB), r|REX_GC64, pbase);
+ else
+#if LJ_GC64
+ emit_rmro(as, XO_ARITH(XOg_SUB), r|REX_64, RID_DISPATCH,
+ (int32_t)dispofs(as, &J2G(as->J)->jit_base));
+#else
+ emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
+ ptr2addr(&J2G(as->J)->jit_base));
+#endif
+ emit_rmro(as, XO_MOV, r|REX_GC64, r, offsetof(lua_State, maxstack));
+ emit_getgl(as, r, cur_L);
+ if (allow == RSET_EMPTY) /* Spill temp. register. */
+ emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
+}
+
+/* Restore Lua stack from on-trace state. */
+static void asm_stack_restore(ASMState *as, SnapShot *snap)
+{
+ SnapEntry *map = &as->T->snapmap[snap->mapofs];
+#if !LJ_FR2 || defined(LUA_USE_ASSERT)
+ SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
+#endif
+ MSize n, nent = snap->nent;
+ /* Store the value of all modified slots to the Lua stack. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = IR(ref);
+ if ((sn & SNAP_NORESTORE))
+ continue;
+ if ((sn & SNAP_KEYINDEX)) {
+ emit_movmroi(as, RID_BASE, ofs+4, LJ_KEYINDEX);
+ if (irref_isk(ref)) {
+ emit_movmroi(as, RID_BASE, ofs, ir->i);
+ } else {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
+ emit_movtomro(as, src, RID_BASE, ofs);
+ }
+ } else if (irt_isnum(ir->t)) {
+ Reg src = ra_alloc1(as, ref, RSET_FPR);
+ emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
+ } else {
+ lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
+ (LJ_DUALNUM && irt_isinteger(ir->t)),
+ "restore of IR type %d", irt_type(ir->t));
+ if (!irref_isk(ref)) {
+ Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
+#if LJ_GC64
+ if (irt_is64(ir->t)) {
+ /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
+ emit_u32(as, irt_toitype(ir->t) << 15);
+ emit_rmro(as, XO_ARITHi, XOg_OR, RID_BASE, ofs+4);
+ } else if (LJ_DUALNUM && irt_isinteger(ir->t)) {
+ emit_movmroi(as, RID_BASE, ofs+4, LJ_TISNUM << 15);
+ } else {
+ emit_movmroi(as, RID_BASE, ofs+4, (irt_toitype(ir->t)<<15)|0x7fff);
+ }
+#endif
+ emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
+#if LJ_GC64
+ } else {
+ TValue k;
+ lj_ir_kvalue(as->J->L, &k, ir);
+ if (tvisnil(&k)) {
+ emit_i32(as, -1);
+ emit_rmro(as, XO_MOVmi, REX_64, RID_BASE, ofs);
+ } else {
+ emit_movmroi(as, RID_BASE, ofs+4, k.u32.hi);
+ emit_movmroi(as, RID_BASE, ofs, k.u32.lo);
+ }
+#else
+ } else if (!irt_ispri(ir->t)) {
+ emit_movmroi(as, RID_BASE, ofs, ir->i);
+#endif
+ }
+ if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+#if !LJ_FR2
+ if (s != 0) /* Do not overwrite link to previous frame. */
+ emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
+#endif
+#if !LJ_GC64
+ } else {
+ if (!(LJ_64 && irt_islightud(ir->t)))
+ emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
+#endif
+ }
+ }
+ checkmclim(as);
+ }
+ lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
+}
+
+/* -- GC handling --------------------------------------------------------- */
+
+/* Check GC threshold and do one or more GC steps. */
+static void asm_gc_check(ASMState *as)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
+ IRRef args[2];
+ MCLabel l_end;
+ Reg tmp;
+ ra_evictset(as, RSET_SCRATCH);
+ l_end = emit_label(as);
+ /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
+ asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
+ emit_rr(as, XO_TEST, RID_RET, RID_RET);
+ args[0] = ASMREF_TMP1; /* global_State *g */
+ args[1] = ASMREF_TMP2; /* MSize steps */
+ asm_gencall(as, ci, args);
+ tmp = ra_releasetmp(as, ASMREF_TMP1);
+#if LJ_GC64
+ emit_rmro(as, XO_LEA, tmp|REX_64, RID_DISPATCH, GG_DISP2G);
+#else
+ emit_loada(as, tmp, J2G(as->J));
+#endif
+ emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
+ /* Jump around GC step if GC total < GC threshold. */
+ emit_sjcc(as, CC_B, l_end);
+ emit_opgl(as, XO_ARITH(XOg_CMP), tmp|REX_GC64, gc.threshold);
+ emit_getgl(as, tmp, gc.total);
+ as->gcsteps = 0;
+ checkmclim(as);
+}
+
+/* -- Loop handling ------------------------------------------------------- */
+
+/* Fixup the loop branch. */
+static void asm_loop_fixup(ASMState *as)
+{
+ MCode *p = as->mctop;
+ MCode *target = as->mcp;
+ if (as->realign) { /* Realigned loops use short jumps. */
+ as->realign = NULL; /* Stop another retry. */
+ lj_assertA(((intptr_t)target & 15) == 0, "loop realign failed");
+ if (as->loopinv) { /* Inverted loop branch? */
+ p -= 5;
+ p[0] = XI_JMP;
+ lj_assertA(target - p >= -128, "loop realign failed");
+ p[-1] = (MCode)(target - p); /* Patch sjcc. */
+ if (as->loopinv == 2)
+ p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
+ } else {
+ lj_assertA(target - p >= -128, "loop realign failed");
+ p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
+ p[-2] = XI_JMPs;
+ }
+ } else {
+ MCode *newloop;
+ p[-5] = XI_JMP;
+ if (as->loopinv) { /* Inverted loop branch? */
+ /* asm_guardcc already inverted the jcc and patched the jmp. */
+ p -= 5;
+ newloop = target+4;
+ *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
+ if (as->loopinv == 2) {
+ *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
+ newloop = target+8;
+ }
+ } else { /* Otherwise just patch jmp. */
+ *(int32_t *)(p-4) = (int32_t)(target - p);
+ newloop = target+3;
+ }
+ /* Realign small loops and shorten the loop branch. */
+ if (newloop >= p - 128) {
+ as->realign = newloop; /* Force a retry and remember alignment. */
+ as->curins = as->stopins; /* Abort asm_trace now. */
+ as->T->nins = as->orignins; /* Remove any added renames. */
+ }
+ }
+}
+
+/* Fixup the tail of the loop. */
+static void asm_loop_tail_fixup(ASMState *as)
+{
+ UNUSED(as); /* Nothing to do. */
+}
+
+/* -- Head of trace ------------------------------------------------------- */
+
+/* Coalesce BASE register for a root trace. */
+static void asm_head_root_base(ASMState *as)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (r != RID_BASE)
+ emit_rr(as, XO_MOV, r|REX_GC64, RID_BASE);
+ }
+}
+
+/* Coalesce or reload BASE register for a side trace. */
+static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
+{
+ IRIns *ir = IR(REF_BASE);
+ Reg r = ir->r;
+ if (ra_hasreg(r)) {
+ ra_free(as, r);
+ if (rset_test(as->modset, r) || irt_ismarked(ir->t))
+ ir->r = RID_INIT; /* No inheritance for modified BASE register. */
+ if (irp->r == r) {
+ rset_clear(allow, r); /* Mark same BASE register as coalesced. */
+ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
+ /* Move from coalesced parent reg. */
+ rset_clear(allow, irp->r);
+ emit_rr(as, XO_MOV, r|REX_GC64, irp->r);
+ } else {
+ emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
+ }
+ }
+ return allow;
+}
+
+/* -- Tail of trace ------------------------------------------------------- */
+
+/* Fixup the tail code. */
+static void asm_tail_fixup(ASMState *as, TraceNo lnk)
+{
+ /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
+ MCode *p = as->mctop;
+ MCode *target, *q;
+ int32_t spadj = as->T->spadjust;
+ if (spadj == 0) {
+ p -= LJ_64 ? 7 : 6;
+ } else {
+ MCode *p1;
+ /* Patch stack adjustment. */
+ if (checki8(spadj)) {
+ p -= 3;
+ p1 = p-6;
+ *p1 = (MCode)spadj;
+ } else {
+ p1 = p-9;
+ *(int32_t *)p1 = spadj;
+ }
+#if LJ_64
+ p1[-3] = 0x48;
+#endif
+ p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
+ p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
+ }
+ /* Patch exit branch. */
+ target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
+ *(int32_t *)(p-4) = jmprel(as->J, p, target);
+ p[-5] = XI_JMP;
+ /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
+ for (q = as->mctop-1; q >= p; q--)
+ *q = XI_NOP;
+ as->mctop = p;
+}
+
+/* Prepare tail of code. */
+static void asm_tail_prep(ASMState *as)
+{
+ MCode *p = as->mctop;
+ /* Realign and leave room for backwards loop branch or exit branch. */
+ if (as->realign) {
+ int i = ((int)(intptr_t)as->realign) & 15;
+ /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
+ while (i-- > 0)
+ *--p = XI_NOP;
+ as->mctop = p;
+ p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
+ } else {
+ p -= 5; /* Space for exit branch (near jmp). */
+ }
+ if (as->loopref) {
+ as->invmcp = as->mcp = p;
+ } else {
+ /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
+ as->mcp = p - (LJ_64 ? 7 : 6);
+ as->invmcp = NULL;
+ }
+}
+
+/* -- Trace setup --------------------------------------------------------- */
+
+/* Ensure there are enough stack slots for call arguments. */
+static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
+{
+ IRRef args[CCI_NARGS_MAX*2];
+ int nslots;
+ asm_collectargs(as, ir, ci, args);
+ nslots = asm_count_call_slots(as, ci, args);
+ if (nslots > as->evenspill) /* Leave room for args in stack slots. */
+ as->evenspill = nslots;
+#if LJ_64
+ return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
+#else
+ return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
+#endif
+}
+
+/* Target-specific setup. */
+static void asm_setup_target(ASMState *as)
+{
+ asm_exitstub_setup(as, as->T->nsnap);
+ as->mrm.base = 0;
+}
+
+/* -- Trace patching ------------------------------------------------------ */
+
+static const uint8_t map_op1[256] = {
+0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
+0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
+0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
+0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
+#if LJ_64
+0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
+#else
+0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
+#endif
+0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
+0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
+0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
+0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
+0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
+#if LJ_64
+0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
+#else
+0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
+#endif
+0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
+0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
+0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
+0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
+0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
+};
+
+static const uint8_t map_op2[256] = {
+0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
+0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
+0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
+};
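+/* A rough key to these tables, inferred from asm_x86_inslen() below: the
+** high nibble of an entry selects the decode action (0 fixed length,
+** 1 prefix byte, 2 0F escape, 3 or 9 ModRM follows, 5 trailing immediate,
+** 4 and 8 like 5 and 9 but the immediate shrinks with an operand-size
+** prefix, 6 group 3, 7 VEX) and the low nibble is a byte count.
+*/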
+
+static uint32_t asm_x86_inslen(const uint8_t* p)
+{
+ uint32_t result = 0;
+ uint32_t prefixes = 0;
+ uint32_t x = map_op1[*p];
+ for (;;) {
+ switch (x >> 4) {
+ case 0: return result + x + (prefixes & 4);
+ case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
+ case 2: x = map_op2[*++p]; break;
+ case 3: p++; goto mrm;
+ case 4: result -= (prefixes & 2); /* fallthrough */
+ case 5: return result + (x & 15);
+ case 6: /* Group 3. */
+ if (p[1] & 0x38) x = 2;
+ else if ((prefixes & 2) && (x == 0x66)) x = 4;
+ goto mrm;
+ case 7: /* VEX c4/c5. */
+ if (LJ_32 && p[1] < 0xc0) {
+ x = 2;
+ goto mrm;
+ }
+ if (x == 0x70) {
+ x = *++p & 0x1f;
+ result++;
+ if (x >= 2) {
+ p += 2;
+ result += 2;
+ goto mrm;
+ }
+ }
+ p++;
+ result++;
+ x = map_op2[*++p];
+ break;
+ case 8: result -= (prefixes & 2); /* fallthrough */
+ case 9: mrm: /* ModR/M and possibly SIB. */
+ result += (x & 15);
+ x = *++p;
+ switch (x >> 6) {
+ case 0: if ((x & 7) == 5) return result + 4; break;
+ case 1: result++; break;
+ case 2: result += 4; break;
+ case 3: return result;
+ }
+ if ((x & 7) == 4) {
+ result++;
+ if (x < 0x40 && (p[1] & 7) == 5) result += 4;
+ }
+ return result;
+ }
+ }
+}
+
+/* Patch exit jumps of existing machine code to a new target. */
+void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
+{
+ MCode *p = T->mcode;
+ MCode *mcarea = lj_mcode_patch(J, p, 0);
+ MSize len = T->szmcode;
+ MCode *px = exitstub_addr(J, exitno) - 6;
+ MCode *pe = p+len-6;
+ MCode *pgc = NULL;
+#if LJ_GC64
+ uint32_t statei = (uint32_t)(GG_OFS(g.vmstate) - GG_OFS(dispatch));
+#else
+ uint32_t statei = u32ptr(&J2G(J)->vmstate);
+#endif
+ if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
+ *(int32_t *)(p+len-4) = jmprel(J, p+len, target);
+ /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
+ for (; p < pe; p += asm_x86_inslen(p)) {
+ intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64;
+ if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi)
+ break;
+ }
+ lj_assertJ(p < pe, "instruction length decoder failed");
+ for (; p < pe; p += asm_x86_inslen(p)) {
+ if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px &&
+ p != pgc) {
+ *(int32_t *)(p+2) = jmprel(J, p+6, target);
+ } else if (*p == XI_CALL &&
+ (void *)(p+5+*(int32_t *)(p+1)) == (void *)lj_gc_step_jit) {
+ pgc = p+7; /* Do not patch GC check exit. */
+ }
+ }
+ lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
+ lj_mcode_patch(J, mcarea, 1);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_assert.c b/libs/luajit-cmake/luajit/src/lj_assert.c
new file mode 100644
index 0000000..4b713b2
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_assert.c
@@ -0,0 +1,28 @@
+/*
+** Internal assertions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_assert_c
+#define LUA_CORE
+
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+
+#include <stdio.h>
+
+#include "lj_obj.h"
+
+void lj_assert_fail(global_State *g, const char *file, int line,
+ const char *func, const char *fmt, ...)
+{
+ va_list argp;
+ va_start(argp, fmt);
+ fprintf(stderr, "LuaJIT ASSERT %s:%d: %s: ", file, line, func);
+ vfprintf(stderr, fmt, argp);
+ fputc('\n', stderr);
+ va_end(argp);
+ UNUSED(g); /* May be NULL. TODO: optionally dump state. */
+ abort();
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_bc.c b/libs/luajit-cmake/luajit/src/lj_bc.c
new file mode 100644
index 0000000..b692cb5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_bc.c
@@ -0,0 +1,14 @@
+/*
+** Bytecode instruction modes.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+
+/* Bytecode offsets and bytecode instruction modes. */
+#include "lj_bcdef.h"
+
diff --git a/libs/luajit-cmake/luajit/src/lj_bc.h b/libs/luajit-cmake/luajit/src/lj_bc.h
new file mode 100644
index 0000000..02356e5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_bc.h
@@ -0,0 +1,265 @@
+/*
+** Bytecode instruction format.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BC_H
+#define _LJ_BC_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit:
+**
+** +----+----+----+----+
+** | B | C | A | OP | Format ABC
+** +----+----+----+----+
+** | D | A | OP | Format AD
+** +--------------------
+** MSB LSB
+**
+** In-memory instructions are always stored in host byte order.
+*/
+
+/* Operand ranges and related constants. */
+#define BCMAX_A 0xff
+#define BCMAX_B 0xff
+#define BCMAX_C 0xff
+#define BCMAX_D 0xffff
+#define BCBIAS_J 0x8000
+#define NO_REG BCMAX_A
+#define NO_JMP (~(BCPos)0)
+
+/* Macros to get instruction fields. */
+#define bc_op(i) ((BCOp)((i)&0xff))
+#define bc_a(i) ((BCReg)(((i)>>8)&0xff))
+#define bc_b(i) ((BCReg)((i)>>24))
+#define bc_c(i) ((BCReg)(((i)>>16)&0xff))
+#define bc_d(i) ((BCReg)((i)>>16))
+#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J)
+
+/* Macros to set instruction fields. */
+#define setbc_byte(p, x, ofs) \
+ ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x)
+#define setbc_op(p, x) setbc_byte(p, (x), 0)
+#define setbc_a(p, x) setbc_byte(p, (x), 1)
+#define setbc_b(p, x) setbc_byte(p, (x), 3)
+#define setbc_c(p, x) setbc_byte(p, (x), 2)
+#define setbc_d(p, x) \
+ ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x)
+#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J))
+
+/* Macros to compose instructions. */
+#define BCINS_ABC(o, a, b, c) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16))
+#define BCINS_AD(o, a, d) \
+ (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16))
+#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J))
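+/* Illustrative round-trip: for i = BCINS_AD(BC_KSHORT, 1, 2), the accessors
+** above yield bc_op(i) == BC_KSHORT, bc_a(i) == 1 and bc_d(i) == 2, since
+** OP occupies bits 0-7, A bits 8-15 and D bits 16-31.
+*/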
+
+/* Bytecode instruction definition. Order matters, see below.
+**
+** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod)
+**
+** The opcode name suffixes specify the type for RB/RC or RD:
+** V = variable slot
+** S = string const
+** N = number const
+** P = primitive type (~itype)
+** B = unsigned byte literal
+** M = multiple args/results
+*/
+#define BCDEF(_) \
+ /* Comparison ops. ORDER OPR. */ \
+ _(ISLT, var, ___, var, lt) \
+ _(ISGE, var, ___, var, lt) \
+ _(ISLE, var, ___, var, le) \
+ _(ISGT, var, ___, var, le) \
+ \
+ _(ISEQV, var, ___, var, eq) \
+ _(ISNEV, var, ___, var, eq) \
+ _(ISEQS, var, ___, str, eq) \
+ _(ISNES, var, ___, str, eq) \
+ _(ISEQN, var, ___, num, eq) \
+ _(ISNEN, var, ___, num, eq) \
+ _(ISEQP, var, ___, pri, eq) \
+ _(ISNEP, var, ___, pri, eq) \
+ \
+ /* Unary test and copy ops. */ \
+ _(ISTC, dst, ___, var, ___) \
+ _(ISFC, dst, ___, var, ___) \
+ _(IST, ___, ___, var, ___) \
+ _(ISF, ___, ___, var, ___) \
+ _(ISTYPE, var, ___, lit, ___) \
+ _(ISNUM, var, ___, lit, ___) \
+ \
+ /* Unary ops. */ \
+ _(MOV, dst, ___, var, ___) \
+ _(NOT, dst, ___, var, ___) \
+ _(UNM, dst, ___, var, unm) \
+ _(LEN, dst, ___, var, len) \
+ \
+ /* Binary ops. ORDER OPR. VV last, POW must be next. */ \
+ _(ADDVN, dst, var, num, add) \
+ _(SUBVN, dst, var, num, sub) \
+ _(MULVN, dst, var, num, mul) \
+ _(DIVVN, dst, var, num, div) \
+ _(MODVN, dst, var, num, mod) \
+ \
+ _(ADDNV, dst, var, num, add) \
+ _(SUBNV, dst, var, num, sub) \
+ _(MULNV, dst, var, num, mul) \
+ _(DIVNV, dst, var, num, div) \
+ _(MODNV, dst, var, num, mod) \
+ \
+ _(ADDVV, dst, var, var, add) \
+ _(SUBVV, dst, var, var, sub) \
+ _(MULVV, dst, var, var, mul) \
+ _(DIVVV, dst, var, var, div) \
+ _(MODVV, dst, var, var, mod) \
+ \
+ _(POW, dst, var, var, pow) \
+ _(CAT, dst, rbase, rbase, concat) \
+ \
+ /* Constant ops. */ \
+ _(KSTR, dst, ___, str, ___) \
+ _(KCDATA, dst, ___, cdata, ___) \
+ _(KSHORT, dst, ___, lits, ___) \
+ _(KNUM, dst, ___, num, ___) \
+ _(KPRI, dst, ___, pri, ___) \
+ _(KNIL, base, ___, base, ___) \
+ \
+ /* Upvalue and function ops. */ \
+ _(UGET, dst, ___, uv, ___) \
+ _(USETV, uv, ___, var, ___) \
+ _(USETS, uv, ___, str, ___) \
+ _(USETN, uv, ___, num, ___) \
+ _(USETP, uv, ___, pri, ___) \
+ _(UCLO, rbase, ___, jump, ___) \
+ _(FNEW, dst, ___, func, gc) \
+ \
+ /* Table ops. */ \
+ _(TNEW, dst, ___, lit, gc) \
+ _(TDUP, dst, ___, tab, gc) \
+ _(GGET, dst, ___, str, index) \
+ _(GSET, var, ___, str, newindex) \
+ _(TGETV, dst, var, var, index) \
+ _(TGETS, dst, var, str, index) \
+ _(TGETB, dst, var, lit, index) \
+ _(TGETR, dst, var, var, index) \
+ _(TSETV, var, var, var, newindex) \
+ _(TSETS, var, var, str, newindex) \
+ _(TSETB, var, var, lit, newindex) \
+ _(TSETM, base, ___, num, newindex) \
+ _(TSETR, var, var, var, newindex) \
+ \
+ /* Calls and vararg handling. T = tail call. */ \
+ _(CALLM, base, lit, lit, call) \
+ _(CALL, base, lit, lit, call) \
+ _(CALLMT, base, ___, lit, call) \
+ _(CALLT, base, ___, lit, call) \
+ _(ITERC, base, lit, lit, call) \
+ _(ITERN, base, lit, lit, call) \
+ _(VARG, base, lit, lit, ___) \
+ _(ISNEXT, base, ___, jump, ___) \
+ \
+ /* Returns. */ \
+ _(RETM, base, ___, lit, ___) \
+ _(RET, rbase, ___, lit, ___) \
+ _(RET0, rbase, ___, lit, ___) \
+ _(RET1, rbase, ___, lit, ___) \
+ \
+ /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. */ \
+ _(FORI, base, ___, jump, ___) \
+ _(JFORI, base, ___, jump, ___) \
+ \
+ _(FORL, base, ___, jump, ___) \
+ _(IFORL, base, ___, jump, ___) \
+ _(JFORL, base, ___, lit, ___) \
+ \
+ _(ITERL, base, ___, jump, ___) \
+ _(IITERL, base, ___, jump, ___) \
+ _(JITERL, base, ___, lit, ___) \
+ \
+ _(LOOP, rbase, ___, jump, ___) \
+ _(ILOOP, rbase, ___, jump, ___) \
+ _(JLOOP, rbase, ___, lit, ___) \
+ \
+ _(JMP, rbase, ___, jump, ___) \
+ \
+ /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \
+ _(FUNCF, rbase, ___, ___, ___) \
+ _(IFUNCF, rbase, ___, ___, ___) \
+ _(JFUNCF, rbase, ___, lit, ___) \
+ _(FUNCV, rbase, ___, ___, ___) \
+ _(IFUNCV, rbase, ___, ___, ___) \
+ _(JFUNCV, rbase, ___, lit, ___) \
+ _(FUNCC, rbase, ___, ___, ___) \
+ _(FUNCCW, rbase, ___, ___, ___)
+
+/* Bytecode opcode numbers. */
+typedef enum {
+#define BCENUM(name, ma, mb, mc, mt) BC_##name,
+BCDEF(BCENUM)
+#undef BCENUM
+ BC__MAX
+} BCOp;
+
+LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV);
+LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES);
+LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN);
+LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP);
+LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE);
+LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT);
+LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT);
+LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC);
+LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM);
+LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT);
+LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET);
+LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL);
+LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL);
+LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL);
+LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP);
+LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV);
+LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV);
+
+/* This solves a circular dependency problem, change as needed. */
+#define FF_next_N 4
+
+/* Stack slots used by FORI/FORL, relative to operand A. */
+enum {
+ FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT
+};
+
+/* Bytecode operand modes. ORDER BCMode */
+typedef enum {
+ BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */
+ BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata,
+ BCM_max
+} BCMode;
+#define BCM___ BCMnone
+
+#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7))
+#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15))
+#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15))
+#define bcmode_d(op) bcmode_c(op)
+#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3))
+#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11))
+
+#define BCMODE(name, ma, mb, mc, mm) \
+ (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)),
+#define BCMODE_FF 0
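+/* Packing implied by the accessors above: bits 0-2 hold mode A, bits 3-6
+** mode B, bits 7-10 mode C/D and bits 11 and up the metamethod index.
+*/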
+
+static LJ_AINLINE int bc_isret(BCOp op)
+{
+ return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1);
+}
+
+LJ_DATA const uint16_t lj_bc_mode[];
+LJ_DATA const uint16_t lj_bc_ofs[];
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_bcdump.h b/libs/luajit-cmake/luajit/src/lj_bcdump.h
new file mode 100644
index 0000000..69da16e
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_bcdump.h
@@ -0,0 +1,68 @@
+/*
+** Bytecode dump definitions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BCDUMP_H
+#define _LJ_BCDUMP_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+/* -- Bytecode dump format ------------------------------------------------ */
+
+/*
+** dump = header proto+ 0U
+** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*]
+** proto = lengthU pdata
+** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*]
+** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU
+** [debuglenU [firstlineU numlineU]]
+** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* }
+** knum = intU0 | (loU1 hiU)
+** ktab = narrayU nhashU karray* khash*
+** karray = ktabk
+** khash = ktabk ktabk
+** ktabk = ktabtypeU { intU | (loU hiU) | strB* }
+**
+** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1
+*/
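+
+/* Example (assuming an unstripped dump): the stream starts with the bytes
+** 0x1b 0x4c 0x4a ("ESC L J"), the version byte, a flags ULEB128 and the
+** ULEB128-prefixed chunk name, followed by one length-prefixed proto per
+** prototype and a terminating zero.
+*/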
+
+/* Bytecode dump header. */
+#define BCDUMP_HEAD1 0x1b
+#define BCDUMP_HEAD2 0x4c
+#define BCDUMP_HEAD3 0x4a
+
+/* If you perform *any* kind of private modifications to the bytecode itself
+** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
+*/
+#define BCDUMP_VERSION 2
+
+/* Compatibility flags. */
+#define BCDUMP_F_BE 0x01
+#define BCDUMP_F_STRIP 0x02
+#define BCDUMP_F_FFI 0x04
+#define BCDUMP_F_FR2 0x08
+
+#define BCDUMP_F_KNOWN (BCDUMP_F_FR2*2-1)
+
+/* Type codes for the GC constants of a prototype. Plus length for strings. */
+enum {
+ BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64,
+ BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR
+};
+
+/* Type codes for the keys/values of a constant table. */
+enum {
+ BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE,
+ BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR
+};
+
+/* -- Bytecode reader/writer ---------------------------------------------- */
+
+LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
+ void *data, int strip);
+LJ_FUNC GCproto *lj_bcread_proto(LexState *ls);
+LJ_FUNC GCproto *lj_bcread(LexState *ls);
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_bcread.c b/libs/luajit-cmake/luajit/src/lj_bcread.c
new file mode 100644
index 0000000..2ce0570
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_bcread.c
@@ -0,0 +1,453 @@
+/*
+** Bytecode reader.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcread_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_state.h"
+#include "lj_strfmt.h"
+
+/* Reuse some lexer fields for our own purposes. */
+#define bcread_flags(ls) ls->level
+#define bcread_swap(ls) \
+ ((bcread_flags(ls) & BCDUMP_F_BE) != LJ_BE*BCDUMP_F_BE)
+#define bcread_oldtop(L, ls) restorestack(L, ls->lastline)
+#define bcread_savetop(L, ls, top) \
+ ls->lastline = (BCLine)savestack(L, (top))
+
+/* -- Input buffer handling ----------------------------------------------- */
+
+/* Throw reader error. */
+static LJ_NOINLINE void bcread_error(LexState *ls, ErrMsg em)
+{
+ lua_State *L = ls->L;
+ const char *name = ls->chunkarg;
+ if (*name == BCDUMP_HEAD1) name = "(binary)";
+ else if (*name == '@' || *name == '=') name++;
+ lj_strfmt_pushf(L, "%s: %s", name, err2msg(em));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+}
+
+/* Refill buffer. */
+static LJ_NOINLINE void bcread_fill(LexState *ls, MSize len, int need)
+{
+ lj_assertLS(len != 0, "empty refill");
+ if (len > LJ_MAX_BUF || ls->c < 0)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ do {
+ const char *buf;
+ size_t sz;
+ char *p = ls->sb.b;
+ MSize n = (MSize)(ls->pe - ls->p);
+ if (n) { /* Copy remainder to buffer. */
+ if (sbuflen(&ls->sb)) { /* Move down in buffer. */
+ lj_assertLS(ls->pe == ls->sb.w, "bad buffer pointer");
+ if (ls->p != p) memmove(p, ls->p, n);
+ } else { /* Copy from buffer provided by reader. */
+ p = lj_buf_need(&ls->sb, len);
+ memcpy(p, ls->p, n);
+ }
+ ls->p = p;
+ ls->pe = p + n;
+ }
+ ls->sb.w = p + n;
+ buf = ls->rfunc(ls->L, ls->rdata, &sz); /* Get more data from reader. */
+ if (buf == NULL || sz == 0) { /* EOF? */
+ if (need) bcread_error(ls, LJ_ERR_BCBAD);
+ ls->c = -1; /* Only bad if we get called again. */
+ break;
+ }
+ if (sz >= LJ_MAX_BUF - n) lj_err_mem(ls->L);
+ if (n) { /* Append to buffer. */
+ n += (MSize)sz;
+ p = lj_buf_need(&ls->sb, n < len ? len : n);
+ memcpy(ls->sb.w, buf, sz);
+ ls->sb.w = p + n;
+ ls->p = p;
+ ls->pe = p + n;
+ } else { /* Return buffer provided by reader. */
+ ls->p = buf;
+ ls->pe = buf + sz;
+ }
+ } while ((MSize)(ls->pe - ls->p) < len);
+}
+
+/* Need a certain number of bytes. */
+static LJ_AINLINE void bcread_need(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY((MSize)(ls->pe - ls->p) < len))
+ bcread_fill(ls, len, 1);
+}
+
+/* Want to read up to a certain number of bytes, but may need less. */
+static LJ_AINLINE void bcread_want(LexState *ls, MSize len)
+{
+ if (LJ_UNLIKELY((MSize)(ls->pe - ls->p) < len))
+ bcread_fill(ls, len, 0);
+}
+
+/* Return memory block from buffer. */
+static LJ_AINLINE uint8_t *bcread_mem(LexState *ls, MSize len)
+{
+ uint8_t *p = (uint8_t *)ls->p;
+ ls->p += len;
+ lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
+ return p;
+}
+
+/* Copy memory block from buffer. */
+static void bcread_block(LexState *ls, void *q, MSize len)
+{
+ memcpy(q, bcread_mem(ls, len), len);
+}
+
+/* Read byte from buffer. */
+static LJ_AINLINE uint32_t bcread_byte(LexState *ls)
+{
+ lj_assertLS(ls->p < ls->pe, "buffer read overflow");
+ return (uint32_t)(uint8_t)*ls->p++;
+}
+
+/* Read ULEB128 value from buffer. */
+static LJ_AINLINE uint32_t bcread_uleb128(LexState *ls)
+{
+ uint32_t v = lj_buf_ruleb128(&ls->p);
+ lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
+ return v;
+}
+
+/* Read top 32 bits of 33 bit ULEB128 value from buffer. */
+static uint32_t bcread_uleb128_33(LexState *ls)
+{
+ const uint8_t *p = (const uint8_t *)ls->p;
+ uint32_t v = (*p++ >> 1);
+ if (LJ_UNLIKELY(v >= 0x40)) {
+ int sh = -1;
+ v &= 0x3f;
+ do {
+ v |= ((*p & 0x7f) << (sh += 7));
+ } while (*p++ >= 0x80);
+ }
+ ls->p = (char *)p;
+ lj_assertLS(ls->p <= ls->pe, "buffer read overflow");
+ return v;
+}
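+
+/* Note: the LSB of the first byte is the tag (0 = integer constant, 1 =
+** loword of a number), so e.g. a first byte of 0x05 yields tag 1 and value
+** bits 2; bytes >= 0x80 continue as in a plain ULEB128.
+*/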
+
+/* -- Bytecode reader ----------------------------------------------------- */
+
+/* Read debug info of a prototype. */
+static void bcread_dbg(LexState *ls, GCproto *pt, MSize sizedbg)
+{
+ void *lineinfo = (void *)proto_lineinfo(pt);
+ bcread_block(ls, lineinfo, sizedbg);
+ /* Swap lineinfo if the endianness differs. */
+ if (bcread_swap(ls) && pt->numline >= 256) {
+ MSize i, n = pt->sizebc-1;
+ if (pt->numline < 65536) {
+ uint16_t *p = (uint16_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = (uint16_t)((p[i] >> 8)|(p[i] << 8));
+ } else {
+ uint32_t *p = (uint32_t *)lineinfo;
+ for (i = 0; i < n; i++) p[i] = lj_bswap(p[i]);
+ }
+ }
+}
+
+/* Find pointer to varinfo. */
+static const void *bcread_varinfo(GCproto *pt)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ MSize n = pt->sizeuv;
+ if (n) while (*p++ || --n) ;
+ return p;
+}
+
+/* Read a single constant key/value of a template table. */
+static void bcread_ktabk(LexState *ls, TValue *o)
+{
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KTAB_STR) {
+ MSize len = tp - BCDUMP_KTAB_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setstrV(ls->L, o, lj_str_new(ls->L, p, len));
+ } else if (tp == BCDUMP_KTAB_INT) {
+ setintV(o, (int32_t)bcread_uleb128(ls));
+ } else if (tp == BCDUMP_KTAB_NUM) {
+ o->u32.lo = bcread_uleb128(ls);
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ lj_assertLS(tp <= BCDUMP_KTAB_TRUE, "bad constant type %d", tp);
+ setpriV(o, ~tp);
+ }
+}
+
+/* Read a template table. */
+static GCtab *bcread_ktab(LexState *ls)
+{
+ MSize narray = bcread_uleb128(ls);
+ MSize nhash = bcread_uleb128(ls);
+ GCtab *t = lj_tab_new(ls->L, narray, hsize2hbits(nhash));
+ if (narray) { /* Read array entries. */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcread_ktabk(ls, o);
+ }
+ if (nhash) { /* Read hash entries. */
+ MSize i;
+ for (i = 0; i < nhash; i++) {
+ TValue key;
+ bcread_ktabk(ls, &key);
+ lj_assertLS(!tvisnil(&key), "nil key");
+ bcread_ktabk(ls, lj_tab_set(ls->L, t, &key));
+ }
+ }
+ return t;
+}
+
+/* Read GC constants of a prototype. */
+static void bcread_kgc(LexState *ls, GCproto *pt, MSize sizekgc)
+{
+ MSize i;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ MSize tp = bcread_uleb128(ls);
+ if (tp >= BCDUMP_KGC_STR) {
+ MSize len = tp - BCDUMP_KGC_STR;
+ const char *p = (const char *)bcread_mem(ls, len);
+ setgcref(*kr, obj2gco(lj_str_new(ls->L, p, len)));
+ } else if (tp == BCDUMP_KGC_TAB) {
+ setgcref(*kr, obj2gco(bcread_ktab(ls)));
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ CTypeID id = tp == BCDUMP_KGC_COMPLEX ? CTID_COMPLEX_DOUBLE :
+ tp == BCDUMP_KGC_I64 ? CTID_INT64 : CTID_UINT64;
+ CTSize sz = tp == BCDUMP_KGC_COMPLEX ? 16 : 8;
+ GCcdata *cd = lj_cdata_new_(ls->L, id, sz);
+ TValue *p = (TValue *)cdataptr(cd);
+ setgcref(*kr, obj2gco(cd));
+ p[0].u32.lo = bcread_uleb128(ls);
+ p[0].u32.hi = bcread_uleb128(ls);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ p[1].u32.lo = bcread_uleb128(ls);
+ p[1].u32.hi = bcread_uleb128(ls);
+ }
+#endif
+ } else {
+ lua_State *L = ls->L;
+ lj_assertLS(tp == BCDUMP_KGC_CHILD, "bad constant type %d", tp);
+ if (L->top <= bcread_oldtop(L, ls)) /* Stack underflow? */
+ bcread_error(ls, LJ_ERR_BCBAD);
+ L->top--;
+ setgcref(*kr, obj2gco(protoV(L->top)));
+ }
+ }
+}
+
+/* Read number constants of a prototype. */
+static void bcread_knum(LexState *ls, GCproto *pt, MSize sizekn)
+{
+ MSize i;
+ TValue *o = mref(pt->k, TValue);
+ for (i = 0; i < sizekn; i++, o++) {
+ int isnum = (ls->p[0] & 1);
+ uint32_t lo = bcread_uleb128_33(ls);
+ if (isnum) {
+ o->u32.lo = lo;
+ o->u32.hi = bcread_uleb128(ls);
+ } else {
+ setintV(o, lo);
+ }
+ }
+}
+
+/* Read bytecode instructions. */
+static void bcread_bytecode(LexState *ls, GCproto *pt, MSize sizebc)
+{
+ BCIns *bc = proto_bc(pt);
+ bc[0] = BCINS_AD((pt->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ pt->framesize, 0);
+ bcread_block(ls, bc+1, (sizebc-1)*(MSize)sizeof(BCIns));
+ /* Swap bytecode instructions if the endianness differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 1; i < sizebc; i++) bc[i] = lj_bswap(bc[i]);
+ }
+}
+
+/* Read upvalue refs. */
+static void bcread_uv(LexState *ls, GCproto *pt, MSize sizeuv)
+{
+ if (sizeuv) {
+ uint16_t *uv = proto_uv(pt);
+ bcread_block(ls, uv, sizeuv*2);
+ /* Swap upvalue refs if the endianness differs. */
+ if (bcread_swap(ls)) {
+ MSize i;
+ for (i = 0; i < sizeuv; i++)
+ uv[i] = (uint16_t)((uv[i] >> 8)|(uv[i] << 8));
+ }
+ }
+}
+
+/* Read a prototype. */
+GCproto *lj_bcread_proto(LexState *ls)
+{
+ GCproto *pt;
+ MSize framesize, numparams, flags, sizeuv, sizekgc, sizekn, sizebc, sizept;
+ MSize ofsk, ofsuv, ofsdbg;
+ MSize sizedbg = 0;
+ BCLine firstline = 0, numline = 0;
+
+ /* Read prototype header. */
+ flags = bcread_byte(ls);
+ numparams = bcread_byte(ls);
+ framesize = bcread_byte(ls);
+ sizeuv = bcread_byte(ls);
+ sizekgc = bcread_uleb128(ls);
+ sizekn = bcread_uleb128(ls);
+ sizebc = bcread_uleb128(ls) + 1;
+ if (!(bcread_flags(ls) & BCDUMP_F_STRIP)) {
+ sizedbg = bcread_uleb128(ls);
+ if (sizedbg) {
+ firstline = bcread_uleb128(ls);
+ numline = bcread_uleb128(ls);
+ }
+ }
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = (MSize)sizeof(GCproto) +
+ sizebc*(MSize)sizeof(BCIns) +
+ sizekgc*(MSize)sizeof(GCRef);
+ sizept = (sizept + (MSize)sizeof(TValue)-1) & ~((MSize)sizeof(TValue)-1);
+ ofsk = sizept; sizept += sizekn*(MSize)sizeof(TValue);
+ ofsuv = sizept; sizept += ((sizeuv+1)&~1)*2;
+ ofsdbg = sizept; sizept += sizedbg;
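+ /* Resulting layout: GCproto, bytecode, GC constants (growing down to the
+ ** k pointer), number constants (growing up from it), upvalue refs, then
+ ** optional debug info, all in a single allocation. */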
+
+ /* Allocate prototype object and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(ls->L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->numparams = (uint8_t)numparams;
+ pt->framesize = (uint8_t)framesize;
+ pt->sizebc = sizebc;
+ setmref(pt->k, (char *)pt + ofsk);
+ setmref(pt->uv, (char *)pt + ofsuv);
+ pt->sizekgc = 0; /* Set to zero until fully initialized. */
+ pt->sizekn = sizekn;
+ pt->sizept = sizept;
+ pt->sizeuv = (uint8_t)sizeuv;
+ pt->flags = (uint8_t)flags;
+ pt->trace = 0;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(sizekgc+1)) = 0;
+
+ /* Read bytecode instructions and upvalue refs. */
+ bcread_bytecode(ls, pt, sizebc);
+ bcread_uv(ls, pt, sizeuv);
+
+ /* Read constants. */
+ bcread_kgc(ls, pt, sizekgc);
+ pt->sizekgc = sizekgc;
+ bcread_knum(ls, pt, sizekn);
+
+ /* Read and initialize debug info. */
+ pt->firstline = firstline;
+ pt->numline = numline;
+ if (sizedbg) {
+ MSize sizeli = (sizebc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+ setmref(pt->lineinfo, (char *)pt + ofsdbg);
+ setmref(pt->uvinfo, (char *)pt + ofsdbg + sizeli);
+ bcread_dbg(ls, pt, sizedbg);
+ setmref(pt->varinfo, bcread_varinfo(pt));
+ } else {
+ setmref(pt->lineinfo, NULL);
+ setmref(pt->uvinfo, NULL);
+ setmref(pt->varinfo, NULL);
+ }
+ return pt;
+}
+
+/* Read and check header of bytecode dump. */
+static int bcread_header(LexState *ls)
+{
+ uint32_t flags;
+ bcread_want(ls, 3+5+5);
+ if (bcread_byte(ls) != BCDUMP_HEAD2 ||
+ bcread_byte(ls) != BCDUMP_HEAD3 ||
+ bcread_byte(ls) != BCDUMP_VERSION) return 0;
+ bcread_flags(ls) = flags = bcread_uleb128(ls);
+ if ((flags & ~(BCDUMP_F_KNOWN)) != 0) return 0;
+ if ((flags & BCDUMP_F_FR2) != LJ_FR2*BCDUMP_F_FR2) return 0;
+ if ((flags & BCDUMP_F_FFI)) {
+#if LJ_HASFFI
+ lua_State *L = ls->L;
+ ctype_loadffi(L);
+#else
+ return 0;
+#endif
+ }
+ if ((flags & BCDUMP_F_STRIP)) {
+ ls->chunkname = lj_str_newz(ls->L, ls->chunkarg);
+ } else {
+ MSize len = bcread_uleb128(ls);
+ bcread_need(ls, len);
+ ls->chunkname = lj_str_new(ls->L, (const char *)bcread_mem(ls, len), len);
+ }
+ return 1; /* Ok. */
+}
+
+/* Read a bytecode dump. */
+GCproto *lj_bcread(LexState *ls)
+{
+ lua_State *L = ls->L;
+ lj_assertLS(ls->c == BCDUMP_HEAD1, "bad bytecode header");
+ bcread_savetop(L, ls, L->top);
+ lj_buf_reset(&ls->sb);
+ /* Check for a valid bytecode dump header. */
+ if (!bcread_header(ls))
+ bcread_error(ls, LJ_ERR_BCFMT);
+ for (;;) { /* Process all prototypes in the bytecode dump. */
+ GCproto *pt;
+ MSize len;
+ const char *startp;
+ /* Read length. */
+ if (ls->p < ls->pe && ls->p[0] == 0) { /* Shortcut EOF. */
+ ls->p++;
+ break;
+ }
+ bcread_want(ls, 5);
+ len = bcread_uleb128(ls);
+ if (!len) break; /* EOF */
+ bcread_need(ls, len);
+ startp = ls->p;
+ pt = lj_bcread_proto(ls);
+ if (ls->p != startp + len)
+ bcread_error(ls, LJ_ERR_BCBAD);
+ setprotoV(L, L->top, pt);
+ incr_top(L);
+ }
+ if ((ls->pe != ls->p && !ls->endmark) || L->top-1 != bcread_oldtop(L, ls))
+ bcread_error(ls, LJ_ERR_BCBAD);
+ /* Pop off last prototype. */
+ L->top--;
+ return protoV(L->top);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_bcwrite.c b/libs/luajit-cmake/luajit/src/lj_bcwrite.c
new file mode 100644
index 0000000..2c70ff4
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_bcwrite.c
@@ -0,0 +1,372 @@
+/*
+** Bytecode writer.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_bcwrite_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_buf.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#if LJ_HASJIT
+#include "lj_dispatch.h"
+#include "lj_jit.h"
+#endif
+#include "lj_strfmt.h"
+#include "lj_bcdump.h"
+#include "lj_vm.h"
+
+/* Context for bytecode writer. */
+typedef struct BCWriteCtx {
+ SBuf sb; /* Output buffer. */
+ GCproto *pt; /* Root prototype. */
+ lua_Writer wfunc; /* Writer callback. */
+ void *wdata; /* Writer callback data. */
+ int strip; /* Strip debug info. */
+ int status; /* Status from writer callback. */
+#ifdef LUA_USE_ASSERT
+ global_State *g;
+#endif
+} BCWriteCtx;
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertBCW(c, ...) lj_assertG_(ctx->g, (c), __VA_ARGS__)
+#else
+#define lj_assertBCW(c, ...) ((void)ctx)
+#endif
+
+/* -- Bytecode writer ----------------------------------------------------- */
+
+/* Write a single constant key/value of a template table. */
+static void bcwrite_ktabk(BCWriteCtx *ctx, cTValue *o, int narrow)
+{
+ char *p = lj_buf_more(&ctx->sb, 1+10);
+ if (tvisstr(o)) {
+ const GCstr *str = strV(o);
+ MSize len = str->len;
+ p = lj_buf_more(&ctx->sb, 5+len);
+ p = lj_strfmt_wuleb128(p, BCDUMP_KTAB_STR+len);
+ p = lj_buf_wmem(p, strdata(str), len);
+ } else if (tvisint(o)) {
+ *p++ = BCDUMP_KTAB_INT;
+ p = lj_strfmt_wuleb128(p, intV(o));
+ } else if (tvisnum(o)) {
+ if (!LJ_DUALNUM && narrow) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ int32_t k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ *p++ = BCDUMP_KTAB_INT;
+ p = lj_strfmt_wuleb128(p, k);
+ ctx->sb.w = p;
+ return;
+ }
+ }
+ *p++ = BCDUMP_KTAB_NUM;
+ p = lj_strfmt_wuleb128(p, o->u32.lo);
+ p = lj_strfmt_wuleb128(p, o->u32.hi);
+ } else {
+ lj_assertBCW(tvispri(o), "unhandled type %d", itype(o));
+ *p++ = BCDUMP_KTAB_NIL+~itype(o);
+ }
+ ctx->sb.w = p;
+}
+
+/* Write a template table. */
+static void bcwrite_ktab(BCWriteCtx *ctx, char *p, const GCtab *t)
+{
+ MSize narray = 0, nhash = 0;
+ if (t->asize > 0) { /* Determine max. length of array part. */
+ ptrdiff_t i;
+ TValue *array = tvref(t->array);
+ for (i = (ptrdiff_t)t->asize-1; i >= 0; i--)
+ if (!tvisnil(&array[i]))
+ break;
+ narray = (MSize)(i+1);
+ }
+ if (t->hmask > 0) { /* Count number of used hash slots. */
+ MSize i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (i = 0; i <= hmask; i++)
+ nhash += !tvisnil(&node[i].val);
+ }
+ /* Write number of array slots and hash slots. */
+ p = lj_strfmt_wuleb128(p, narray);
+ p = lj_strfmt_wuleb128(p, nhash);
+ ctx->sb.w = p;
+ if (narray) { /* Write array entries (may contain nil). */
+ MSize i;
+ TValue *o = tvref(t->array);
+ for (i = 0; i < narray; i++, o++)
+ bcwrite_ktabk(ctx, o, 1);
+ }
+ if (nhash) { /* Write hash entries. */
+ MSize i = nhash;
+ Node *node = noderef(t->node) + t->hmask;
+ for (;; node--)
+ if (!tvisnil(&node->val)) {
+ bcwrite_ktabk(ctx, &node->key, 0);
+ bcwrite_ktabk(ctx, &node->val, 1);
+ if (--i == 0) break;
+ }
+ }
+}
+
+/* Write GC constants of a prototype. */
+static void bcwrite_kgc(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekgc = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - (ptrdiff_t)sizekgc;
+ for (i = 0; i < sizekgc; i++, kr++) {
+ GCobj *o = gcref(*kr);
+ MSize tp, need = 1;
+ char *p;
+ /* Determine constant type and needed size. */
+ if (o->gch.gct == ~LJ_TSTR) {
+ tp = BCDUMP_KGC_STR + gco2str(o)->len;
+ need = 5+gco2str(o)->len;
+ } else if (o->gch.gct == ~LJ_TPROTO) {
+ lj_assertBCW((pt->flags & PROTO_CHILD), "prototype has unexpected child");
+ tp = BCDUMP_KGC_CHILD;
+#if LJ_HASFFI
+ } else if (o->gch.gct == ~LJ_TCDATA) {
+ CTypeID id = gco2cd(o)->ctypeid;
+ need = 1+4*5;
+ if (id == CTID_INT64) {
+ tp = BCDUMP_KGC_I64;
+ } else if (id == CTID_UINT64) {
+ tp = BCDUMP_KGC_U64;
+ } else {
+ lj_assertBCW(id == CTID_COMPLEX_DOUBLE,
+ "bad cdata constant CTID %d", id);
+ tp = BCDUMP_KGC_COMPLEX;
+ }
+#endif
+ } else {
+ lj_assertBCW(o->gch.gct == ~LJ_TTAB,
+ "bad constant GC type %d", o->gch.gct);
+ tp = BCDUMP_KGC_TAB;
+ need = 1+2*5;
+ }
+ /* Write constant type. */
+ p = lj_buf_more(&ctx->sb, need);
+ p = lj_strfmt_wuleb128(p, tp);
+ /* Write constant data (if any). */
+ if (tp >= BCDUMP_KGC_STR) {
+ p = lj_buf_wmem(p, strdata(gco2str(o)), gco2str(o)->len);
+ } else if (tp == BCDUMP_KGC_TAB) {
+ bcwrite_ktab(ctx, p, gco2tab(o));
+ continue;
+#if LJ_HASFFI
+ } else if (tp != BCDUMP_KGC_CHILD) {
+ cTValue *q = (TValue *)cdataptr(gco2cd(o));
+ p = lj_strfmt_wuleb128(p, q[0].u32.lo);
+ p = lj_strfmt_wuleb128(p, q[0].u32.hi);
+ if (tp == BCDUMP_KGC_COMPLEX) {
+ p = lj_strfmt_wuleb128(p, q[1].u32.lo);
+ p = lj_strfmt_wuleb128(p, q[1].u32.hi);
+ }
+#endif
+ }
+ ctx->sb.w = p;
+ }
+}
+
+/* Write number constants of a prototype. */
+static void bcwrite_knum(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize i, sizekn = pt->sizekn;
+ cTValue *o = mref(pt->k, TValue);
+ char *p = lj_buf_more(&ctx->sb, 10*sizekn);
+ for (i = 0; i < sizekn; i++, o++) {
+ int32_t k;
+ if (tvisint(o)) {
+ k = intV(o);
+ goto save_int;
+ } else {
+ /* Write a 33 bit ULEB128 for the int (lsb=0) or loword (lsb=1). */
+ if (!LJ_DUALNUM) { /* Narrow number constants to integers. */
+ lua_Number num = numV(o);
+ k = lj_num2int(num);
+ if (num == (lua_Number)k) { /* -0 is never a constant. */
+ save_int:
+ p = lj_strfmt_wuleb128(p, 2*(uint32_t)k | ((uint32_t)k&0x80000000u));
+ if (k < 0)
+ p[-1] = (p[-1] & 7) | ((k>>27) & 0x18);
+ continue;
+ }
+ }
+ p = lj_strfmt_wuleb128(p, 1+(2*o->u32.lo | (o->u32.lo & 0x80000000u)));
+ if (o->u32.lo >= 0x80000000u)
+ p[-1] = (p[-1] & 7) | ((o->u32.lo>>27) & 0x18);
+ p = lj_strfmt_wuleb128(p, o->u32.hi);
+ }
+ }
+ ctx->sb.w = p;
+}
+
+/* Write bytecode instructions. */
+static char *bcwrite_bytecode(BCWriteCtx *ctx, char *p, GCproto *pt)
+{
+ MSize nbc = pt->sizebc-1; /* Omit the [JI]FUNC* header. */
+#if LJ_HASJIT
+ uint8_t *q = (uint8_t *)p;
+#endif
+ p = lj_buf_wmem(p, proto_bc(pt)+1, nbc*(MSize)sizeof(BCIns));
+ UNUSED(ctx);
+#if LJ_HASJIT
+ /* Unpatch modified bytecode containing ILOOP/JLOOP etc. */
+ if ((pt->flags & PROTO_ILOOP) || pt->trace) {
+ jit_State *J = L2J(sbufL(&ctx->sb));
+ MSize i;
+ for (i = 0; i < nbc; i++, q += sizeof(BCIns)) {
+ BCOp op = (BCOp)q[LJ_ENDIAN_SELECT(0, 3)];
+ if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP ||
+ op == BC_JFORI) {
+ q[LJ_ENDIAN_SELECT(0, 3)] = (uint8_t)(op-BC_IFORL+BC_FORL);
+ } else if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+ BCReg rd = q[LJ_ENDIAN_SELECT(2, 1)] + (q[LJ_ENDIAN_SELECT(3, 0)] << 8);
+ memcpy(q, &traceref(J, rd)->startins, 4);
+ }
+ }
+ }
+#endif
+ return p;
+}
+
+/* Write prototype. */
+static void bcwrite_proto(BCWriteCtx *ctx, GCproto *pt)
+{
+ MSize sizedbg = 0;
+ char *p;
+
+ /* Recursively write children of prototype. */
+ if ((pt->flags & PROTO_CHILD)) {
+ ptrdiff_t i, n = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - 1;
+ for (i = 0; i < n; i++, kr--) {
+ GCobj *o = gcref(*kr);
+ if (o->gch.gct == ~LJ_TPROTO)
+ bcwrite_proto(ctx, gco2pt(o));
+ }
+ }
+
+ /* Start writing the prototype info to a buffer. */
+ p = lj_buf_need(&ctx->sb,
+ 5+4+6*5+(pt->sizebc-1)*(MSize)sizeof(BCIns)+pt->sizeuv*2);
+ p += 5; /* Leave room for final size. */
+
+ /* Write prototype header. */
+ *p++ = (pt->flags & (PROTO_CHILD|PROTO_VARARG|PROTO_FFI));
+ *p++ = pt->numparams;
+ *p++ = pt->framesize;
+ *p++ = pt->sizeuv;
+ p = lj_strfmt_wuleb128(p, pt->sizekgc);
+ p = lj_strfmt_wuleb128(p, pt->sizekn);
+ p = lj_strfmt_wuleb128(p, pt->sizebc-1);
+ if (!ctx->strip) {
+ if (proto_lineinfo(pt))
+ sizedbg = pt->sizept - (MSize)((char *)proto_lineinfo(pt) - (char *)pt);
+ p = lj_strfmt_wuleb128(p, sizedbg);
+ if (sizedbg) {
+ p = lj_strfmt_wuleb128(p, pt->firstline);
+ p = lj_strfmt_wuleb128(p, pt->numline);
+ }
+ }
+
+ /* Write bytecode instructions and upvalue refs. */
+ p = bcwrite_bytecode(ctx, p, pt);
+ p = lj_buf_wmem(p, proto_uv(pt), pt->sizeuv*2);
+ ctx->sb.w = p;
+
+ /* Write constants. */
+ bcwrite_kgc(ctx, pt);
+ bcwrite_knum(ctx, pt);
+
+ /* Write debug info, if not stripped. */
+ if (sizedbg) {
+ p = lj_buf_more(&ctx->sb, sizedbg);
+ p = lj_buf_wmem(p, proto_lineinfo(pt), sizedbg);
+ ctx->sb.w = p;
+ }
+
+ /* Pass buffer to writer function. */
+ if (ctx->status == 0) {
+ MSize n = sbuflen(&ctx->sb) - 5;
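+ /* (lj_fls(n)+8)*9 >> 6 evaluates to lj_fls(n)/7 + 1, i.e. the number of
+ ** ULEB128 bytes needed for n, computed without a division. */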
+ MSize nn = (lj_fls(n)+8)*9 >> 6;
+ char *q = ctx->sb.b + (5 - nn);
+ p = lj_strfmt_wuleb128(q, n); /* Fill in final size. */
+ lj_assertBCW(p == ctx->sb.b + 5, "bad ULEB128 write");
+ ctx->status = ctx->wfunc(sbufL(&ctx->sb), q, nn+n, ctx->wdata);
+ }
+}
+
+/* Write header of bytecode dump. */
+static void bcwrite_header(BCWriteCtx *ctx)
+{
+ GCstr *chunkname = proto_chunkname(ctx->pt);
+ const char *name = strdata(chunkname);
+ MSize len = chunkname->len;
+ char *p = lj_buf_need(&ctx->sb, 5+5+len);
+ *p++ = BCDUMP_HEAD1;
+ *p++ = BCDUMP_HEAD2;
+ *p++ = BCDUMP_HEAD3;
+ *p++ = BCDUMP_VERSION;
+ *p++ = (ctx->strip ? BCDUMP_F_STRIP : 0) +
+ LJ_BE*BCDUMP_F_BE +
+ ((ctx->pt->flags & PROTO_FFI) ? BCDUMP_F_FFI : 0) +
+ LJ_FR2*BCDUMP_F_FR2;
+ if (!ctx->strip) {
+ p = lj_strfmt_wuleb128(p, len);
+ p = lj_buf_wmem(p, name, len);
+ }
+ ctx->status = ctx->wfunc(sbufL(&ctx->sb), ctx->sb.b,
+ (MSize)(p - ctx->sb.b), ctx->wdata);
+}
+
+/* Write footer of bytecode dump. */
+static void bcwrite_footer(BCWriteCtx *ctx)
+{
+ if (ctx->status == 0) {
+ uint8_t zero = 0;
+ ctx->status = ctx->wfunc(sbufL(&ctx->sb), &zero, 1, ctx->wdata);
+ }
+}
+
+/* Protected callback for bytecode writer. */
+static TValue *cpwriter(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ BCWriteCtx *ctx = (BCWriteCtx *)ud;
+ UNUSED(L); UNUSED(dummy);
+ lj_buf_need(&ctx->sb, 1024); /* Avoids resize for most prototypes. */
+ bcwrite_header(ctx);
+ bcwrite_proto(ctx, ctx->pt);
+ bcwrite_footer(ctx);
+ return NULL;
+}
+
+/* Write bytecode for a prototype. */
+int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer, void *data,
+ int strip)
+{
+ BCWriteCtx ctx;
+ int status;
+ ctx.pt = pt;
+ ctx.wfunc = writer;
+ ctx.wdata = data;
+ ctx.strip = strip;
+ ctx.status = 0;
+#ifdef LUA_USE_ASSERT
+ ctx.g = G(L);
+#endif
+ lj_buf_init(L, &ctx.sb);
+ status = lj_vm_cpcall(L, NULL, &ctx, cpwriter);
+ if (status == 0) status = ctx.status;
+ lj_buf_free(G(sbufL(&ctx.sb)), &ctx.sb);
+ return status;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_buf.c b/libs/luajit-cmake/luajit/src/lj_buf.c
new file mode 100644
index 0000000..cf268af
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_buf.c
@@ -0,0 +1,305 @@
+/*
+** Buffer handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_buf_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_strfmt.h"
+
+/* -- Buffer management --------------------------------------------------- */
+
+static void buf_grow(SBuf *sb, MSize sz)
+{
+ MSize osz = sbufsz(sb), len = sbuflen(sb), nsz = osz;
+ char *b;
+ GCSize flag;
+ if (nsz < LJ_MIN_SBUF) nsz = LJ_MIN_SBUF;
+ while (nsz < sz) nsz += nsz;
+ flag = sbufflag(sb);
+ if ((flag & SBUF_FLAG_COW)) { /* Copy-on-write semantics. */
+ lj_assertG_(G(sbufL(sb)), sb->w == sb->e, "bad SBuf COW");
+ b = (char *)lj_mem_new(sbufL(sb), nsz);
+ setsbufflag(sb, flag & ~(GCSize)SBUF_FLAG_COW);
+ setgcrefnull(sbufX(sb)->cowref);
+ memcpy(b, sb->b, osz);
+ } else {
+ b = (char *)lj_mem_realloc(sbufL(sb), sb->b, osz, nsz);
+ }
+ if ((flag & SBUF_FLAG_EXT)) {
+ sbufX(sb)->r = sbufX(sb)->r - sb->b + b; /* Adjust read pointer, too. */
+ }
+ /* Adjust buffer pointers. */
+ sb->b = b;
+ sb->w = b + len;
+ sb->e = b + nsz;
+ if ((flag & SBUF_FLAG_BORROW)) { /* Adjust borrowed buffer pointers. */
+ SBuf *bsb = mref(sbufX(sb)->bsb, SBuf);
+ bsb->b = b;
+ bsb->w = b + len;
+ bsb->e = b + nsz;
+ }
+}
+
+LJ_NOINLINE char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz)
+{
+ lj_assertG_(G(sbufL(sb)), sz > sbufsz(sb), "SBuf overflow");
+ if (LJ_UNLIKELY(sz > LJ_MAX_BUF))
+ lj_err_mem(sbufL(sb));
+ buf_grow(sb, sz);
+ return sb->b;
+}
+
+LJ_NOINLINE char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz)
+{
+ if (sbufisext(sb)) {
+ SBufExt *sbx = (SBufExt *)sb;
+ MSize len = sbufxlen(sbx);
+ if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF))
+ lj_err_mem(sbufL(sbx));
+ if (len + sz > sbufsz(sbx)) { /* Must grow. */
+ buf_grow((SBuf *)sbx, len + sz);
+ } else if (sbufiscow(sb) || sbufxslack(sbx) < (sbufsz(sbx) >> 3)) {
+ /* Also grow to avoid excessive compactions, if slack < size/8. */
+ buf_grow((SBuf *)sbx, sbuflen(sbx) + sz); /* Not sbufxlen! */
+ return sbx->w;
+ }
+ if (sbx->r != sbx->b) { /* Compact by moving down. */
+ memmove(sbx->b, sbx->r, len);
+ sbx->r = sbx->b;
+ sbx->w = sbx->b + len;
+ lj_assertG_(G(sbufL(sbx)), len + sz <= sbufsz(sbx), "bad SBuf compact");
+ }
+ } else {
+ MSize len = sbuflen(sb);
+ lj_assertG_(G(sbufL(sb)), sz > sbufleft(sb), "SBuf overflow");
+ if (LJ_UNLIKELY(sz > LJ_MAX_BUF || len + sz > LJ_MAX_BUF))
+ lj_err_mem(sbufL(sb));
+ buf_grow(sb, len + sz);
+ }
+ return sb->w;
+}
+
+void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb)
+{
+ char *b = sb->b;
+ MSize osz = (MSize)(sb->e - b);
+ if (osz > 2*LJ_MIN_SBUF) {
+ MSize n = (MSize)(sb->w - b);
+ b = lj_mem_realloc(L, b, osz, (osz >> 1));
+ sb->b = b;
+ sb->w = b + n;
+ sb->e = b + (osz >> 1);
+ }
+ lj_assertG_(G(sbufL(sb)), !sbufisext(sb), "YAGNI shrink SBufExt");
+}
+
+char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz)
+{
+ SBuf *sb = &G(L)->tmpbuf;
+ setsbufL(sb, L);
+ return lj_buf_need(sb, sz);
+}
+
+#if LJ_HASBUFFER && LJ_HASJIT
+void lj_bufx_set(SBufExt *sbx, const char *p, MSize len, GCobj *ref)
+{
+ lua_State *L = sbufL(sbx);
+ lj_bufx_free(L, sbx);
+ lj_bufx_set_cow(L, sbx, p, len);
+ setgcref(sbx->cowref, ref);
+ lj_gc_objbarrier(L, (GCudata *)sbx - 1, ref);
+}
+
+#if LJ_HASFFI
+MSize LJ_FASTCALL lj_bufx_more(SBufExt *sbx, MSize sz)
+{
+ lj_buf_more((SBuf *)sbx, sz);
+ return sbufleft(sbx);
+}
+#endif
+#endif
+
+/* -- Low-level buffer put operations ------------------------------------- */
+
+SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len)
+{
+ char *w = lj_buf_more(sb, len);
+ w = lj_buf_wmem(w, q, len);
+ sb->w = w;
+ return sb;
+}
+
+#if LJ_HASJIT || LJ_HASFFI
+static LJ_NOINLINE SBuf * LJ_FASTCALL lj_buf_putchar2(SBuf *sb, int c)
+{
+ char *w = lj_buf_more2(sb, 1);
+ *w++ = (char)c;
+ sb->w = w;
+ return sb;
+}
+
+SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c)
+{
+ char *w = sb->w;
+ if (LJ_LIKELY(w < sb->e)) {
+ *w++ = (char)c;
+ sb->w = w;
+ return sb;
+ }
+ return lj_buf_putchar2(sb, c);
+}
+#endif
+
+SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s)
+{
+ MSize len = s->len;
+ char *w = lj_buf_more(sb, len);
+ w = lj_buf_wmem(w, strdata(s), len);
+ sb->w = w;
+ return sb;
+}
+
+/* -- High-level buffer put operations ------------------------------------ */
+
+SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s)
+{
+ MSize len = s->len;
+ char *w = lj_buf_more(sb, len), *e = w+len;
+ const char *q = strdata(s)+len-1;
+ while (w < e)
+ *w++ = *q--;
+ sb->w = w;
+ return sb;
+}
+
+SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s)
+{
+ MSize len = s->len;
+ char *w = lj_buf_more(sb, len), *e = w+len;
+ const char *q = strdata(s);
+ for (; w < e; w++, q++) {
+ uint32_t c = *(unsigned char *)q;
+#if LJ_TARGET_PPC
+ *w = c + ((c >= 'A' && c <= 'Z') << 5);
+#else
+ if (c >= 'A' && c <= 'Z') c += 0x20;
+ *w = c;
+#endif
+ }
+ sb->w = w;
+ return sb;
+}
+
+SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s)
+{
+ MSize len = s->len;
+ char *w = lj_buf_more(sb, len), *e = w+len;
+ const char *q = strdata(s);
+ for (; w < e; w++, q++) {
+ uint32_t c = *(unsigned char *)q;
+#if LJ_TARGET_PPC
+ *w = c - ((c >= 'a' && c <= 'z') << 5);
+#else
+ if (c >= 'a' && c <= 'z') c -= 0x20;
+ *w = c;
+#endif
+ }
+ sb->w = w;
+ return sb;
+}
+
+SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep)
+{
+ MSize len = s->len;
+ if (rep > 0 && len) {
+ uint64_t tlen = (uint64_t)rep * len;
+ char *w;
+ if (LJ_UNLIKELY(tlen > LJ_MAX_STR))
+ lj_err_mem(sbufL(sb));
+ w = lj_buf_more(sb, (MSize)tlen);
+ if (len == 1) { /* Optimize a common case. */
+ uint32_t c = strdata(s)[0];
+ do { *w++ = c; } while (--rep > 0);
+ } else {
+ const char *e = strdata(s) + len;
+ do {
+ const char *q = strdata(s);
+ do { *w++ = *q++; } while (q < e);
+ } while (--rep > 0);
+ }
+ sb->w = w;
+ }
+ return sb;
+}
+
+SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, int32_t i, int32_t e)
+{
+ MSize seplen = sep ? sep->len : 0;
+ if (i <= e) {
+ for (;;) {
+ cTValue *o = lj_tab_getint(t, i);
+ char *w;
+ if (!o) {
+ badtype: /* Error: bad element type. */
+ sb->w = (char *)(intptr_t)i; /* Store failing index. */
+ return NULL;
+ } else if (tvisstr(o)) {
+ MSize len = strV(o)->len;
+ w = lj_buf_wmem(lj_buf_more(sb, len + seplen), strVdata(o), len);
+ } else if (tvisint(o)) {
+ w = lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT+seplen), intV(o));
+ } else if (tvisnum(o)) {
+ w = lj_buf_more(lj_strfmt_putfnum(sb, STRFMT_G14, numV(o)), seplen);
+ } else {
+ goto badtype;
+ }
+ if (i++ == e) {
+ sb->w = w;
+ break;
+ }
+ if (seplen) w = lj_buf_wmem(w, strdata(sep), seplen);
+ sb->w = w;
+ }
+ }
+ return sb;
+}
+
+/* -- Miscellaneous buffer operations ------------------------------------- */
+
+GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb)
+{
+ return lj_str_new(sbufL(sb), sb->b, sbuflen(sb));
+}
+
+/* Concatenate two strings. */
+GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2)
+{
+ MSize len1 = s1->len, len2 = s2->len;
+ char *buf = lj_buf_tmp(L, len1 + len2);
+ memcpy(buf, strdata(s1), len1);
+ memcpy(buf+len1, strdata(s2), len2);
+ return lj_str_new(L, buf, len1 + len2);
+}
+
+/* Read ULEB128 from buffer. */
+uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp)
+{
+ const uint8_t *w = (const uint8_t *)*pp;
+ uint32_t v = *w++;
+ if (LJ_UNLIKELY(v >= 0x80)) {
+ int sh = 0;
+ v &= 0x7f;
+ do { v |= ((*w & 0x7f) << (sh += 7)); } while (*w++ >= 0x80);
+ }
+ *pp = (const char *)w;
+ return v;
+}
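+
+/* Example: 300 is encoded as the bytes 0xac 0x02 (0x2c | 0x80, then 300>>7),
+** which the loop above reassembles as 0x2c | (0x02 << 7) == 300.
+*/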
+
diff --git a/libs/luajit-cmake/luajit/src/lj_buf.h b/libs/luajit-cmake/luajit/src/lj_buf.h
new file mode 100644
index 0000000..7611420
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_buf.h
@@ -0,0 +1,198 @@
+/*
+** Buffer handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_BUF_H
+#define _LJ_BUF_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_str.h"
+
+/* Resizable string buffers. */
+
+/* The SBuf struct definition is in lj_obj.h:
+** char *w; Write pointer.
+** char *e; End pointer.
+** char *b; Base pointer.
+** MRef L; lua_State, used for buffer resizing. Extension bits in 3 LSB.
+*/
+
+/* Extended string buffer. */
+typedef struct SBufExt {
+ SBufHeader;
+ union {
+ GCRef cowref; /* Copy-on-write object reference. */
+ MRef bsb; /* Borrowed string buffer. */
+ };
+ char *r; /* Read pointer. */
+ GCRef dict_str; /* Serialization string dictionary table. */
+ GCRef dict_mt; /* Serialization metatable dictionary table. */
+ int depth; /* Remaining recursion depth. */
+} SBufExt;
+
+#define sbufsz(sb) ((MSize)((sb)->e - (sb)->b))
+#define sbuflen(sb) ((MSize)((sb)->w - (sb)->b))
+#define sbufleft(sb) ((MSize)((sb)->e - (sb)->w))
+#define sbufxlen(sbx) ((MSize)((sbx)->w - (sbx)->r))
+#define sbufxslack(sbx) ((MSize)((sbx)->r - (sbx)->b))
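+/* For extended buffers, w - r is the unread payload and r - b the already
+** consumed prefix, which lj_buf_more2() reclaims by compaction. */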
+
+#define SBUF_MASK_FLAG (7)
+#define SBUF_MASK_L (~(GCSize)SBUF_MASK_FLAG)
+#define SBUF_FLAG_EXT 1 /* Extended string buffer. */
+#define SBUF_FLAG_COW 2 /* Copy-on-write buffer. */
+#define SBUF_FLAG_BORROW 4 /* Borrowed string buffer. */
+
+#define sbufL(sb) \
+ ((lua_State *)(void *)(uintptr_t)(mrefu((sb)->L) & SBUF_MASK_L))
+#define setsbufL(sb, l) (setmref((sb)->L, (l)))
+#define setsbufXL(sb, l, flag) \
+ (setmrefu((sb)->L, (GCSize)(uintptr_t)(void *)(l) + (flag)))
+#define setsbufXL_(sb, l) \
+ (setmrefu((sb)->L, (GCSize)(uintptr_t)(void *)(l) | (mrefu((sb)->L) & SBUF_MASK_FLAG)))
+
+#define sbufflag(sb) (mrefu((sb)->L))
+#define sbufisext(sb) (sbufflag((sb)) & SBUF_FLAG_EXT)
+#define sbufiscow(sb) (sbufflag((sb)) & SBUF_FLAG_COW)
+#define sbufisborrow(sb) (sbufflag((sb)) & SBUF_FLAG_BORROW)
+#define sbufiscoworborrow(sb) (sbufflag((sb)) & (SBUF_FLAG_COW|SBUF_FLAG_BORROW))
+#define sbufX(sb) \
+ (lj_assertG_(G(sbufL(sb)), sbufisext(sb), "not an SBufExt"), (SBufExt *)(sb))
+#define setsbufflag(sb, flag) (setmrefu((sb)->L, (flag)))
+
+#define tvisbuf(o) \
+ (LJ_HASBUFFER && tvisudata(o) && udataV(o)->udtype == UDTYPE_BUFFER)
+#define bufV(o) check_exp(tvisbuf(o), ((SBufExt *)uddata(udataV(o))))
+
+/* Buffer management */
+LJ_FUNC char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz);
+LJ_FUNC char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz);
+LJ_FUNC void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb);
+LJ_FUNC char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz);
+
+static LJ_AINLINE void lj_buf_init(lua_State *L, SBuf *sb)
+{
+ setsbufL(sb, L);
+ sb->w = sb->e = sb->b = NULL;
+}
+
+static LJ_AINLINE void lj_buf_reset(SBuf *sb)
+{
+ sb->w = sb->b;
+}
+
+static LJ_AINLINE SBuf *lj_buf_tmp_(lua_State *L)
+{
+ SBuf *sb = &G(L)->tmpbuf;
+ setsbufL(sb, L);
+ lj_buf_reset(sb);
+ return sb;
+}
+
+static LJ_AINLINE void lj_buf_free(global_State *g, SBuf *sb)
+{
+ lj_assertG(!sbufisext(sb), "bad free of SBufExt");
+ lj_mem_free(g, sb->b, sbufsz(sb));
+}
+
+static LJ_AINLINE char *lj_buf_need(SBuf *sb, MSize sz)
+{
+ if (LJ_UNLIKELY(sz > sbufsz(sb)))
+ return lj_buf_need2(sb, sz);
+ return sb->b;
+}
+
+static LJ_AINLINE char *lj_buf_more(SBuf *sb, MSize sz)
+{
+ if (LJ_UNLIKELY(sz > sbufleft(sb)))
+ return lj_buf_more2(sb, sz);
+ return sb->w;
+}
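+
+/* Typical put-operation pattern (illustrative, mirrors lj_buf.c): reserve
+** with lj_buf_more(sb, n), write through the returned pointer, then commit
+** by storing the advanced pointer back to sb->w.
+*/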
+
+/* Extended buffer management */
+static LJ_AINLINE void lj_bufx_init(lua_State *L, SBufExt *sbx)
+{
+ memset(sbx, 0, sizeof(SBufExt));
+ setsbufXL(sbx, L, SBUF_FLAG_EXT);
+}
+
+static LJ_AINLINE void lj_bufx_set_borrow(lua_State *L, SBufExt *sbx, SBuf *sb)
+{
+ setsbufXL(sbx, L, SBUF_FLAG_EXT | SBUF_FLAG_BORROW);
+ setmref(sbx->bsb, sb);
+ sbx->r = sbx->w = sbx->b = sb->b;
+ sbx->e = sb->e;
+}
+
+static LJ_AINLINE void lj_bufx_set_cow(lua_State *L, SBufExt *sbx,
+ const char *p, MSize len)
+{
+ setsbufXL(sbx, L, SBUF_FLAG_EXT | SBUF_FLAG_COW);
+ sbx->r = sbx->b = (char *)p;
+ sbx->w = sbx->e = (char *)p + len;
+}
+
+static LJ_AINLINE void lj_bufx_reset(SBufExt *sbx)
+{
+ if (sbufiscow(sbx)) {
+ setmrefu(sbx->L, (mrefu(sbx->L) & ~(GCSize)SBUF_FLAG_COW));
+ setgcrefnull(sbx->cowref);
+ sbx->b = sbx->e = NULL;
+ }
+ sbx->r = sbx->w = sbx->b;
+}
+
+static LJ_AINLINE void lj_bufx_free(lua_State *L, SBufExt *sbx)
+{
+ if (!sbufiscoworborrow(sbx)) lj_mem_free(G(L), sbx->b, sbufsz(sbx));
+ setsbufXL(sbx, L, SBUF_FLAG_EXT);
+ setgcrefnull(sbx->cowref);
+ sbx->r = sbx->w = sbx->b = sbx->e = NULL;
+}
+
+#if LJ_HASBUFFER && LJ_HASJIT
+LJ_FUNC void lj_bufx_set(SBufExt *sbx, const char *p, MSize len, GCobj *o);
+#if LJ_HASFFI
+LJ_FUNC MSize LJ_FASTCALL lj_bufx_more(SBufExt *sbx, MSize sz);
+#endif
+#endif
+
+/* Low-level buffer put operations */
+LJ_FUNC SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len);
+#if LJ_HASJIT || LJ_HASFFI
+LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c);
+#endif
+LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s);
+
+static LJ_AINLINE char *lj_buf_wmem(char *p, const void *q, MSize len)
+{
+ return (char *)memcpy(p, q, len) + len;
+}
+
+static LJ_AINLINE void lj_buf_putb(SBuf *sb, int c)
+{
+ char *w = lj_buf_more(sb, 1);
+ *w++ = (char)c;
+ sb->w = w;
+}
+
+/* High-level buffer put operations */
+LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s);
+LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s);
+LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s);
+LJ_FUNC SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep);
+LJ_FUNC SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep,
+ int32_t i, int32_t e);
+
+/* Miscellaneous buffer operations */
+LJ_FUNCA GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb);
+LJ_FUNC GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2);
+LJ_FUNC uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp);
+
+static LJ_AINLINE GCstr *lj_buf_str(lua_State *L, SBuf *sb)
+{
+ return lj_str_new(L, sb->b, sbuflen(sb));
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_carith.c b/libs/luajit-cmake/luajit/src/lj_carith.c
new file mode 100644
index 0000000..1a2a058
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_carith.c
@@ -0,0 +1,432 @@
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_ir.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+#include "lj_strscan.h"
+
+/* -- C data arithmetic --------------------------------------------------- */
+
+/* Binary operands of an operator converted to ctypes. */
+typedef struct CDArith {
+ uint8_t *p[2];
+ CType *ct[2];
+} CDArith;
+
+/* Check arguments for arithmetic metamethods. */
+static int carith_checkarg(lua_State *L, CTState *cts, CDArith *ca)
+{
+ TValue *o = L->base;
+ int ok = 1;
+ MSize i;
+ if (o+1 >= L->top)
+ lj_err_argt(L, 1, LUA_TCDATA);
+ for (i = 0; i < 2; i++, o++) {
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ CTypeID id = (CTypeID)cd->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ } else if (ctype_isfunc(ct->info)) {
+ p = (uint8_t *)*(void **)p;
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ }
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ ca->ct[i] = ct;
+ ca->p[i] = p;
+ } else if (tvisint(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_INT32);
+ ca->p[i] = (uint8_t *)&o->i;
+ } else if (tvisnum(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_DOUBLE);
+ ca->p[i] = (uint8_t *)&o->n;
+ } else if (tvisnil(o)) {
+ ca->ct[i] = ctype_get(cts, CTID_P_VOID);
+ ca->p[i] = (uint8_t *)0;
+ } else if (tvisstr(o)) {
+ TValue *o2 = i == 0 ? o+1 : o-1;
+ CType *ct = ctype_raw(cts, cdataV(o2)->ctypeid);
+ ca->ct[i] = NULL;
+ ca->p[i] = (uint8_t *)strVdata(o);
+ ok = 0;
+ if (ctype_isenum(ct->info)) {
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, ct, strV(o), &ofs);
+ if (cct && ctype_isconstval(cct->info)) {
+ ca->ct[i] = ctype_child(cts, cct);
+ ca->p[i] = (uint8_t *)&cct->size; /* Assumes ct does not grow. */
+ ok = 1;
+ } else {
+ ca->ct[1-i] = ct; /* Use enum to improve error message. */
+ ca->p[1-i] = NULL;
+ break;
+ }
+ }
+ } else {
+ ca->ct[i] = NULL;
+ ca->p[i] = (void *)(intptr_t)1; /* To make it unequal. */
+ ok = 0;
+ }
+ }
+ return ok;
+}
+
+/* Pointer arithmetic. */
+static int carith_ptr(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ CType *ctp = ca->ct[0];
+ uint8_t *pp = ca->p[0];
+ ptrdiff_t idx;
+ CTSize sz;
+ CTypeID id;
+ GCcdata *cd;
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ uint8_t *pp2 = ca->p[1];
+ if (mm == MM_eq) { /* Pointer equality. Incompatible pointers are ok. */
+ setboolV(L->top-1, (pp == pp2));
+ return 1;
+ }
+ if (!lj_cconv_compatptr(cts, ctp, ca->ct[1], CCF_IGNQUAL))
+ return 0;
+ if (mm == MM_sub) { /* Pointer difference. */
+ intptr_t diff;
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == 0 || sz == CTSIZE_INVALID)
+ return 0;
+ diff = ((intptr_t)pp - (intptr_t)pp2) / (int32_t)sz;
+ /* All valid pointer differences on x64 are in (-2^47, +2^47),
+ ** which fits into a double without loss of precision.
+ */
+ setintptrV(L->top-1, (int32_t)diff);
+ return 1;
+ } else if (mm == MM_lt) { /* Pointer comparison (unsigned). */
+ setboolV(L->top-1, ((uintptr_t)pp < (uintptr_t)pp2));
+ return 1;
+ } else {
+ lj_assertL(mm == MM_le, "bad metamethod %d", mm);
+ setboolV(L->top-1, ((uintptr_t)pp <= (uintptr_t)pp2));
+ return 1;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(ca->ct[1]->info)))
+ return 0;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[1],
+ (uint8_t *)&idx, ca->p[1], 0);
+ if (mm == MM_sub) idx = -idx;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(ca->ct[1]->info) || ctype_isrefarray(ca->ct[1]->info))) {
+ /* Swap pointer and index. */
+ ctp = ca->ct[1]; pp = ca->p[1];
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ca->ct[0],
+ (uint8_t *)&idx, ca->p[0], 0);
+ } else {
+ return 0;
+ }
+ sz = lj_ctype_size(cts, ctype_cid(ctp->info)); /* Element size. */
+ if (sz == CTSIZE_INVALID)
+ return 0;
+ pp += idx*(int32_t)sz; /* Compute pointer + index. */
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(uint8_t **)cdataptr(cd) = pp;
+ setcdataV(L, L->top-1, cd);
+ lj_gc_check(L);
+ return 1;
+}
+
+/* 64 bit integer arithmetic. */
+static int carith_int64(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ if (ctype_isnum(ca->ct[0]->info) && ca->ct[0]->size <= 8 &&
+ ctype_isnum(ca->ct[1]->info) && ca->ct[1]->size <= 8) {
+ CTypeID id = (((ca->ct[0]->info & CTF_UNSIGNED) && ca->ct[0]->size == 8) ||
+ ((ca->ct[1]->info & CTF_UNSIGNED) && ca->ct[1]->size == 8)) ?
+ CTID_UINT64 : CTID_INT64;
+ CType *ct = ctype_get(cts, id);
+ GCcdata *cd;
+ uint64_t u0, u1, *up;
+ lj_cconv_ct_ct(cts, ct, ca->ct[0], (uint8_t *)&u0, ca->p[0], 0);
+ if (mm != MM_unm)
+ lj_cconv_ct_ct(cts, ct, ca->ct[1], (uint8_t *)&u1, ca->p[1], 0);
+ switch (mm) {
+ case MM_eq:
+ setboolV(L->top-1, (u0 == u1));
+ return 1;
+ case MM_lt:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 < (int64_t)u1) : (u0 < u1));
+ return 1;
+ case MM_le:
+ setboolV(L->top-1,
+ id == CTID_INT64 ? ((int64_t)u0 <= (int64_t)u1) : (u0 <= u1));
+ return 1;
+ default: break;
+ }
+ cd = lj_cdata_new(cts, id, 8);
+ up = (uint64_t *)cdataptr(cd);
+ setcdataV(L, L->top-1, cd);
+ switch (mm) {
+ case MM_add: *up = u0 + u1; break;
+ case MM_sub: *up = u0 - u1; break;
+ case MM_mul: *up = u0 * u1; break;
+ case MM_div:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_divi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_divu64(u0, u1);
+ break;
+ case MM_mod:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_modi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_modu64(u0, u1);
+ break;
+ case MM_pow:
+ if (id == CTID_INT64)
+ *up = (uint64_t)lj_carith_powi64((int64_t)u0, (int64_t)u1);
+ else
+ *up = lj_carith_powu64(u0, u1);
+ break;
+ case MM_unm: *up = (uint64_t)-(int64_t)u0; break;
+ default:
+ lj_assertL(0, "bad metamethod %d", mm);
+ break;
+ }
+ lj_gc_check(L);
+ return 1;
+ }
+ return 0;
+}
+
+/* Handle ctype arithmetic metamethods. */
+static int lj_carith_meta(lua_State *L, CTState *cts, CDArith *ca, MMS mm)
+{
+ cTValue *tv = NULL;
+ if (tviscdata(L->base)) {
+ CTypeID id = cdataV(L->base)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ }
+ if (!tv && L->base+1 < L->top && tviscdata(L->base+1)) {
+ CTypeID id = cdataV(L->base+1)->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, mm);
+ }
+ if (!tv) {
+ const char *repr[2];
+ int i, isenum = -1, isstr = -1;
+ if (mm == MM_eq) { /* Equality checks never raise an error. */
+ int eq = ca->p[0] == ca->p[1];
+ setboolV(L->top-1, eq);
+ setboolV(&G(L)->tmptv2, eq); /* Remember for trace recorder. */
+ return 1;
+ }
+ for (i = 0; i < 2; i++) {
+ if (ca->ct[i] && tviscdata(L->base+i)) {
+ if (ctype_isenum(ca->ct[i]->info)) isenum = i;
+ repr[i] = strdata(lj_ctype_repr(L, ctype_typeid(cts, ca->ct[i]), NULL));
+ } else {
+ if (tvisstr(&L->base[i])) isstr = i;
+ repr[i] = lj_typename(&L->base[i]);
+ }
+ }
+ if ((isenum ^ isstr) == 1)
+ lj_err_callerv(L, LJ_ERR_FFI_BADCONV, repr[isstr], repr[isenum]);
+ lj_err_callerv(L, mm == MM_len ? LJ_ERR_FFI_BADLEN :
+ mm == MM_concat ? LJ_ERR_FFI_BADCONCAT :
+ mm < MM_add ? LJ_ERR_FFI_BADCOMP : LJ_ERR_FFI_BADARITH,
+ repr[0], repr[1]);
+ }
+ return lj_meta_tailcall(L, tv);
+}
+
+/* Arithmetic operators for cdata. */
+int lj_carith_op(lua_State *L, MMS mm)
+{
+ CTState *cts = ctype_cts(L);
+ CDArith ca;
+ if (carith_checkarg(L, cts, &ca) && mm != MM_len && mm != MM_concat) {
+ if (carith_int64(L, cts, &ca, mm) || carith_ptr(L, cts, &ca, mm)) {
+ copyTV(L, &G(L)->tmptv2, L->top-1); /* Remember for trace recorder. */
+ return 1;
+ }
+ }
+ return lj_carith_meta(L, cts, &ca, mm);
+}
+
+/* -- 64 bit bit operations helpers --------------------------------------- */
+
+#if LJ_64
+#define B64DEF(name) \
+ static LJ_AINLINE uint64_t lj_carith_##name(uint64_t x, int32_t sh)
+#else
+/* Not inlined on 32 bit archs, since some of these are quite lengthy. */
+#define B64DEF(name) \
+ uint64_t LJ_NOINLINE lj_carith_##name(uint64_t x, int32_t sh)
+#endif
+
+B64DEF(shl64) { return x << (sh&63); }
+B64DEF(shr64) { return x >> (sh&63); }
+B64DEF(sar64) { return (uint64_t)((int64_t)x >> (sh&63)); }
+B64DEF(rol64) { return lj_rol(x, (sh&63)); }
+B64DEF(ror64) { return lj_ror(x, (sh&63)); }
+
+#undef B64DEF
+
+uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op)
+{
+ switch (op) {
+ case IR_BSHL-IR_BSHL: x = lj_carith_shl64(x, sh); break;
+ case IR_BSHR-IR_BSHL: x = lj_carith_shr64(x, sh); break;
+ case IR_BSAR-IR_BSHL: x = lj_carith_sar64(x, sh); break;
+ case IR_BROL-IR_BSHL: x = lj_carith_rol64(x, sh); break;
+ case IR_BROR-IR_BSHL: x = lj_carith_ror64(x, sh); break;
+ default:
+ lj_assertX(0, "bad shift op %d", op);
+ break;
+ }
+ return x;
+}
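+
+/* The op argument is the IR opcode biased by IR_BSHL, i.e. 0..4 selects
+** BSHL, BSHR, BSAR, BROL or BROR; e.g. op == IR_BROL-IR_BSHL rotates left.
+*/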
+
+/* Equivalent to lj_lib_checkbit(), but handles cdata. */
+uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id)
+{
+ TValue *o = L->base + narg-1;
+ if (o >= L->top) {
+ err:
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ } else if (LJ_LIKELY(tvisnumber(o))) {
+ /* Handled below. */
+ } else if (tviscdata(o)) {
+ CTState *cts = ctype_cts(L);
+ uint8_t *sp = (uint8_t *)cdataptr(cdataV(o));
+ CTypeID sid = cdataV(o)->ctypeid;
+ CType *s = ctype_get(cts, sid);
+ uint64_t x;
+ if (ctype_isref(s->info)) {
+ sp = *(void **)sp;
+ sid = ctype_cid(s->info);
+ }
+ s = ctype_raw(cts, sid);
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ if ((s->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) ==
+ CTINFO(CT_NUM, CTF_UNSIGNED) && s->size == 8)
+ *id = CTID_UINT64; /* Use uint64_t, since it has the highest rank. */
+ else if (!*id)
+ *id = CTID_INT64; /* Use int64_t, unless already set. */
+ lj_cconv_ct_ct(cts, ctype_get(cts, *id), s,
+ (uint8_t *)&x, sp, CCF_ARG(narg));
+ return x;
+ } else if (!(tvisstr(o) && lj_strscan_number(strV(o), o))) {
+ goto err;
+ }
+ if (LJ_LIKELY(tvisint(o))) {
+ return (uint32_t)intV(o);
+ } else {
+ int32_t i = lj_num2bit(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return (uint32_t)i;
+ }
+}
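+
+/* E.g. for bit.band(1ULL, 2) the first argument is a uint64_t cdata,
+** which sets *id to CTID_UINT64; the plain number 2 takes the tvisnumber
+** path and is returned zero-extended from its 32 bit value.
+*/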
+
+/* -- 64 bit integer arithmetic helpers ----------------------------------- */
+
+#if LJ_32 && LJ_HASJIT
+/* Signed/unsigned 64 bit multiplication. */
+int64_t lj_carith_mul64(int64_t a, int64_t b)
+{
+ return a * b;
+}
+#endif
+
+/* Unsigned 64 bit division. */
+uint64_t lj_carith_divu64(uint64_t a, uint64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Signed 64 bit division. */
+int64_t lj_carith_divi64(int64_t a, int64_t b)
+{
+ if (b == 0 || (a == (int64_t)U64x(80000000,00000000) && b == -1))
+ return U64x(80000000,00000000);
+ return a / b;
+}
+
+/* Unsigned 64 bit modulo. */
+uint64_t lj_carith_modu64(uint64_t a, uint64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ return a % b;
+}
+
+/* Signed 64 bit modulo. */
+int64_t lj_carith_modi64(int64_t a, int64_t b)
+{
+ if (b == 0) return U64x(80000000,00000000);
+ if (a == (int64_t)U64x(80000000,00000000) && b == -1) return 0;
+ return a % b;
+}
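+
+/* Illustrative edge cases implied by the guards above: x/0 and x%0 yield
+** 0x8000000000000000 for both signednesses, INT64_MIN/-1 yields INT64_MIN
+** and INT64_MIN%-1 yields 0, so the division helpers never trap in
+** hardware.
+*/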
+
+/* Unsigned 64 bit x^k. */
+uint64_t lj_carith_powu64(uint64_t x, uint64_t k)
+{
+ uint64_t y;
+ if (k == 0)
+ return 1;
+ for (; (k & 1) == 0; k >>= 1) x *= x;
+ y = x;
+ if ((k >>= 1) != 0) {
+ for (;;) {
+ x *= x;
+ if (k == 1) break;
+ if (k & 1) y *= x;
+ k >>= 1;
+ }
+ y *= x;
+ }
+ return y;
+}
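+
+/* The loop above is bottom-up square-and-multiply. E.g. for x=3, k=6: the
+** first loop strips the trailing zero bit (x=9, k=3), then y=9, the shift
+** leaves k=1, the inner loop squares once (x=81) and breaks, and the
+** final multiply gives y = 9*81 = 729 = 3^6.
+*/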
+
+/* Signed 64 bit x^k. */
+int64_t lj_carith_powi64(int64_t x, int64_t k)
+{
+ if (k == 0)
+ return 1;
+ if (k < 0) {
+ if (x == 0)
+ return U64x(7fffffff,ffffffff);
+ else if (x == 1)
+ return 1;
+ else if (x == -1)
+ return (k & 1) ? -1 : 1;
+ else
+ return 0;
+ }
+ return (int64_t)lj_carith_powu64((uint64_t)x, (uint64_t)k);
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_carith.h b/libs/luajit-cmake/luajit/src/lj_carith.h
new file mode 100644
index 0000000..9d6b1dc
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_carith.h
@@ -0,0 +1,37 @@
+/*
+** C data arithmetic.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CARITH_H
+#define _LJ_CARITH_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
+
+#if LJ_32
+LJ_FUNC uint64_t lj_carith_shl64(uint64_t x, int32_t sh);
+LJ_FUNC uint64_t lj_carith_shr64(uint64_t x, int32_t sh);
+LJ_FUNC uint64_t lj_carith_sar64(uint64_t x, int32_t sh);
+LJ_FUNC uint64_t lj_carith_rol64(uint64_t x, int32_t sh);
+LJ_FUNC uint64_t lj_carith_ror64(uint64_t x, int32_t sh);
+#endif
+LJ_FUNC uint64_t lj_carith_shift64(uint64_t x, int32_t sh, int op);
+LJ_FUNC uint64_t lj_carith_check64(lua_State *L, int narg, CTypeID *id);
+
+#if LJ_32 && LJ_HASJIT
+LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
+#endif
+LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b);
+LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b);
+LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k);
+LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ccall.c b/libs/luajit-cmake/luajit/src/lj_ccall.c
new file mode 100644
index 0000000..25f54de
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ccall.c
@@ -0,0 +1,1189 @@
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_ccall.h"
+#include "lj_trace.h"
+
+/* Target-specific handling of register arguments. */
+#if LJ_TARGET_X86
+/* -- x86 calling conventions --------------------------------------------- */
+
+#if LJ_ABI_WIN
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs bigger than 8 by reference (on stack only). */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) cc->stack[nsp++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#else
+
+#if LJ_TARGET_OSX
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size 1, 2, 4 or 8 in registers. */ \
+ cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
+ if (cc->retref) { \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp; \
+ } else { /* Struct with single FP field ends up in FPR. */ \
+ cc->resx87 = ccall_classify_struct(cts, ctr); \
+ }
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ if (cc->resx87) sp = (uint8_t *)&cc->fpr[0]; \
+ memcpy(dp, sp, ctr->size);
+
+#else
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference (in reg or on stack). */ \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp;
+
+#endif
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Return complex float in GPRs and complex double by reference. */ \
+ cc->retref = (sz > 8); \
+ if (cc->retref) { \
+ if (ngpr < maxgpr) \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ else \
+ cc->stack[nsp++] = (GPRArg)dp; \
+ }
+
+#endif
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ ngpr = maxgpr; /* Pass all structs by value on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 1; /* Pass complex by value on stack. */
+
+#define CCALL_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+/* -- Windows/x64 calling conventions ------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size 1, 2, 4 or 8 in a GPR. */ \
+ cc->retref = !(sz == 1 || sz == 2 || sz == 4 || sz == 8); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET CCALL_HANDLE_STRUCTRET
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!cc->retref) \
+ *(int64_t *)dp = *(int64_t *)sp; /* Copy complex float from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass structs of size 1, 2, 4 or 8 in a GPR by value. */ \
+ if (!(sz == 1 || sz == 2 || sz == 4 || sz == 8)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all other structs by reference. */ \
+ }
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex float in a GPR and complex double by reference. */ \
+ if (sz != 2*sizeof(float)) { \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; \
+ }
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < maxgpr) { dp = &cc->fpr[ngpr++]; nfpr = ngpr; goto done; } \
+ } else { \
+ if (ngpr < maxgpr) { dp = &cc->gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+/* -- POSIX/x64 calling conventions --------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (ccall_classify_struct(cts, ctr, rcl, 0)) { \
+ cc->retref = 1; /* Return struct by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp; \
+ } else { \
+ cc->retref = 0; /* Return small structs in registers. */ \
+ }
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ ccall_classify_struct(cts, ctr, rcl, 0); \
+ ccall_struct_ret(cc, rcl, dp, ctr->size);
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in one or two FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPR. */ \
+ *(int64_t *)dp = cc->fpr[0].l[0]; \
+ } else { /* Copy non-contiguous complex double from FPRs. */ \
+ ((int64_t *)dp)[0] = cc->fpr[0].l[0]; \
+ ((int64_t *)dp)[1] = cc->fpr[1].l[0]; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ int rcl[2]; rcl[0] = rcl[1] = 0; \
+ if (!ccall_classify_struct(cts, d, rcl, 0)) { \
+ cc->nsp = nsp; cc->ngpr = ngpr; cc->nfpr = nfpr; \
+ if (ccall_struct_arg(cc, cts, d, rcl, o, narg)) goto err_nyi; \
+ nsp = cc->nsp; ngpr = cc->ngpr; nfpr = cc->nfpr; \
+ continue; \
+ } /* Pass all other structs by value on stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 2; /* Pass complex in FPRs or on stack. Needs postprocessing. */
+
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ int n2 = ctype_isvector(d->info) ? 1 : n; \
+ if (nfpr + n2 <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += n2; \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ /* Note that reordering is explicitly allowed in the x64 ABI. */ \
+ if (n <= 2 && ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+/* -- ARM calling conventions --------------------------------------------- */
+
+#if LJ_ABI_SOFTFP
+
+#define CCALL_HANDLE_STRUCTRET \
+ /* Return structs of size <= 4 in a GPR. */ \
+ cc->retref = !(sz <= 4); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ cc->retref = 1; /* Return all complex values by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ UNUSED(dp); /* Nothing to do. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_REGARG_FP1
+#define CCALL_HANDLE_REGARG_FP2
+
+#else
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = !ccall_classify_struct(cts, ctr, ct); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ if (ccall_classify_struct(cts, ctr, ct) > 1) sp = (uint8_t *)&cc->fpr[0]; \
+ memcpy(dp, sp, ctr->size);
+
+#define CCALL_HANDLE_COMPLEXRET \
+ if (!(ct->info & CTF_VARARG)) cc->retref = 0; /* Return complex in FPRs. */
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (!(ct->info & CTF_VARARG)) memcpy(dp, &cc->fpr[0], ctr->size);
+
+#define CCALL_HANDLE_STRUCTARG \
+ isfp = (ccall_classify_struct(cts, d, ct) > 1);
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ isfp = 1; /* Pass complex by value in FPRs or on stack. */
+
+#define CCALL_HANDLE_REGARG_FP1 \
+ if (isfp && !(ct->info & CTF_VARARG)) { \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
+ if (nfpr + (n >> 1) <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += (n >> 1); \
+ goto done; \
+ } \
+ } else { \
+ if (sz > 1 && fprodd != nfpr) fprodd = 0; \
+ if (fprodd) { \
+ if (2*nfpr+n <= 2*CCALL_NARG_FPR+1) { \
+ dp = (void *)&cc->fpr[fprodd-1].f[1]; \
+ nfpr += (n >> 1); \
+ if ((n & 1)) fprodd = 0; else fprodd = nfpr-1; \
+ goto done; \
+ } \
+ } else { \
+ if (2*nfpr+n <= 2*CCALL_NARG_FPR) { \
+ dp = (void *)&cc->fpr[nfpr]; \
+ nfpr += (n >> 1); \
+ if ((n & 1)) fprodd = ++nfpr; else fprodd = 0; \
+ goto done; \
+ } \
+ } \
+ } \
+ fprodd = 0; /* No reordering after the first FP value is on stack. */ \
+ } else {
+
+#define CCALL_HANDLE_REGARG_FP2 }
+
+#endif
+
+#define CCALL_HANDLE_REGARG \
+ CCALL_HANDLE_REGARG_FP1 \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) { \
+ if (ngpr < maxgpr) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ } \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ } CCALL_HANDLE_REGARG_FP2
+
+#define CCALL_HANDLE_RET \
+ if ((ct->info & CTF_VARARG)) sp = (uint8_t *)&cc->gpr[0];
+
+#elif LJ_TARGET_ARM64
+/* -- ARM64 calling conventions ------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = !ccall_classify_struct(cts, ctr); \
+ if (cc->retref) cc->retp = dp;
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ unsigned int cl = ccall_classify_struct(cts, ctr); \
+ if ((cl & 4)) { /* Combine float HFA from separate registers. */ \
+ CTSize i = (cl >> 8) - 1; \
+ do { ((uint32_t *)dp)[i] = cc->fpr[i].lo; } while (i--); \
+ } else { \
+ if (cl > 1) sp = (uint8_t *)&cc->fpr[0]; \
+ memcpy(dp, sp, ctr->size); \
+ }
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in one or two FPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
+ ((float *)dp)[0] = cc->fpr[0].f; \
+ ((float *)dp)[1] = cc->fpr[1].f; \
+ } else { /* Copy complex double from FPRs. */ \
+ ((double *)dp)[0] = cc->fpr[0].d; \
+ ((double *)dp)[1] = cc->fpr[1].d; \
+ }
+
+#define CCALL_HANDLE_STRUCTARG \
+ unsigned int cl = ccall_classify_struct(cts, d); \
+ if (cl == 0) { /* Pass struct by reference. */ \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; \
+ } else if (cl > 1) { /* Pass struct in FPRs or on stack. */ \
+ isfp = (cl & 4) ? 2 : 1; \
+ } /* else: Pass struct in GPRs or on stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in separate (!) FPRs or on stack. */ \
+ isfp = sz == 2*sizeof(float) ? 2 : 1;
+
+#define CCALL_HANDLE_REGARG \
+ if (LJ_TARGET_OSX && isva) { \
+    /* iOS: All variadic arguments are on the stack. */ \
+ } else if (isfp) { /* Try to pass argument in FPRs. */ \
+ int n2 = ctype_isvector(d->info) ? 1 : \
+ isfp == 1 ? n : (d->size >> (4-isfp)); \
+ if (nfpr + n2 <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += n2; \
+ goto done; \
+ } else { \
+ nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \
+ if (LJ_TARGET_OSX && d->size < 8) goto err_nyi; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ if (!LJ_TARGET_OSX && (d->info & CTF_ALIGN) > CTALIGN_PTR) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } else { \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ if (LJ_TARGET_OSX && d->size < 8) goto err_nyi; \
+ } \
+ }
+
+#if LJ_BE
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)&cc->fpr[0].f;
+#endif
+
+#elif LJ_TARGET_PPC
+/* -- PPC calling conventions --------------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 2 or 4 GPRs. */ \
+ cc->retref = 0;
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy complex from GPRs. */
+
+#define CCALL_HANDLE_STRUCTARG \
+ rp = cdataptr(lj_cdata_new(cts, did, sz)); \
+ sz = CTSIZE_PTR; /* Pass all structs by reference. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_GPR \
+ /* Try to pass argument in GPRs. */ \
+ if (n > 1) { \
+ /* int64_t or complex (float). */ \
+ lj_assertL(n == 2 || n == 4, "bad GPR size %d", n); \
+ if (ctype_isinteger(d->info) || ctype_isfp(d->info)) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ else if (ngpr + n > maxgpr) \
+ ngpr = maxgpr; /* Prevent reordering. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+
+#if LJ_ABI_SOFTFP
+#define CCALL_HANDLE_REGARG CCALL_HANDLE_GPR
+#else
+#define CCALL_HANDLE_REGARG \
+ if (isfp) { /* Try to pass argument in FPRs. */ \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ dp = &cc->fpr[nfpr]; \
+ nfpr += 1; \
+ d = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { \
+ CCALL_HANDLE_GPR \
+ }
+#endif
+
+#if !LJ_ABI_SOFTFP
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ctr = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */
+#endif
+
+#elif LJ_TARGET_MIPS32
+/* -- MIPS o32 calling conventions ---------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = 1; /* Return all structs by reference. */ \
+ cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 1 or 2 FPRs. */ \
+ cc->retref = 0;
+
+#if LJ_ABI_SOFTFP
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from GPRs. */ \
+ ((intptr_t *)dp)[0] = cc->gpr[0]; \
+ ((intptr_t *)dp)[1] = cc->gpr[1]; \
+ } else { /* Copy complex double from GPRs. */ \
+ ((intptr_t *)dp)[0] = cc->gpr[0]; \
+ ((intptr_t *)dp)[1] = cc->gpr[1]; \
+ ((intptr_t *)dp)[2] = cc->gpr[2]; \
+ ((intptr_t *)dp)[3] = cc->gpr[3]; \
+ }
+#else
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
+ ((float *)dp)[0] = cc->fpr[0].f; \
+ ((float *)dp)[1] = cc->fpr[1].f; \
+ } else { /* Copy complex double from FPRs. */ \
+ ((double *)dp)[0] = cc->fpr[0].d; \
+ ((double *)dp)[1] = cc->fpr[1].d; \
+ }
+#endif
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+#define CCALL_HANDLE_GPR \
+ if ((d->info & CTF_ALIGN) > CTALIGN_PTR) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ }
+
+#if !LJ_ABI_SOFTFP /* MIPS32 hard-float */
+#define CCALL_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR && !(ct->info & CTF_VARARG)) { \
+ /* Try to pass argument in FPRs. */ \
+ dp = n == 1 ? (void *)&cc->fpr[nfpr].f : (void *)&cc->fpr[nfpr].d; \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ CCALL_HANDLE_GPR \
+ }
+#else /* MIPS32 soft-float */
+#define CCALL_HANDLE_REGARG CCALL_HANDLE_GPR
+#endif
+
+#if !LJ_ABI_SOFTFP
+/* MIPS32 hard-float: read a 'float' return value from FPR0. */
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)&cc->fpr[0].f;
+#endif
+
+#elif LJ_TARGET_MIPS64
+/* -- MIPS n64 calling conventions ---------------------------------------- */
+
+#define CCALL_HANDLE_STRUCTRET \
+ cc->retref = !(sz <= 16); \
+ if (cc->retref) cc->gpr[ngpr++] = (GPRArg)dp;
+
+#define CCALL_HANDLE_STRUCTRET2 \
+ ccall_copy_struct(cc, ctr, dp, sp, ccall_classify_struct(cts, ctr, ct));
+
+#define CCALL_HANDLE_COMPLEXRET \
+ /* Complex values are returned in 1 or 2 FPRs. */ \
+ cc->retref = 0;
+
+#if LJ_ABI_SOFTFP /* MIPS64 soft-float */
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from GPRs. */ \
+ ((intptr_t *)dp)[0] = cc->gpr[0]; \
+ } else { /* Copy complex double from GPRs. */ \
+ ((intptr_t *)dp)[0] = cc->gpr[0]; \
+ ((intptr_t *)dp)[1] = cc->gpr[1]; \
+ }
+
+#define CCALL_HANDLE_COMPLEXARG \
+ /* Pass complex by value in 2 or 4 GPRs. */
+
+/* Position of soft-float 'float' return value depends on endianness. */
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)cc->gpr + LJ_ENDIAN_SELECT(0, 4);
+
+#else /* MIPS64 hard-float */
+
+#define CCALL_HANDLE_COMPLEXRET2 \
+ if (ctr->size == 2*sizeof(float)) { /* Copy complex float from FPRs. */ \
+ ((float *)dp)[0] = cc->fpr[0].f; \
+ ((float *)dp)[1] = cc->fpr[1].f; \
+ } else { /* Copy complex double from FPRs. */ \
+ ((double *)dp)[0] = cc->fpr[0].d; \
+ ((double *)dp)[1] = cc->fpr[1].d; \
+ }
+
+#define CCALL_HANDLE_COMPLEXARG \
+ if (sz == 2*sizeof(float)) { \
+ isfp = 2; \
+ if (ngpr < maxgpr) \
+ sz *= 2; \
+ }
+
+#define CCALL_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ sp = (uint8_t *)&cc->fpr[0].f;
+
+#endif
+
+#define CCALL_HANDLE_STRUCTARG \
+ /* Pass all structs by value in registers and/or on the stack. */
+
+#define CCALL_HANDLE_REGARG \
+ if (ngpr < maxgpr) { \
+ dp = &cc->gpr[ngpr]; \
+ if (ngpr + n > maxgpr) { \
+ nsp += ngpr + n - maxgpr; /* Assumes contiguous gpr/stack fields. */ \
+ if (nsp > CCALL_MAXSTACK) goto err_nyi; /* Too many arguments. */ \
+ ngpr = maxgpr; \
+ } else { \
+ ngpr += n; \
+ } \
+ goto done; \
+ }
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_HANDLE_STRUCTRET2
+#define CCALL_HANDLE_STRUCTRET2 \
+ memcpy(dp, sp, ctr->size); /* Copy struct return value from GPRs. */
+#endif
+
+/* -- x86 OSX ABI struct classification ----------------------------------- */
+
+#if LJ_TARGET_X86 && LJ_TARGET_OSX
+
+/* Check for struct with single FP field. */
+static int ccall_classify_struct(CTState *cts, CType *ct)
+{
+ CTSize sz = ct->size;
+ if (!(sz == sizeof(float) || sz == sizeof(double))) return 0;
+ if ((ct->info & CTF_UNION)) return 0;
+ while (ct->sib) {
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ CType *sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ if (sct->size == sz)
+ return (sz >> 2); /* Return 1 for float or 2 for double. */
+ } else if (ctype_isstruct(sct->info)) {
+ if (sct->size)
+ return ccall_classify_struct(cts, sct);
+ } else {
+ break;
+ }
+ } else if (ctype_isbitfield(ct->info)) {
+ break;
+ } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ CType *sct = ctype_rawchild(cts, ct);
+ if (sct->size)
+ return ccall_classify_struct(cts, sct);
+ }
+ }
+ return 0;
+}
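+
+/* E.g. struct { float f; } classifies as 1 and struct { double d; } as 2,
+** telling the caller (via cc->resx87) that the single FP field comes back
+** on the x87 stack; struct { int i; } classifies as 0 and is returned in
+** a GPR instead.
+*/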
+
+#endif
+
+/* -- x64 struct classification ------------------------------------------- */
+
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+
+/* Register classes for x64 struct classification. */
+#define CCALL_RCL_INT 1
+#define CCALL_RCL_SSE 2
+#define CCALL_RCL_MEM 4
+/* NYI: classify vectors. */
+
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs);
+
+/* Classify a C type. */
+static void ccall_classify_ct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ctype_isarray(ct->info)) {
+ CType *cct = ctype_rawchild(cts, ct);
+ CTSize eofs, esz = cct->size, asz = ct->size;
+ for (eofs = 0; eofs < asz; eofs += esz)
+ ccall_classify_ct(cts, cct, rcl, ofs+eofs);
+ } else if (ctype_isstruct(ct->info)) {
+ ccall_classify_struct(cts, ct, rcl, ofs);
+ } else {
+ int cl = ctype_isfp(ct->info) ? CCALL_RCL_SSE : CCALL_RCL_INT;
+ lj_assertCTS(ctype_hassize(ct->info),
+ "classify ctype %08x without size", ct->info);
+ if ((ofs & (ct->size-1))) cl = CCALL_RCL_MEM; /* Unaligned. */
+ rcl[(ofs >= 8)] |= cl;
+ }
+}
+
+/* Recursively classify a struct based on its fields. */
+static int ccall_classify_struct(CTState *cts, CType *ct, int *rcl, CTSize ofs)
+{
+ if (ct->size > 16) return CCALL_RCL_MEM; /* Too big, gets memory class. */
+ while (ct->sib) {
+ CTSize fofs;
+ ct = ctype_get(cts, ct->sib);
+ fofs = ofs+ct->size;
+ if (ctype_isfield(ct->info))
+ ccall_classify_ct(cts, ctype_rawchild(cts, ct), rcl, fofs);
+ else if (ctype_isbitfield(ct->info))
+ rcl[(fofs >= 8)] |= CCALL_RCL_INT; /* NYI: unaligned bitfields? */
+ else if (ctype_isxattrib(ct->info, CTA_SUBTYPE))
+ ccall_classify_struct(cts, ctype_rawchild(cts, ct), rcl, fofs);
+ }
+ return ((rcl[0]|rcl[1]) & CCALL_RCL_MEM); /* Memory class? */
+}
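+
+/* E.g. struct { int a; float b; } (size 8) sets both CCALL_RCL_INT and
+** CCALL_RCL_SSE in rcl[0], so ccall_struct_reg() below passes it in one
+** GPR (integer class takes precedence); struct { double x, y; } (size 16)
+** sets CCALL_RCL_SSE in both halves and travels in two FPRs; anything
+** over 16 bytes is CCALL_RCL_MEM and is passed in memory.
+*/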
+
+/* Try to split up a small struct into registers. */
+static int ccall_struct_reg(CCallState *cc, CTState *cts, GPRArg *dp, int *rcl)
+{
+ MSize ngpr = cc->ngpr, nfpr = cc->nfpr;
+ uint32_t i;
+ UNUSED(cts);
+ for (i = 0; i < 2; i++) {
+ lj_assertCTS(!(rcl[i] & CCALL_RCL_MEM), "pass mem struct in reg");
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ if (ngpr >= CCALL_NARG_GPR) return 1; /* Register overflow. */
+ cc->gpr[ngpr++] = dp[i];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ if (nfpr >= CCALL_NARG_FPR) return 1; /* Register overflow. */
+ cc->fpr[nfpr++].l[0] = dp[i];
+ }
+ }
+ cc->ngpr = ngpr; cc->nfpr = nfpr;
+ return 0; /* Ok. */
+}
+
+/* Pass a small struct argument. */
+static int ccall_struct_arg(CCallState *cc, CTState *cts, CType *d, int *rcl,
+ TValue *o, int narg)
+{
+ GPRArg dp[2];
+ dp[0] = dp[1] = 0;
+ /* Convert to temp. struct. */
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ if (ccall_struct_reg(cc, cts, dp, rcl)) {
+ /* Register overflow? Pass on stack. */
+ MSize nsp = cc->nsp, n = rcl[1] ? 2 : 1;
+ if (nsp + n > CCALL_MAXSTACK) return 1; /* Too many arguments. */
+ cc->nsp = nsp + n;
+ memcpy(&cc->stack[nsp], dp, n*CTSIZE_PTR);
+ }
+ return 0; /* Ok. */
+}
+
+/* Combine returned small struct. */
+static void ccall_struct_ret(CCallState *cc, int *rcl, uint8_t *dp, CTSize sz)
+{
+ GPRArg sp[2];
+ MSize ngpr = 0, nfpr = 0;
+ uint32_t i;
+ for (i = 0; i < 2; i++) {
+ if ((rcl[i] & CCALL_RCL_INT)) { /* Integer class takes precedence. */
+ sp[i] = cc->gpr[ngpr++];
+ } else if ((rcl[i] & CCALL_RCL_SSE)) {
+ sp[i] = cc->fpr[nfpr++].l[0];
+ }
+ }
+ memcpy(dp, sp, sz);
+}
+#endif
+
+/* -- ARM hard-float ABI struct classification ---------------------------- */
+
+#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
+
+/* Classify a struct based on its fields. */
+static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
+{
+ CTSize sz = ct->size;
+ unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
+ if ((ctf->info & CTF_VARARG)) goto noth;
+ while (ct->sib) {
+ CType *sct;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ r |= sct->size;
+ if (!isu) n++; else if (n == 0) n = 1;
+ } else if (ctype_iscomplex(sct->info)) {
+ r |= (sct->size >> 1);
+ if (!isu) n += 2; else if (n < 2) n = 2;
+ } else if (ctype_isstruct(sct->info)) {
+ goto substruct;
+ } else {
+ goto noth;
+ }
+ } else if (ctype_isbitfield(ct->info)) {
+ goto noth;
+ } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ sct = ctype_rawchild(cts, ct);
+ substruct:
+ if (sct->size > 0) {
+ unsigned int s = ccall_classify_struct(cts, sct, ctf);
+ if (s <= 1) goto noth;
+ r |= (s & 255);
+      if (!isu) n += (s >> 8); else if (n < (s >> 8)) n = (s >> 8); \
+ }
+ }
+ }
+ if ((r == 4 || r == 8) && n <= 4)
+ return r + (n << 8);
+noth: /* Not a homogeneous float/double aggregate. */
+ return (sz <= 4); /* Return structs of size <= 4 in a GPR. */
+}
+
+#endif
+
+/* -- ARM64 ABI struct classification ------------------------------------- */
+
+#if LJ_TARGET_ARM64
+
+/* Classify a struct based on its fields. */
+static unsigned int ccall_classify_struct(CTState *cts, CType *ct)
+{
+ CTSize sz = ct->size;
+ unsigned int r = 0, n = 0, isu = (ct->info & CTF_UNION);
+ while (ct->sib) {
+ CType *sct;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ r |= sct->size;
+ if (!isu) n++; else if (n == 0) n = 1;
+ } else if (ctype_iscomplex(sct->info)) {
+ r |= (sct->size >> 1);
+ if (!isu) n += 2; else if (n < 2) n = 2;
+ } else if (ctype_isstruct(sct->info)) {
+ goto substruct;
+ } else {
+ goto noth;
+ }
+ } else if (ctype_isbitfield(ct->info)) {
+ goto noth;
+ } else if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ sct = ctype_rawchild(cts, ct);
+ substruct:
+ if (sct->size > 0) {
+ unsigned int s = ccall_classify_struct(cts, sct);
+ if (s <= 1) goto noth;
+ r |= (s & 255);
+      if (!isu) n += (s >> 8); else if (n < (s >> 8)) n = (s >> 8); \
+ }
+ }
+ }
+ if ((r == 4 || r == 8) && n <= 4)
+ return r + (n << 8);
+noth: /* Not a homogeneous float/double aggregate. */
+ return (sz <= 16); /* Return structs of size <= 16 in GPRs. */
+}
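+
+/* E.g. struct { float x, y, z; } is a homogeneous float aggregate: r == 4
+** and n == 3, so 4 + (3 << 8) requests three single-precision FPRs.
+** struct { double d; float f; } mixes field sizes (r == 12), falls
+** through to noth and, being 16 bytes, is handled in GPRs instead.
+*/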
+
+#endif
+
+/* -- MIPS64 ABI struct classification ------------------------------------ */
+
+#if LJ_TARGET_MIPS64
+
+#define FTYPE_FLOAT 1
+#define FTYPE_DOUBLE 2
+
+/* Classify FP fields (max. 2) and their types. */
+static unsigned int ccall_classify_struct(CTState *cts, CType *ct, CType *ctf)
+{
+ int n = 0, ft = 0;
+ if ((ctf->info & CTF_VARARG) || (ct->info & CTF_UNION))
+ goto noth;
+ while (ct->sib) {
+ CType *sct;
+ ct = ctype_get(cts, ct->sib);
+ if (n == 2) {
+ goto noth;
+ } else if (ctype_isfield(ct->info)) {
+ sct = ctype_rawchild(cts, ct);
+ if (ctype_isfp(sct->info)) {
+ ft |= (sct->size == 4 ? FTYPE_FLOAT : FTYPE_DOUBLE) << 2*n;
+ n++;
+ } else {
+ goto noth;
+ }
+ } else if (ctype_isbitfield(ct->info) ||
+ ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ goto noth;
+ }
+ }
+ if (n <= 2)
+ return ft;
+noth: /* Not a homogeneous float/double aggregate. */
+ return 0; /* Struct is in GPRs. */
+}
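+
+/* E.g. struct { float f; double d; } yields FTYPE_FLOAT | (FTYPE_DOUBLE
+** << 2) == 9: two FP fields, which ccall_copy_struct() below fetches from
+** separate FPRs (hard-float) or copies from GPRs (soft-float).
+*/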
+
+static void ccall_copy_struct(CCallState *cc, CType *ctr, void *dp, void *sp,
+ int ft)
+{
+ if (LJ_ABI_SOFTFP ? ft :
+ ((ft & 3) == FTYPE_FLOAT || (ft >> 2) == FTYPE_FLOAT)) {
+ int i, ofs = 0;
+ for (i = 0; ft != 0; i++, ft >>= 2) {
+ if ((ft & 3) == FTYPE_FLOAT) {
+#if LJ_ABI_SOFTFP
+ /* The 2nd FP struct result is in CARG1 (gpr[2]) and not CRET2. */
+ memcpy((uint8_t *)dp + ofs,
+ (uint8_t *)&cc->gpr[2*i] + LJ_ENDIAN_SELECT(0, 4), 4);
+#else
+ *(float *)((uint8_t *)dp + ofs) = cc->fpr[i].f;
+#endif
+ ofs += 4;
+ } else {
+ ofs = (ofs + 7) & ~7; /* 64 bit alignment. */
+#if LJ_ABI_SOFTFP
+ *(intptr_t *)((uint8_t *)dp + ofs) = cc->gpr[2*i];
+#else
+ *(double *)((uint8_t *)dp + ofs) = cc->fpr[i].d;
+#endif
+ ofs += 8;
+ }
+ }
+ } else {
+#if !LJ_ABI_SOFTFP
+ if (ft) sp = (uint8_t *)&cc->fpr[0];
+#endif
+ memcpy(dp, sp, ctr->size);
+ }
+}
+
+#endif
+
+/* -- Common C call handling ---------------------------------------------- */
+
+/* Infer the destination CTypeID for a vararg argument. */
+CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o)
+{
+ if (tvisnumber(o)) {
+ return CTID_DOUBLE;
+ } else if (tviscdata(o)) {
+ CTypeID id = cdataV(o)->ctypeid;
+ CType *s = ctype_get(cts, id);
+ if (ctype_isrefarray(s->info)) {
+ return lj_ctype_intern(cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(s->info)), CTSIZE_PTR);
+ } else if (ctype_isstruct(s->info) || ctype_isfunc(s->info)) {
+ /* NYI: how to pass a struct by value in a vararg argument? */
+ return lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR);
+ } else if (ctype_isfp(s->info) && s->size == sizeof(float)) {
+ return CTID_DOUBLE;
+ } else {
+ return id;
+ }
+ } else if (tvisstr(o)) {
+ return CTID_P_CCHAR;
+ } else if (tvisbool(o)) {
+ return CTID_BOOL;
+ } else {
+ return CTID_P_VOID;
+ }
+}
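+
+/* Promotions performed above, by example: a Lua number is passed as
+** double, a string as const char *, a float cdata is widened to double,
+** and refarray, struct and function cdata decay to pointers.
+*/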
+
+/* Setup arguments for C call. */
+static int ccall_set_args(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc)
+{
+ int gcsteps = 0;
+ TValue *o, *top = L->top;
+ CTypeID fid;
+ CType *ctr;
+ MSize maxgpr, ngpr = 0, nsp = 0, narg;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#if LJ_TARGET_ARM
+ MSize fprodd = 0;
+#endif
+#endif
+
+ /* Clear unused regs to get some determinism in case of misdeclaration. */
+ memset(cc->gpr, 0, sizeof(cc->gpr));
+#if CCALL_NUM_FPR
+ memset(cc->fpr, 0, sizeof(cc->fpr));
+#endif
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ cc->resx87 = 0;
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#else
+ maxgpr = CCALL_NARG_GPR;
+#endif
+
+ /* Perform required setup for some result types. */
+ ctr = ctype_rawchild(cts, ct);
+ if (ctype_isvector(ctr->info)) {
+ if (!(CCALL_VECTOR_REG && (ctr->size == 8 || ctr->size == 16)))
+ goto err_nyi;
+ } else if (ctype_iscomplex(ctr->info) || ctype_isstruct(ctr->info)) {
+ /* Preallocate cdata object and anchor it after arguments. */
+ CTSize sz = ctr->size;
+ GCcdata *cd = lj_cdata_new(cts, ctype_cid(ct->info), sz);
+ void *dp = cdataptr(cd);
+ setcdataV(L, L->top++, cd);
+ if (ctype_isstruct(ctr->info)) {
+ CCALL_HANDLE_STRUCTRET
+ } else {
+ CCALL_HANDLE_COMPLEXRET
+ }
+#if LJ_TARGET_X86
+ } else if (ctype_isfp(ctr->info)) {
+ cc->resx87 = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+
+ /* Walk through all passed arguments. */
+ for (o = L->base+1, narg = 1; o < top; o++, narg++) {
+ CTypeID did;
+ CType *d;
+ CTSize sz;
+ MSize n, isfp = 0, isva = 0;
+ void *dp, *rp = NULL;
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lj_assertL(ctype_isfield(ctf->info), "field expected");
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ isva = 1;
+ }
+ d = ctype_raw(cts, did);
+ sz = d->size;
+
+ /* Find out how (by value/ref) and where (GPR/FPR) to pass an argument. */
+ if (ctype_isnum(d->info)) {
+ if (sz > 8) goto err_nyi;
+ if ((d->info & CTF_FP))
+ isfp = 1;
+ } else if (ctype_isvector(d->info)) {
+ if (CCALL_VECTOR_REG && (sz == 8 || sz == 16))
+ isfp = 1;
+ else
+ goto err_nyi;
+ } else if (ctype_isstruct(d->info)) {
+ CCALL_HANDLE_STRUCTARG
+ } else if (ctype_iscomplex(d->info)) {
+ CCALL_HANDLE_COMPLEXARG
+ } else {
+ sz = CTSIZE_PTR;
+ }
+ sz = (sz + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CCALL_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && !rp && (d->info & CTF_ALIGN) > CTALIGN_PTR) {
+ MSize align = (1u << ctype_align(d->info-CTALIGN_PTR)) -1;
+ nsp = (nsp + align) & ~align; /* Align argument on stack. */
+ }
+ if (nsp + n > CCALL_MAXSTACK) { /* Too many arguments. */
+ err_nyi:
+ lj_err_caller(L, LJ_ERR_FFI_NYICALL);
+ }
+ dp = &cc->stack[nsp];
+ nsp += n;
+ isva = 0;
+
+ done:
+ if (rp) { /* Pass by reference. */
+ gcsteps++;
+ *(void **)dp = rp;
+ dp = rp;
+ }
+ lj_cconv_ct_tv(cts, d, (uint8_t *)dp, o, CCF_ARG(narg));
+ /* Extend passed integers to 32 bits at least. */
+ if (ctype_isinteger_or_bool(d->info) && d->size < 4) {
+ if (d->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = d->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = d->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_ARM64 && LJ_BE
+ if (isfp && d->size == sizeof(float))
+ ((float *)dp)[1] = ((float *)dp)[0]; /* Floats occupy high slot. */
+#endif
+#if LJ_TARGET_MIPS64 || (LJ_TARGET_ARM64 && LJ_BE)
+ if ((ctype_isinteger_or_bool(d->info) || ctype_isenum(d->info)
+#if LJ_TARGET_MIPS64
+ || (isfp && nsp == 0)
+#endif
+ ) && d->size <= 4) {
+ *(int64_t *)dp = (int64_t)*(int32_t *)dp; /* Sign-extend to 64 bit. */
+ }
+#endif
+#if LJ_TARGET_X64 && LJ_ABI_WIN
+ if (isva) { /* Windows/x64 mirrors varargs in both register sets. */
+ if (nfpr == ngpr)
+ cc->gpr[ngpr-1] = cc->fpr[ngpr-1].l[0];
+ else
+ cc->fpr[ngpr-1].l[0] = cc->gpr[ngpr-1];
+ }
+#else
+ UNUSED(isva);
+#endif
+#if LJ_TARGET_X64 && !LJ_ABI_WIN
+ if (isfp == 2 && n == 2 && (uint8_t *)dp == (uint8_t *)&cc->fpr[nfpr-2]) {
+ cc->fpr[nfpr-1].d[0] = cc->fpr[nfpr-2].d[1]; /* Split complex double. */
+ cc->fpr[nfpr-2].d[1] = 0;
+ }
+#elif LJ_TARGET_ARM64 || (LJ_TARGET_MIPS64 && !LJ_ABI_SOFTFP)
+ if (isfp == 2 && (uint8_t *)dp < (uint8_t *)cc->stack) {
+ /* Split float HFA or complex float into separate registers. */
+ CTSize i = (sz >> 2) - 1;
+ do { ((uint64_t *)dp)[i] = ((uint32_t *)dp)[i]; } while (i--);
+ }
+#else
+ UNUSED(isfp);
+#endif
+ }
+ if (fid) lj_err_caller(L, LJ_ERR_FFI_NUMARG); /* Too few arguments. */
+
+#if LJ_TARGET_X64 || (LJ_TARGET_PPC && !LJ_ABI_SOFTFP)
+ cc->nfpr = nfpr; /* Required for vararg functions. */
+#endif
+ cc->nsp = nsp;
+ cc->spadj = (CCALL_SPS_FREE + CCALL_SPS_EXTRA)*CTSIZE_PTR;
+ if (nsp > CCALL_SPS_FREE)
+ cc->spadj += (((nsp-CCALL_SPS_FREE)*CTSIZE_PTR + 15u) & ~15u);
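+  /* E.g. on POSIX/x64 (CCALL_SPS_FREE == 1, CCALL_SPS_EXTRA == 0) a call
+  ** with nsp == 5 gets spadj == 8 + ((4*8 + 15) & ~15) == 40, i.e. the
+  ** spill area beyond the free slot is rounded up to a 16 byte multiple.
+  */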
+ return gcsteps;
+}
+
+/* Get results from C call. */
+static int ccall_get_results(lua_State *L, CTState *cts, CType *ct,
+ CCallState *cc, int *ret)
+{
+ CType *ctr = ctype_rawchild(cts, ct);
+ uint8_t *sp = (uint8_t *)&cc->gpr[0];
+ if (ctype_isvoid(ctr->info)) {
+ *ret = 0; /* Zero results. */
+ return 0; /* No additional GC step. */
+ }
+ *ret = 1; /* One result. */
+ if (ctype_isstruct(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ if (!cc->retref) {
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_STRUCTRET2
+ }
+ return 1; /* One GC step. */
+ }
+ if (ctype_iscomplex(ctr->info)) {
+ /* Return cdata object which is already on top of stack. */
+ void *dp = cdataptr(cdataV(L->top-1)); /* Use preallocated object. */
+ CCALL_HANDLE_COMPLEXRET2
+ return 1; /* One GC step. */
+ }
+ if (LJ_BE && ctr->size < CTSIZE_PTR &&
+ (ctype_isinteger_or_bool(ctr->info) || ctype_isenum(ctr->info)))
+ sp += (CTSIZE_PTR - ctr->size);
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info) || ctype_isvector(ctr->info))
+ sp = (uint8_t *)&cc->fpr[0];
+#endif
+#ifdef CCALL_HANDLE_RET
+ CCALL_HANDLE_RET
+#endif
+ /* No reference types end up here, so there's no need for the CTypeID. */
+ lj_assertL(!(ctype_isrefarray(ctr->info) || ctype_isstruct(ctr->info)),
+ "unexpected reference ctype");
+ return lj_cconv_tv_ct(cts, ctr, 0, L->top-1, sp);
+}
+
+/* Call C function. */
+int lj_ccall_func(lua_State *L, GCcdata *cd)
+{
+ CTState *cts = ctype_cts(L);
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ CTSize sz = CTSIZE_PTR;
+ if (ctype_isptr(ct->info)) {
+ sz = ct->size;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ CCallState cc;
+ int gcsteps, ret;
+ cc.func = (void (*)(void))cdata_getptr(cdataptr(cd), sz);
+ gcsteps = ccall_set_args(L, cts, ct, &cc);
+ ct = (CType *)((intptr_t)ct-(intptr_t)cts->tab);
+ cts->cb.slot = ~0u;
+ lj_vm_ffi_call(&cc);
+ if (cts->cb.slot != ~0u) { /* Blacklist function that called a callback. */
+ TValue tv;
+ tv.u64 = ((uintptr_t)(void *)cc.func >> 2) | U64x(800000000, 00000000);
+ setboolV(lj_tab_set(L, cts->miscmap, &tv), 1);
+ }
+ ct = (CType *)((intptr_t)ct+(intptr_t)cts->tab); /* May be reallocated. */
+ gcsteps += ccall_get_results(L, cts, ct, &cc, &ret);
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Automatically detect __stdcall and fix up C function declaration. */
+ if (cc.spadj && ctype_cconv(ct->info) == CTCC_CDECL) {
+ CTF_INSERT(ct->info, CCONV, CTCC_STDCALL);
+ lj_trace_abort(G(L));
+ }
+#endif
+ while (gcsteps-- > 0)
+ lj_gc_check(L);
+ return ret;
+ }
+ return -1; /* Not a function. */
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ccall.h b/libs/luajit-cmake/luajit/src/lj_ccall.h
new file mode 100644
index 0000000..0b3c524
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ccall.h
@@ -0,0 +1,194 @@
+/*
+** FFI C call handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALL_H
+#define _LJ_CCALL_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* -- C calling conventions ----------------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+
+#if LJ_TARGET_X86
+#define CCALL_NARG_GPR 2 /* For fastcall arguments. */
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */
+#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */
+#elif LJ_ABI_WIN
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR 4
+#define CCALL_NRET_GPR 1
+#define CCALL_NRET_FPR 1
+#define CCALL_SPS_EXTRA 4
+#else
+#define CCALL_NARG_GPR 6
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR 2
+#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */
+#endif
+
+#define CCALL_SPS_FREE 1
+#define CCALL_ALIGN_CALLSTATE 16
+
+typedef LJ_ALIGN(16) union FPRArg {
+ double d[2];
+ float f[4];
+ uint8_t b[16];
+ uint16_t s[8];
+ int i[4];
+ int64_t l[2];
+} FPRArg;
+
+typedef intptr_t GPRArg;
+
+#elif LJ_TARGET_ARM
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NRET_GPR 2 /* For softfp double. */
+#if LJ_ABI_SOFTFP
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_FPR 0
+#else
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_FPR 4
+#endif
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ float f[2];
+} FPRArg;
+
+#elif LJ_TARGET_ARM64
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NRET_GPR 2
+#define CCALL_NARG_FPR 8
+#define CCALL_NRET_FPR 4
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ struct { LJ_ENDIAN_LOHI(float f; , float g;) };
+ struct { LJ_ENDIAN_LOHI(uint32_t lo; , uint32_t hi;) };
+} FPRArg;
+
+#elif LJ_TARGET_PPC
+
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR (LJ_ABI_SOFTFP ? 0 : 8)
+#define CCALL_NRET_GPR 4 /* For complex double. */
+#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 1)
+#define CCALL_SPS_EXTRA 4
+#define CCALL_SPS_FREE 0
+
+typedef intptr_t GPRArg;
+typedef double FPRArg;
+
+#elif LJ_TARGET_MIPS32
+
+#define CCALL_NARG_GPR 4
+#define CCALL_NARG_FPR (LJ_ABI_SOFTFP ? 0 : 2)
+#define CCALL_NRET_GPR (LJ_ABI_SOFTFP ? 4 : 2)
+#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 2)
+#define CCALL_SPS_EXTRA 7
+#define CCALL_SPS_FREE 1
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ struct { LJ_ENDIAN_LOHI(float f; , float g;) };
+} FPRArg;
+
+#elif LJ_TARGET_MIPS64
+
+/* FP args are positional and overlay the GPR array. */
+#define CCALL_NARG_GPR 8
+#define CCALL_NARG_FPR 0
+#define CCALL_NRET_GPR 2
+#define CCALL_NRET_FPR (LJ_ABI_SOFTFP ? 0 : 2)
+#define CCALL_SPS_EXTRA 3
+#define CCALL_SPS_FREE 1
+
+typedef intptr_t GPRArg;
+typedef union FPRArg {
+ double d;
+ struct { LJ_ENDIAN_LOHI(float f; , float g;) };
+} FPRArg;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+#ifndef CCALL_SPS_EXTRA
+#define CCALL_SPS_EXTRA 0
+#endif
+#ifndef CCALL_VECTOR_REG
+#define CCALL_VECTOR_REG 0
+#endif
+#ifndef CCALL_ALIGN_STACKARG
+#define CCALL_ALIGN_STACKARG 1
+#endif
+#ifndef CCALL_ALIGN_CALLSTATE
+#define CCALL_ALIGN_CALLSTATE 8
+#endif
+
+#define CCALL_NUM_GPR \
+ (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR)
+#define CCALL_NUM_FPR \
+ (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR)
+
+/* Check against constants in lj_ctype.h. */
+LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR);
+LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR);
+
+#define CCALL_MAXSTACK 32
+
+/* -- C call state -------------------------------------------------------- */
+
+typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState {
+ void (*func)(void); /* Pointer to called function. */
+ uint32_t spadj; /* Stack pointer adjustment. */
+ uint8_t nsp; /* Number of stack slots. */
+ uint8_t retref; /* Return value by reference. */
+#if LJ_TARGET_X64
+ uint8_t ngpr; /* Number of arguments in GPRs. */
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#elif LJ_TARGET_X86
+ uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
+#elif LJ_TARGET_ARM64
+ void *retp; /* Aggregate return pointer in x8. */
+#elif LJ_TARGET_PPC
+ uint8_t nfpr; /* Number of arguments in FPRs. */
+#endif
+#if LJ_32
+ int32_t align1;
+#endif
+#if CCALL_NUM_FPR
+ FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */
+#endif
+ GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */
+ GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */
+} CCallState;
+
+/* -- C call handling ----------------------------------------------------- */
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc);
+
+LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o);
+LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ccallback.c b/libs/luajit-cmake/luajit/src/lj_ccallback.c
new file mode 100644
index 0000000..43e4430
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ccallback.c
@@ -0,0 +1,796 @@
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_ccall.h"
+#include "lj_ccallback.h"
+#include "lj_target.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Target-specific handling of callback slots -------------------------- */
+
+#define CALLBACK_MCODE_SIZE (LJ_PAGESIZE * LJ_NUM_CBPAGE)
+
+#if LJ_OS_NOJIT
+
+/* Callbacks disabled. */
+#define CALLBACK_SLOT2OFS(slot) (0*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
+#define CALLBACK_MAX_SLOT 0
+
+#elif LJ_TARGET_X86ORX64
+
+#define CALLBACK_MCODE_HEAD (LJ_64 ? 8 : 0)
+#define CALLBACK_MCODE_GROUP (-2+1+2+(LJ_GC64 ? 10 : 5)+(LJ_64 ? 6 : 5))
+
+#define CALLBACK_SLOT2OFS(slot) \
+ (CALLBACK_MCODE_HEAD + CALLBACK_MCODE_GROUP*((slot)/32) + 4*(slot))
+
+static MSize CALLBACK_OFS2SLOT(MSize ofs)
+{
+ MSize group;
+ ofs -= CALLBACK_MCODE_HEAD;
+ group = ofs / (32*4 + CALLBACK_MCODE_GROUP);
+ return (ofs % (32*4 + CALLBACK_MCODE_GROUP))/4 + group*32;
+}
+
+#define CALLBACK_MAX_SLOT \
+ (((CALLBACK_MCODE_SIZE-CALLBACK_MCODE_HEAD)/(CALLBACK_MCODE_GROUP+4*32))*32)
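+
+/* E.g. on x64 (CALLBACK_MCODE_HEAD == 8) slot 33 starts at offset
+** 8 + CALLBACK_MCODE_GROUP + 4*33: each slot emits 4 bytes of machine
+** code and each group of 32 slots shares one dispatch tail of
+** CALLBACK_MCODE_GROUP extra bytes.
+*/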
+
+#elif LJ_TARGET_ARM
+
+#define CALLBACK_MCODE_HEAD 32
+
+#elif LJ_TARGET_ARM64
+
+#define CALLBACK_MCODE_HEAD 32
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_MCODE_HEAD 24
+
+#elif LJ_TARGET_MIPS32
+
+#define CALLBACK_MCODE_HEAD 20
+
+#elif LJ_TARGET_MIPS64
+
+#define CALLBACK_MCODE_HEAD 52
+
+#else
+
+/* Missing support for this architecture. */
+#define CALLBACK_SLOT2OFS(slot) (0*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (0*(ofs))
+#define CALLBACK_MAX_SLOT 0
+
+#endif
+
+#ifndef CALLBACK_SLOT2OFS
+#define CALLBACK_SLOT2OFS(slot) (CALLBACK_MCODE_HEAD + 8*(slot))
+#define CALLBACK_OFS2SLOT(ofs) (((ofs)-CALLBACK_MCODE_HEAD)/8)
+#define CALLBACK_MAX_SLOT (CALLBACK_OFS2SLOT(CALLBACK_MCODE_SIZE))
+#endif
+
+/* Convert callback slot number to callback function pointer. */
+static void *callback_slot2ptr(CTState *cts, MSize slot)
+{
+ return (uint8_t *)cts->cb.mcode + CALLBACK_SLOT2OFS(slot);
+}
+
+/* Convert callback function pointer to slot number. */
+MSize lj_ccallback_ptr2slot(CTState *cts, void *p)
+{
+  uintptr_t ofs = (uintptr_t)((uint8_t *)p - (uint8_t *)cts->cb.mcode);
+ if (ofs < CALLBACK_MCODE_SIZE) {
+ MSize slot = CALLBACK_OFS2SLOT((MSize)ofs);
+ if (CALLBACK_SLOT2OFS(slot) == (MSize)ofs)
+ return slot;
+ }
+ return ~0u; /* Not a known callback function pointer. */
+}
+
+/* Initialize machine code for callback function pointers. */
+#if LJ_OS_NOJIT
+/* Disabled callback support. */
+#define callback_mcode_init(g, p) (p)
+#elif LJ_TARGET_X86ORX64
+static void *callback_mcode_init(global_State *g, uint8_t *page)
+{
+ uint8_t *p = page;
+ uint8_t *target = (uint8_t *)(void *)lj_vm_ffi_callback;
+ MSize slot;
+#if LJ_64
+ *(void **)p = target; p += 8;
+#endif
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ /* mov al, slot; jmp group */
+ *p++ = XI_MOVrib | RID_EAX; *p++ = (uint8_t)slot;
+ if ((slot & 31) == 31 || slot == CALLBACK_MAX_SLOT-1) {
+ /* push ebp/rbp; mov ah, slot>>8; mov ebp, &g. */
+ *p++ = XI_PUSH + RID_EBP;
+ *p++ = XI_MOVrib | (RID_EAX+4); *p++ = (uint8_t)(slot >> 8);
+#if LJ_GC64
+ *p++ = 0x48; *p++ = XI_MOVri | RID_EBP;
+ *(uint64_t *)p = (uint64_t)(g); p += 8;
+#else
+ *p++ = XI_MOVri | RID_EBP;
+ *(int32_t *)p = i32ptr(g); p += 4;
+#endif
+#if LJ_64
+ /* jmp [rip-pageofs] where lj_vm_ffi_callback is stored. */
+ *p++ = XI_GROUP5; *p++ = XM_OFS0 + (XOg_JMP<<3) + RID_EBP;
+ *(int32_t *)p = (int32_t)(page-(p+4)); p += 4;
+#else
+ /* jmp lj_vm_ffi_callback. */
+ *p++ = XI_JMP; *(int32_t *)p = target-(p+4); p += 4;
+#endif
+ } else {
+ *p++ = XI_JMPs; *p++ = (uint8_t)((2+2)*(31-(slot&31)) - 2);
+ }
+ }
+ return p;
+}
+#elif LJ_TARGET_ARM
+static void *callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ /* This must match with the saveregs macro in buildvm_arm.dasc. */
+ *p++ = ARMI_SUB|ARMF_D(RID_R12)|ARMF_N(RID_R12)|ARMF_M(RID_PC);
+ *p++ = ARMI_PUSH|ARMF_N(RID_SP)|RSET_RANGE(RID_R4,RID_R11+1)|RID2RSET(RID_LR);
+ *p++ = ARMI_SUB|ARMI_K12|ARMF_D(RID_R12)|ARMF_N(RID_R12)|CALLBACK_MCODE_HEAD;
+ *p++ = ARMI_STR|ARMI_LS_P|ARMI_LS_W|ARMF_D(RID_R12)|ARMF_N(RID_SP)|(CFRAME_SIZE-4*9);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_R12)|ARMF_N(RID_PC);
+ *p++ = ARMI_LDR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_PC)|ARMF_N(RID_PC);
+ *p++ = u32ptr(g);
+ *p++ = u32ptr(target);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = ARMI_MOV|ARMF_D(RID_R12)|ARMF_M(RID_PC);
+ *p = ARMI_B | ((page-p-2) & 0x00ffffffu);
+ p++;
+ }
+ return p;
+}
+#elif LJ_TARGET_ARM64
+static void *callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = A64I_LE(A64I_LDRLx | A64F_D(RID_X11) | A64F_S19(4));
+ *p++ = A64I_LE(A64I_LDRLx | A64F_D(RID_X10) | A64F_S19(5));
+ *p++ = A64I_LE(A64I_BR | A64F_N(RID_X11));
+ *p++ = A64I_LE(A64I_NOP);
+ ((void **)p)[0] = target;
+ ((void **)p)[1] = g;
+ p += 4;
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = A64I_LE(A64I_MOVZw | A64F_D(RID_X9) | A64F_U16(slot));
+ *p = A64I_LE(A64I_B | A64F_S26((page-p) & 0x03ffffffu));
+ p++;
+ }
+ return p;
+}
+#elif LJ_TARGET_PPC
+static void *callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ void *target = (void *)lj_vm_ffi_callback;
+ MSize slot;
+ *p++ = PPCI_LIS | PPCF_T(RID_TMP) | (u32ptr(target) >> 16);
+ *p++ = PPCI_LIS | PPCF_T(RID_R12) | (u32ptr(g) >> 16);
+ *p++ = PPCI_ORI | PPCF_A(RID_TMP)|PPCF_T(RID_TMP) | (u32ptr(target) & 0xffff);
+ *p++ = PPCI_ORI | PPCF_A(RID_R12)|PPCF_T(RID_R12) | (u32ptr(g) & 0xffff);
+ *p++ = PPCI_MTCTR | PPCF_T(RID_TMP);
+ *p++ = PPCI_BCTR;
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p++ = PPCI_LI | PPCF_T(RID_R11) | slot;
+ *p = PPCI_B | (((page-p) & 0x00ffffffu) << 2);
+ p++;
+ }
+ return p;
+}
+#elif LJ_TARGET_MIPS
+static void *callback_mcode_init(global_State *g, uint32_t *page)
+{
+ uint32_t *p = page;
+ uintptr_t target = (uintptr_t)(void *)lj_vm_ffi_callback;
+ uintptr_t ug = (uintptr_t)(void *)g;
+ MSize slot;
+#if LJ_TARGET_MIPS32
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (target >> 16);
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (ug >> 16);
+#else
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R3) | (target >> 48);
+ *p++ = MIPSI_LUI | MIPSF_T(RID_R2) | (ug >> 48);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | ((target >> 32) & 0xffff);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | ((ug >> 32) & 0xffff);
+ *p++ = MIPSI_DSLL | MIPSF_D(RID_R3)|MIPSF_T(RID_R3) | MIPSF_A(16);
+ *p++ = MIPSI_DSLL | MIPSF_D(RID_R2)|MIPSF_T(RID_R2) | MIPSF_A(16);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | ((target >> 16) & 0xffff);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | ((ug >> 16) & 0xffff);
+ *p++ = MIPSI_DSLL | MIPSF_D(RID_R3)|MIPSF_T(RID_R3) | MIPSF_A(16);
+ *p++ = MIPSI_DSLL | MIPSF_D(RID_R2)|MIPSF_T(RID_R2) | MIPSF_A(16);
+#endif
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R3)|MIPSF_S(RID_R3) | (target & 0xffff);
+ *p++ = MIPSI_JR | MIPSF_S(RID_R3);
+ *p++ = MIPSI_ORI | MIPSF_T(RID_R2)|MIPSF_S(RID_R2) | (ug & 0xffff);
+ for (slot = 0; slot < CALLBACK_MAX_SLOT; slot++) {
+ *p = MIPSI_B | ((page-p-1) & 0x0000ffffu);
+ p++;
+ *p++ = MIPSI_LI | MIPSF_T(RID_R1) | slot;
+ }
+ return p;
+}
+#else
+/* Missing support for this architecture. */
+#define callback_mcode_init(g, p) (p)
+#endif
+
+/* -- Machine code management --------------------------------------------- */
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#ifdef PROT_MPROTECT
+#define CCPROT_CREATE (PROT_MPROTECT(PROT_EXEC))
+#else
+#define CCPROT_CREATE 0
+#endif
+
+#endif
+
+/* Allocate and initialize area for callback function pointers. */
+static void callback_mcode_new(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p, *pe;
+ if (CALLBACK_MAX_SLOT == 0)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#if LJ_TARGET_WINDOWS
+ p = LJ_WIN_VALLOC(NULL, sz, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+ if (!p)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#elif LJ_TARGET_POSIX
+ p = mmap(NULL, sz, (PROT_READ|PROT_WRITE|CCPROT_CREATE), MAP_PRIVATE|MAP_ANONYMOUS,
+ -1, 0);
+ if (p == MAP_FAILED)
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+#else
+ /* Fallback allocator. Fails if memory is not executable by default. */
+ p = lj_mem_new(cts->L, sz);
+#endif
+ cts->cb.mcode = p;
+ pe = callback_mcode_init(cts->g, p);
+ UNUSED(pe);
+ lj_assertCTS((size_t)((char *)pe - (char *)p) <= sz,
+ "miscalculated CALLBACK_MAX_SLOT");
+ lj_mcode_sync(p, (char *)p + sz);
+#if LJ_TARGET_WINDOWS
+ {
+ DWORD oprot;
+ LJ_WIN_VPROTECT(p, sz, PAGE_EXECUTE_READ, &oprot);
+ }
+#elif LJ_TARGET_POSIX
+ mprotect(p, sz, (PROT_READ|PROT_EXEC));
+#endif
+}
+
+/* Free area for callback function pointers. */
+void lj_ccallback_mcode_free(CTState *cts)
+{
+ size_t sz = (size_t)CALLBACK_MCODE_SIZE;
+ void *p = cts->cb.mcode;
+ if (p == NULL) return;
+#if LJ_TARGET_WINDOWS
+ VirtualFree(p, 0, MEM_RELEASE);
+ UNUSED(sz);
+#elif LJ_TARGET_POSIX
+ munmap(p, sz);
+#else
+ lj_mem_free(cts->g, p, sz);
+#endif
+}
+
+/* -- C callback entry ---------------------------------------------------- */
+
+/* Target-specific handling of register arguments. Similar to lj_ccall.c. */
+#if LJ_TARGET_X86
+
+#define CALLBACK_HANDLE_REGARG \
+ if (!isfp) { /* Only non-FP values may be passed in registers. */ \
+ if (n > 1) { /* Anything > 32 bit is passed on the stack. */ \
+ if (!LJ_ABI_WIN) ngpr = maxgpr; /* Prevent reordering. */ \
+ } else if (ngpr + 1 <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
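+
+/* Informal example: for a __fastcall callback the first two 32-bit
+** integer arguments are picked up from cts->cb.gpr[0..1] (presumably
+** ECX/EDX as saved by the assembler entry), while a uint64_t argument
+** (n > 1) goes to the stack and, on non-Windows ABIs, also closes off
+** the remaining GPRs to preserve argument order.
+*/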
+
+#elif LJ_TARGET_X64 && LJ_ABI_WIN
+
+/* Windows/x64 argument registers are strictly positional (use ngpr). */
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (ngpr < maxgpr) { sp = &cts->cb.fpr[ngpr++]; UNUSED(nfpr); goto done; } \
+ } else { \
+ if (ngpr < maxgpr) { sp = &cts->cb.gpr[ngpr++]; goto done; } \
+ }
+
+#elif LJ_TARGET_X64
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } \
+ } else { \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } \
+ }
+
+#elif LJ_TARGET_ARM
+
+#if LJ_ABI_SOFTFP
+
+#define CALLBACK_HANDLE_REGARG_FP1 UNUSED(isfp);
+#define CALLBACK_HANDLE_REGARG_FP2
+
+#else
+
+#define CALLBACK_HANDLE_REGARG_FP1 \
+ if (isfp) { \
+ if (n == 1) { \
+ if (fprodd) { \
+ sp = &cts->cb.fpr[fprodd-1]; \
+ fprodd = 0; \
+ goto done; \
+ } else if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ fprodd = nfpr; \
+ goto done; \
+ } \
+ } else { \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ goto done; \
+ } \
+ } \
+ fprodd = 0; /* No reordering after the first FP value is on stack. */ \
+ } else {
+
+#define CALLBACK_HANDLE_REGARG_FP2 }
+
+#endif
+
+#define CALLBACK_HANDLE_REGARG \
+ CALLBACK_HANDLE_REGARG_FP1 \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } CALLBACK_HANDLE_REGARG_FP2
+
+#elif LJ_TARGET_ARM64
+
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + n <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr]; \
+ nfpr += n; \
+ goto done; \
+ } else { \
+ nfpr = CCALL_NARG_FPR; /* Prevent reordering. */ \
+ } \
+ } else { \
+ if (!LJ_TARGET_OSX && n > 1) \
+ ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ } else { \
+ ngpr = CCALL_NARG_GPR; /* Prevent reordering. */ \
+ } \
+ }
+
+#elif LJ_TARGET_PPC
+
+#define CALLBACK_HANDLE_GPR \
+ if (n > 1) { \
+ lj_assertCTS(((LJ_ABI_SOFTFP && ctype_isnum(cta->info)) || /* double. */ \
+ ctype_isinteger(cta->info)) && n == 2, /* int64_t. */ \
+ "bad GPR type"); \
+ ngpr = (ngpr + 1u) & ~1u; /* Align int64_t to regpair. */ \
+ } \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+
+#if LJ_ABI_SOFTFP
+#define CALLBACK_HANDLE_REGARG \
+ CALLBACK_HANDLE_GPR \
+ UNUSED(isfp);
+#else
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp) { \
+ if (nfpr + 1 <= CCALL_NARG_FPR) { \
+ sp = &cts->cb.fpr[nfpr++]; \
+ cta = ctype_get(cts, CTID_DOUBLE); /* FPRs always hold doubles. */ \
+ goto done; \
+ } \
+ } else { /* Try to pass argument in GPRs. */ \
+ CALLBACK_HANDLE_GPR \
+ }
+#endif
+
+#if !LJ_ABI_SOFTFP
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ *(double *)dp = *(float *)dp; /* FPRs always hold doubles. */
+#endif
+
+#elif LJ_TARGET_MIPS32
+
+#define CALLBACK_HANDLE_GPR \
+ if (n > 1) ngpr = (ngpr + 1u) & ~1u; /* Align to regpair. */ \
+ if (ngpr + n <= maxgpr) { \
+ sp = &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+
+#if !LJ_ABI_SOFTFP /* MIPS32 hard-float */
+#define CALLBACK_HANDLE_REGARG \
+ if (isfp && nfpr < CCALL_NARG_FPR) { /* Try to pass argument in FPRs. */ \
+ sp = (void *)((uint8_t *)&cts->cb.fpr[nfpr] + ((LJ_BE && n==1) ? 4 : 0)); \
+ nfpr++; ngpr += n; \
+ goto done; \
+ } else { /* Try to pass argument in GPRs. */ \
+ nfpr = CCALL_NARG_FPR; \
+ CALLBACK_HANDLE_GPR \
+ }
+#else /* MIPS32 soft-float */
+#define CALLBACK_HANDLE_REGARG \
+ CALLBACK_HANDLE_GPR \
+ UNUSED(isfp);
+#endif
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ((float *)dp)[1] = *(float *)dp;
+
+#elif LJ_TARGET_MIPS64
+
+#if !LJ_ABI_SOFTFP /* MIPS64 hard-float */
+#define CALLBACK_HANDLE_REGARG \
+ if (ngpr + n <= maxgpr) { \
+ sp = isfp ? (void*) &cts->cb.fpr[ngpr] : (void*) &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+#else /* MIPS64 soft-float */
+#define CALLBACK_HANDLE_REGARG \
+ if (ngpr + n <= maxgpr) { \
+ UNUSED(isfp); \
+ sp = (void*) &cts->cb.gpr[ngpr]; \
+ ngpr += n; \
+ goto done; \
+ }
+#endif
+
+#define CALLBACK_HANDLE_RET \
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float)) \
+ ((float *)dp)[1] = *(float *)dp;
+
+#else
+#error "Missing calling convention definitions for this architecture"
+#endif
+
+/* Convert and push callback arguments to Lua stack. */
+static void callback_conv_args(CTState *cts, lua_State *L)
+{
+ TValue *o = L->top;
+ intptr_t *stack = cts->cb.stack;
+ MSize slot = cts->cb.slot;
+ CTypeID id = 0, rid, fid;
+ int gcsteps = 0;
+ CType *ct;
+ GCfunc *fn;
+ int fntp;
+ MSize ngpr = 0, nsp = 0, maxgpr = CCALL_NARG_GPR;
+#if CCALL_NARG_FPR
+ MSize nfpr = 0;
+#if LJ_TARGET_ARM
+ MSize fprodd = 0;
+#endif
+#endif
+
+ if (slot < cts->cb.sizeid && (id = cts->cb.cbid[slot]) != 0) {
+ ct = ctype_get(cts, id);
+ rid = ctype_cid(ct->info); /* Return type. x86: +(spadj<<16). */
+ fn = funcV(lj_tab_getint(cts->miscmap, (int32_t)slot));
+ fntp = LJ_TFUNC;
+ } else { /* Must set up frame first, before throwing the error. */
+ ct = NULL;
+ rid = 0;
+ fn = (GCfunc *)L;
+ fntp = LJ_TTHREAD;
+ }
+ /* Continuation returns from callback. */
+ if (LJ_FR2) {
+ (o++)->u64 = LJ_CONT_FFI_CALLBACK;
+ (o++)->u64 = rid;
+ } else {
+ o->u32.lo = LJ_CONT_FFI_CALLBACK;
+ o->u32.hi = rid;
+ o++;
+ }
+ setframe_gc(o, obj2gco(fn), fntp);
+ if (LJ_FR2) o++;
+ setframe_ftsz(o, ((char *)(o+1) - (char *)L->base) + FRAME_CONT);
+ L->top = L->base = ++o;
+ if (!ct)
+ lj_err_caller(cts->L, LJ_ERR_FFI_BADCBACK);
+ if (isluafunc(fn))
+ setcframe_pc(L->cframe, proto_bc(funcproto(fn))+1);
+ lj_state_checkstack(L, LUA_MINSTACK); /* May throw. */
+ o = L->base; /* Might have been reallocated. */
+
+#if LJ_TARGET_X86
+ /* x86 has several different calling conventions. */
+ switch (ctype_cconv(ct->info)) {
+ case CTCC_FASTCALL: maxgpr = 2; break;
+ case CTCC_THISCALL: maxgpr = 1; break;
+ default: maxgpr = 0; break;
+ }
+#endif
+
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ void *sp;
+ CTSize sz;
+ int isfp;
+ MSize n;
+ lj_assertCTS(ctype_isfield(ctf->info), "field expected");
+ cta = ctype_rawchild(cts, ctf);
+ isfp = ctype_isfp(cta->info);
+ sz = (cta->size + CTSIZE_PTR-1) & ~(CTSIZE_PTR-1);
+ n = sz / CTSIZE_PTR; /* Number of GPRs or stack slots needed. */
+
+ CALLBACK_HANDLE_REGARG /* Handle register arguments. */
+
+ /* Otherwise pass argument on stack. */
+ if (CCALL_ALIGN_STACKARG && LJ_32 && sz == 8)
+ nsp = (nsp + 1) & ~1u; /* Align 64 bit argument on stack. */
+ sp = &stack[nsp];
+ nsp += n;
+
+ done:
+ if (LJ_BE && cta->size < CTSIZE_PTR
+#if LJ_TARGET_MIPS64
+ && !(isfp && nsp)
+#endif
+ )
+ sp = (void *)((uint8_t *)sp + CTSIZE_PTR-cta->size);
+ gcsteps += lj_cconv_tv_ct(cts, cta, 0, o++, sp);
+ }
+ fid = ctf->sib;
+ }
+ L->top = o;
+#if LJ_TARGET_X86
+ /* Store stack adjustment for returns from non-cdecl callbacks. */
+ if (ctype_cconv(ct->info) != CTCC_CDECL) {
+#if LJ_FR2
+ (L->base-3)->u64 |= (nsp << (16+2));
+#else
+ (L->base-2)->u32.hi |= (nsp << (16+2));
+#endif
+ }
+#endif
+ while (gcsteps-- > 0)
+ lj_gc_check(L);
+}
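+
+/* Informal example: for a callback cast to "int (*)(int, double)" the
+** int is fetched from cb.gpr[]/stack and the double from cb.fpr[]/stack
+** per the CALLBACK_HANDLE_REGARG rules above, and both are pushed as
+** TValues, so the Lua function simply receives (i, d).
+*/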
+
+/* Convert Lua object to callback result. */
+static void callback_conv_result(CTState *cts, lua_State *L, TValue *o)
+{
+#if LJ_FR2
+ CType *ctr = ctype_raw(cts, (uint16_t)(L->base-3)->u64);
+#else
+ CType *ctr = ctype_raw(cts, (uint16_t)(L->base-2)->u32.hi);
+#endif
+#if LJ_TARGET_X86
+ cts->cb.gpr[2] = 0;
+#endif
+ if (!ctype_isvoid(ctr->info)) {
+ uint8_t *dp = (uint8_t *)&cts->cb.gpr[0];
+#if CCALL_NUM_FPR
+ if (ctype_isfp(ctr->info))
+ dp = (uint8_t *)&cts->cb.fpr[0];
+#endif
+#if LJ_TARGET_ARM64 && LJ_BE
+ if (ctype_isfp(ctr->info) && ctr->size == sizeof(float))
+ dp = (uint8_t *)&cts->cb.fpr[0].f[1];
+#endif
+ lj_cconv_ct_tv(cts, ctr, dp, o, 0);
+#ifdef CALLBACK_HANDLE_RET
+ CALLBACK_HANDLE_RET
+#endif
+ /* Extend returned integers to (at least) 32 bits. */
+ if (ctype_isinteger_or_bool(ctr->info) && ctr->size < 4) {
+ if (ctr->info & CTF_UNSIGNED)
+ *(uint32_t *)dp = ctr->size == 1 ? (uint32_t)*(uint8_t *)dp :
+ (uint32_t)*(uint16_t *)dp;
+ else
+ *(int32_t *)dp = ctr->size == 1 ? (int32_t)*(int8_t *)dp :
+ (int32_t)*(int16_t *)dp;
+ }
+#if LJ_TARGET_MIPS64 || (LJ_TARGET_ARM64 && LJ_BE)
+ /* Always sign-extend results to 64 bits. Even a soft-fp 'float'. */
+ if (ctr->size <= 4 &&
+ (LJ_ABI_SOFTFP || ctype_isinteger_or_bool(ctr->info)))
+ *(int64_t *)dp = (int64_t)*(int32_t *)dp;
+#endif
+#if LJ_TARGET_X86
+ if (ctype_isfp(ctr->info))
+ cts->cb.gpr[2] = ctr->size == sizeof(float) ? 1 : 2;
+#endif
+ }
+}
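+
+/* Informal example: if the callback type returns uint8_t and the Lua
+** function returns 300, lj_cconv_ct_tv() first narrows it to 44 and the
+** code above then zero-extends it to 32 bits in cb.gpr[0], as most ABIs
+** expect for small integer results.
+*/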
+
+/* Enter callback. */
+lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf)
+{
+ lua_State *L = cts->L;
+ global_State *g = cts->g;
+ lj_assertG(L != NULL, "uninitialized cts->L in callback");
+ if (tvref(g->jit_base)) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_FFI_BADCBACK));
+ if (g->panic) g->panic(L);
+ exit(EXIT_FAILURE);
+ }
+ lj_trace_abort(g); /* Never record across callback. */
+ /* Setup C frame. */
+ cframe_prev(cf) = L->cframe;
+ setcframe_L(cf, L);
+ cframe_errfunc(cf) = -1;
+ cframe_nres(cf) = 0;
+ L->cframe = cf;
+ callback_conv_args(cts, L);
+ return L; /* Now call the function on this stack. */
+}
+
+/* Leave callback. */
+void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o)
+{
+ lua_State *L = cts->L;
+ GCfunc *fn;
+ TValue *obase = L->base;
+ L->base = L->top; /* Keep continuation frame for throwing errors. */
+ if (o >= L->base) {
+ /* PC of RET* is lost. Point to last line for result conv. errors. */
+ fn = curr_func(L);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ setcframe_pc(L->cframe, proto_bc(pt)+pt->sizebc+1);
+ }
+ }
+ callback_conv_result(cts, L, o);
+ /* Finally drop C frame and continuation frame. */
+ L->top -= 2+2*LJ_FR2;
+ L->base = obase;
+ L->cframe = cframe_prev(L->cframe);
+ cts->cb.slot = 0; /* Blacklist C function that called the callback. */
+}
+
+/* -- C callback management ----------------------------------------------- */
+
+/* Get an unused slot in the callback slot table. */
+static MSize callback_slot_new(CTState *cts, CType *ct)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ CTypeID1 *cbid = cts->cb.cbid;
+ MSize top;
+ for (top = cts->cb.topid; top < cts->cb.sizeid; top++)
+ if (LJ_LIKELY(cbid[top] == 0))
+ goto found;
+#if CALLBACK_MAX_SLOT
+ if (top >= CALLBACK_MAX_SLOT)
+#endif
+ lj_err_caller(cts->L, LJ_ERR_FFI_CBACKOV);
+ if (!cts->cb.mcode)
+ callback_mcode_new(cts);
+ lj_mem_growvec(cts->L, cbid, cts->cb.sizeid, CALLBACK_MAX_SLOT, CTypeID1);
+ cts->cb.cbid = cbid;
+ memset(cbid+top, 0, (cts->cb.sizeid-top)*sizeof(CTypeID1));
+found:
+ cbid[top] = id;
+ cts->cb.topid = top+1;
+ return top;
+}
+
+/* Check for function pointer and supported argument/result types. */
+static CType *callback_checkfunc(CTState *cts, CType *ct)
+{
+ int narg = 0;
+ if (!ctype_isptr(ct->info) || (LJ_64 && ct->size != CTSIZE_PTR))
+ return NULL;
+ ct = ctype_rawchild(cts, ct);
+ if (ctype_isfunc(ct->info)) {
+ CType *ctr = ctype_rawchild(cts, ct);
+ CTypeID fid = ct->sib;
+ if (!(ctype_isvoid(ctr->info) || ctype_isenum(ctr->info) ||
+ ctype_isptr(ctr->info) || (ctype_isnum(ctr->info) && ctr->size <= 8)))
+ return NULL;
+ if ((ct->info & CTF_VARARG))
+ return NULL;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) {
+ CType *cta;
+ lj_assertCTS(ctype_isfield(ctf->info), "field expected");
+ cta = ctype_rawchild(cts, ctf);
+ if (!(ctype_isenum(cta->info) || ctype_isptr(cta->info) ||
+ (ctype_isnum(cta->info) && cta->size <= 8)) ||
+ ++narg >= LUA_MINSTACK-3)
+ return NULL;
+ }
+ fid = ctf->sib;
+ }
+ return ct;
+ }
+ return NULL;
+}
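+
+/* Informal examples for the check above:
+**   int (*)(int, double)     accepted: scalar args, scalar result.
+**   void (*)(const char *)   accepted: pointer argument.
+**   int (*)(int, ...)        rejected: varargs (CTF_VARARG).
+**   struct S (*)(void)       rejected: aggregate result.
+**   void (*)(struct S)       rejected: aggregate argument.
+*/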
+
+/* Create a new callback and return the callback function pointer. */
+void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn)
+{
+ ct = callback_checkfunc(cts, ct);
+ if (ct) {
+ MSize slot = callback_slot_new(cts, ct);
+ GCtab *t = cts->miscmap;
+ setfuncV(cts->L, lj_tab_setint(cts->L, t, (int32_t)slot), fn);
+ lj_gc_anybarriert(cts->L, t);
+ return callback_slot2ptr(cts, slot);
+ }
+ return NULL; /* Bad conversion. */
+}
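+
+/* Informal usage from Lua (reaches this function via ffi.cast):
+**   local cb = ffi.cast("int (*)(int,int)", function(a, b) return a+b end)
+** The returned pointer is the per-slot stub in the mcode page and can be
+** handed to C code expecting a plain function pointer.
+*/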
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ccallback.h b/libs/luajit-cmake/luajit/src/lj_ccallback.h
new file mode 100644
index 0000000..8a2c31d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ccallback.h
@@ -0,0 +1,25 @@
+/*
+** FFI C callback handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCALLBACK_H
+#define _LJ_CCALLBACK_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Really belongs to lj_vm.h. */
+LJ_ASMF void lj_vm_ffi_callback(void);
+
+LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p);
+LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf);
+LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o);
+LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn);
+LJ_FUNC void lj_ccallback_mcode_free(CTState *cts);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cconv.c b/libs/luajit-cmake/luajit/src/lj_cconv.c
new file mode 100644
index 0000000..3bbfd3f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cconv.c
@@ -0,0 +1,770 @@
+/*
+** C type conversions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cconv.h"
+#include "lj_ccallback.h"
+
+/* -- Conversion errors --------------------------------------------------- */
+
+/* Bad conversion. */
+LJ_NORET static void cconv_err_conv(CTState *cts, CType *d, CType *s,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src;
+ if ((flags & CCF_FROMTV))
+ src = lj_obj_typename[1+(ctype_isnum(s->info) ? LUA_TNUMBER :
+ ctype_isarray(s->info) ? LUA_TSTRING : LUA_TNIL)];
+ else
+ src = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, s), NULL));
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Bad conversion from TValue. */
+LJ_NORET static void cconv_err_convtv(CTState *cts, CType *d, TValue *o,
+ CTInfo flags)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ const char *src = lj_typename(o);
+ if (CCF_GETARG(flags))
+ lj_err_argv(cts->L, CCF_GETARG(flags), LJ_ERR_FFI_BADCONV, src, dst);
+ else
+ lj_err_callerv(cts->L, LJ_ERR_FFI_BADCONV, src, dst);
+}
+
+/* Initializer overflow. */
+LJ_NORET static void cconv_err_initov(CTState *cts, CType *d)
+{
+ const char *dst = strdata(lj_ctype_repr(cts->L, ctype_typeid(cts, d), NULL));
+ lj_err_callerv(cts->L, LJ_ERR_FFI_INITOV, dst);
+}
+
+/* -- C type compatibility checks ----------------------------------------- */
+
+/* Get raw type and qualifiers for a child type. Resolves enums, too. */
+static CType *cconv_childqual(CTState *cts, CType *ct, CTInfo *qual)
+{
+ ct = ctype_child(cts, ct);
+ for (;;) {
+ if (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ } else if (!ctype_isenum(ct->info)) {
+ break;
+ }
+ ct = ctype_child(cts, ct);
+ }
+ *qual |= (ct->info & CTF_QUAL);
+ return ct;
+}
+
+/* Check for compatible types when converting to a pointer.
+** Note: these checks are more relaxed than what C99 mandates.
+*/
+int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags)
+{
+ if (!((flags & CCF_CAST) || d == s)) {
+ CTInfo dqual = 0, squal = 0;
+ d = cconv_childqual(cts, d, &dqual);
+ if (!ctype_isstruct(s->info))
+ s = cconv_childqual(cts, s, &squal);
+ if ((flags & CCF_SAME)) {
+ if (dqual != squal)
+ return 0; /* Different qualifiers. */
+ } else if (!(flags & CCF_IGNQUAL)) {
+ if ((dqual & squal) != squal)
+ return 0; /* Discarded qualifiers. */
+ if (ctype_isvoid(d->info) || ctype_isvoid(s->info))
+ return 1; /* Converting to/from void * is always ok. */
+ }
+ if (ctype_type(d->info) != ctype_type(s->info) ||
+ d->size != s->size)
+ return 0; /* Different type or different size. */
+ if (ctype_isnum(d->info)) {
+ if (((d->info ^ s->info) & (CTF_BOOL|CTF_FP)))
+ return 0; /* Different numeric types. */
+ } else if (ctype_ispointer(d->info)) {
+ /* Check child types for compatibility. */
+ return lj_cconv_compatptr(cts, d, s, flags|CCF_SAME);
+ } else if (ctype_isstruct(d->info)) {
+ if (d != s)
+ return 0; /* Must be exact same type for struct/union. */
+ } else if (ctype_isfunc(d->info)) {
+ /* NYI: structural equality of functions. */
+ }
+ }
+ return 1; /* Types are compatible. */
+}
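+
+/* Informal examples (without CCF_CAST or CCF_IGNQUAL):
+**   int *       -> const int *   ok, qualifiers may be added.
+**   const int * -> int *         rejected, qualifier would be discarded.
+**   int *       -> void *        ok, void pointers convert freely.
+**   int *       -> float *       rejected, CTF_FP differs.
+*/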
+
+/* -- C type to C type conversion ----------------------------------------- */
+
+/* Convert C type to C type. Caveat: expects to get the raw CType!
+**
+** Note: This is only used by the interpreter and not optimized at all.
+** The JIT compiler will do a much better job specializing for each case.
+*/
+void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags)
+{
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+ void *tmpptr;
+
+ lj_assertCTS(!ctype_isenum(dinfo) && !ctype_isenum(sinfo),
+ "unresolved enum");
+ lj_assertCTS(!ctype_isattrib(dinfo) && !ctype_isattrib(sinfo),
+ "unstripped attribute");
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /* Some basic sanity checks. */
+ lj_assertCTS(!ctype_isnum(dinfo) || dsize > 0, "bad size for number type");
+ lj_assertCTS(!ctype_isnum(sinfo) || ssize > 0, "bad size for number type");
+ lj_assertCTS(!ctype_isbool(dinfo) || dsize == 1 || dsize == 4,
+ "bad size for bool type");
+ lj_assertCTS(!ctype_isbool(sinfo) || ssize == 1 || ssize == 4,
+ "bad size for bool type");
+ lj_assertCTS(!ctype_isinteger(dinfo) || (1u<<lj_fls(dsize)) == dsize,
+ "bad size for integer type");
+ lj_assertCTS(!ctype_isinteger(sinfo) || (1u<<lj_fls(ssize)) == ssize,
+ "bad size for integer type");
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is a bool. */
+ case CCX(B, B):
+ /* Source operand is already normalized. */
+ if (dsize == 1) *dp = *sp; else *(int *)dp = *sp;
+ break;
+ case CCX(B, I): {
+ MSize i;
+ uint8_t b = 0;
+ for (i = 0; i < ssize; i++) b |= sp[i];
+ b = (b != 0);
+ if (dsize == 1) *dp = b; else *(int *)dp = b;
+ break;
+ }
+ case CCX(B, F): {
+ uint8_t b;
+ if (ssize == sizeof(double)) b = (*(double *)sp != 0);
+ else if (ssize == sizeof(float)) b = (*(float *)sp != 0);
+ else goto err_conv; /* NYI: long double. */
+ if (dsize == 1) *dp = b; else *(int *)dp = b;
+ break;
+ }
+
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dsize > ssize) { /* Zero-extend or sign-extend LSB. */
+#if LJ_LE
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[ssize-1]&0x80)) ? 0xff : 0;
+ memcpy(dp, sp, ssize);
+ memset(dp + ssize, fill, dsize-ssize);
+#else
+ uint8_t fill = (!(sinfo & CTF_UNSIGNED) && (sp[0]&0x80)) ? 0xff : 0;
+ memset(dp, fill, dsize-ssize);
+ memcpy(dp + (dsize-ssize), sp, ssize);
+#endif
+ } else { /* Copy LSB. */
+#if LJ_LE
+ memcpy(dp, sp, dsize);
+#else
+ memcpy(dp, sp + (ssize-dsize), dsize);
+#endif
+ }
+ break;
+ case CCX(I, F): {
+ double n; /* Always convert via double. */
+ conv_I_F:
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Then convert double to integer. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (dsize < 4 || (dsize == 4 && !(dinfo & CTF_UNSIGNED))) {
+ int32_t i = (int32_t)n;
+ if (dsize == 4) *(int32_t *)dp = i;
+ else if (dsize == 2) *(int16_t *)dp = (int16_t)i;
+ else *(int8_t *)dp = (int8_t)i;
+ } else if (dsize == 4) {
+ *(uint32_t *)dp = (uint32_t)n;
+ } else if (dsize == 8) {
+ if (!(dinfo & CTF_UNSIGNED))
+ *(int64_t *)dp = (int64_t)n;
+ else
+ *(uint64_t *)dp = lj_num2u64(n);
+ } else {
+ goto err_conv; /* NYI: conversion to >64 bit integers. */
+ }
+ break;
+ }
+ case CCX(I, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_I_F; /* Just convert re. */
+ case CCX(I, P):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+ case CCX(I, A):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ tmpptr = sp;
+ sp = (uint8_t *)&tmpptr;
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I): {
+ double n; /* Always convert via double. */
+ conv_F_I:
+ /* First convert source to double. */
+ /* The conversion must exactly match the semantics of JIT-compiled code! */
+ if (ssize < 4 || (ssize == 4 && !(sinfo & CTF_UNSIGNED))) {
+ int32_t i;
+ if (ssize == 4) {
+ i = *(int32_t *)sp;
+ } else if (!(sinfo & CTF_UNSIGNED)) {
+ if (ssize == 2) i = *(int16_t *)sp;
+ else i = *(int8_t *)sp;
+ } else {
+ if (ssize == 2) i = *(uint16_t *)sp;
+ else i = *(uint8_t *)sp;
+ }
+ n = (double)i;
+ } else if (ssize == 4) {
+ n = (double)*(uint32_t *)sp;
+ } else if (ssize == 8) {
+ if (!(sinfo & CTF_UNSIGNED)) n = (double)*(int64_t *)sp;
+ else n = (double)*(uint64_t *)sp;
+ } else {
+ goto err_conv; /* NYI: conversion from >64 bit integers. */
+ }
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, F): {
+ double n; /* Always convert via double. */
+ conv_F_F:
+ if (ssize == dsize) goto copyval;
+ /* Convert source to double. */
+ if (ssize == sizeof(double)) n = *(double *)sp;
+ else if (ssize == sizeof(float)) n = (double)*(float *)sp;
+ else goto err_conv; /* NYI: long double. */
+ /* Convert double to destination. */
+ if (dsize == sizeof(double)) *(double *)dp = n;
+ else if (dsize == sizeof(float)) *(float *)dp = (float)n;
+ else goto err_conv; /* NYI: long double. */
+ break;
+ }
+ case CCX(F, C):
+ s = ctype_child(cts, s);
+ sinfo = s->info;
+ ssize = s->size;
+ goto conv_F_F; /* Ignore im, and convert from re. */
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_I; /* Convert to re. */
+ case CCX(C, F):
+ d = ctype_child(cts, d);
+ dinfo = d->info;
+ dsize = d->size;
+ memset(dp + dsize, 0, dsize); /* Clear im. */
+ goto conv_F_F; /* Convert to re. */
+
+ case CCX(C, C):
+ if (dsize != ssize) { /* Different types: convert re/im separately. */
+ CType *dc = ctype_child(cts, d);
+ CType *sc = ctype_child(cts, s);
+ lj_cconv_ct_ct(cts, dc, sc, dp, sp, flags);
+ lj_cconv_ct_ct(cts, dc, sc, dp + dc->size, sp + sc->size, flags);
+ return;
+ }
+ goto copyval; /* Otherwise this is easy. */
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C): {
+ CType *dc = ctype_child(cts, d);
+ CTSize esize;
+ /* First convert the scalar to the first element. */
+ lj_cconv_ct_ct(cts, dc, s, dp, sp, flags);
+ /* Then replicate it to the other elements (splat). */
+ for (sp = dp, esize = dc->size; dsize > esize; dsize -= esize) {
+ dp += esize;
+ memcpy(dp, sp, esize);
+ }
+ break;
+ }
+
+ case CCX(V, V):
+ /* Copy same-sized vectors, even for different lengths/element-types. */
+ if (dsize != ssize) goto err_conv;
+ goto copyval;
+
+ /* Destination is a pointer. */
+ case CCX(P, I):
+ if (!(flags & CCF_CAST)) goto err_conv;
+ dinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ goto conv_I_I;
+
+ case CCX(P, F):
+ if (!(flags & CCF_CAST) || !(flags & CCF_FROMTV)) goto err_conv;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ dinfo = CTINFO(CT_NUM, (LJ_64 && dsize == 8) ? 0 : CTF_UNSIGNED);
+ goto conv_I_F;
+
+ case CCX(P, P):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, cdata_getptr(sp, ssize));
+ break;
+
+ case CCX(P, A):
+ case CCX(P, S):
+ if (!lj_cconv_compatptr(cts, d, s, flags)) goto err_conv;
+ cdata_setptr(dp, dsize, sp);
+ break;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || dsize != ssize ||
+ d->size == CTSIZE_INVALID || !lj_cconv_compatptr(cts, d, s, flags))
+ goto err_conv;
+ goto copyval;
+
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ if ((flags & CCF_CAST) || (d->info & CTF_VLA) || d != s)
+ goto err_conv; /* Must be exact same type. */
+copyval: /* Copy value. */
+ lj_assertCTS(dsize == ssize, "value copy with different sizes");
+ memcpy(dp, sp, dsize);
+ break;
+
+ default:
+ err_conv:
+ cconv_err_conv(cts, d, s, flags);
+ }
+}
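+
+/* Informal example: converting an int8_t to a double dispatches via
+** cconv_idx2() to case CCX(F, I): the source is sign-extended to an
+** int32_t, cast to double and stored (see conv_F_I above).
+*/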
+
+/* -- C type to TValue conversion ----------------------------------------- */
+
+/* Convert C type to TValue. Caveat: expects to get the raw CType! */
+int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp)
+{
+ CTInfo sinfo = s->info;
+ if (ctype_isnum(sinfo)) {
+ if (!ctype_isbool(sinfo)) {
+ if (ctype_isinteger(sinfo) && s->size > 4) goto copyval;
+ if (LJ_DUALNUM && ctype_isinteger(sinfo)) {
+ int32_t i;
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT32), s,
+ (uint8_t *)&i, sp, 0);
+ if ((sinfo & CTF_UNSIGNED) && i < 0)
+ setnumV(o, (lua_Number)(uint32_t)i);
+ else
+ setintV(o, i);
+ } else {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_DOUBLE), s,
+ (uint8_t *)&o->n, sp, 0);
+ /* Numbers are NOT canonicalized here! Beware of uninitialized data. */
+ lj_assertCTS(tvisnum(o), "non-canonical NaN passed");
+ }
+ } else {
+ uint32_t b = s->size == 1 ? (*sp != 0) : (*(int *)sp != 0);
+ setboolV(o, b);
+ setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
+ }
+ return 0;
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ /* Create reference. */
+ setcdataV(cts->L, o, lj_cdata_newref(cts, sp, sid));
+ return 1; /* Need GC step. */
+ } else {
+ GCcdata *cd;
+ CTSize sz;
+ copyval: /* Copy value. */
+ sz = s->size;
+ lj_assertCTS(sz != CTSIZE_INVALID, "value copy with invalid size");
+ /* Attributes are stripped, qualifiers are kept (but mostly ignored). */
+ cd = lj_cdata_new(cts, ctype_typeid(cts, s), sz);
+ setcdataV(cts->L, o, cd);
+ memcpy(cdataptr(cd), sp, sz);
+ return 1; /* Need GC step. */
+ }
+}
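+
+/* Informal examples: an int32_t becomes a plain Lua number (or integer
+** under LJ_DUALNUM), an int64_t is boxed into a fresh cdata, and a
+** struct or array is surfaced as a reference cdata into the original
+** storage (no copy).
+*/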
+
+/* Convert bitfield to TValue. */
+int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTInfo info = s->info;
+ CTSize pos, bsz;
+ uint32_t val;
+ lj_assertCTS(ctype_isbitfield(info), "bitfield expected");
+ /* NYI: packed bitfields may cause misaligned reads. */
+ switch (ctype_bitcsz(info)) {
+ case 4: val = *(uint32_t *)sp; break;
+ case 2: val = *(uint16_t *)sp; break;
+ case 1: val = *(uint8_t *)sp; break;
+ default:
+ lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info));
+ val = 0;
+ break;
+ }
+ /* Check if a packed bitfield crosses a container boundary. */
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position");
+ lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size");
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
+ if (!(info & CTF_BOOL)) {
+ CTSize shift = 32 - bsz;
+ if (!(info & CTF_UNSIGNED)) {
+ setintV(o, (int32_t)(val << (shift-pos)) >> shift);
+ } else {
+ val = (val << (shift-pos)) >> shift;
+ if (!LJ_DUALNUM || (int32_t)val < 0)
+ setnumV(o, (lua_Number)(uint32_t)val);
+ else
+ setintV(o, (int32_t)val);
+ }
+ } else {
+ uint32_t b = (val >> pos) & 1;
+ lj_assertCTS(bsz == 1, "bad bool bitfield size");
+ setboolV(o, b);
+ setboolV(&cts->g->tmptv2, b); /* Remember for trace recorder. */
+ }
+ return 0; /* No GC step needed. */
+}
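+
+/* Worked example (informal): for "unsigned int f:3" at pos = 4 in a
+** 4-byte container, shift = 32-3 = 29; val << (29-4) moves the field to
+** the top three bits and the final >> 29 leaves it zero-extended in
+** bits 0..2.
+*/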
+
+/* -- TValue to C type conversion ----------------------------------------- */
+
+/* Convert table to array. */
+static void cconv_array_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i;
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize size = d->size, esize = dc->size, ofs = 0;
+ for (i = 0; ; i++) {
+ TValue *tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) continue; /* Try again for 1-based tables. */
+ break; /* Stop at first nil. */
+ }
+ if (ofs >= size)
+ cconv_err_initov(cts, d);
+ lj_cconv_ct_tv(cts, dc, dp + ofs, tv, flags);
+ ofs += esize;
+ }
+ if (size != CTSIZE_INVALID) { /* Only fill up arrays with known size. */
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < size; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, size - ofs);
+ }
+ }
+}
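+
+/* Informal examples: ffi.new("int[4]", {1,2,3}) yields {1,2,3,0} (zero
+** fill), while ffi.new("int[4]", {9}) yields {9,9,9,9} (single-element
+** replication, handled above).
+*/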
+
+/* Convert table to sub-struct/union. */
+static void cconv_substruct_tab(CTState *cts, CType *d, uint8_t *dp,
+ GCtab *t, int32_t *ip, CTInfo flags)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ TValue *tv;
+ int32_t i = *ip, iz = i;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= 0) {
+ retry:
+ tv = (TValue *)lj_tab_getint(t, i);
+ if (!tv || tvisnil(tv)) {
+ if (i == 0) { i = 1; goto retry; } /* 1-based tables. */
+ if (iz == 0) { *ip = i = -1; goto tryname; } /* Init named fields. */
+ break; /* Stop at first nil. */
+ }
+ *ip = i + 1;
+ } else {
+ tryname:
+ tv = (TValue *)lj_tab_getstr(t, gco2str(gcref(df->name)));
+ if (!tv || tvisnil(tv)) continue;
+ }
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, tv, flags);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, tv);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_tab(cts, ctype_rawchild(cts, df),
+ dp+df->size, t, ip, flags);
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Convert table to struct/union. */
+static void cconv_struct_tab(CTState *cts, CType *d,
+ uint8_t *dp, GCtab *t, CTInfo flags)
+{
+ int32_t i = 0;
+ memset(dp, 0, d->size); /* Much simpler to clear the struct first. */
+ cconv_substruct_tab(cts, d, dp, t, &i, flags);
+}
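+
+/* Informal examples: ffi.new("struct { int x, y; }", {1, 2}) initializes
+** positionally, ffi.new("struct { int x, y; }", {y = 2}) by field name;
+** cconv_substruct_tab() starts positionally and falls back to named
+** lookup when the table has no array part.
+*/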
+
+/* Convert TValue to C type. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags)
+{
+ CTypeID sid = CTID_P_VOID;
+ CType *s;
+ void *tmpptr;
+ uint8_t tmpbool, *sp = (uint8_t *)&tmpptr;
+ if (LJ_LIKELY(tvisint(o))) {
+ sp = (uint8_t *)&o->i;
+ sid = CTID_INT32;
+ flags |= CCF_FROMTV;
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ sp = (uint8_t *)&o->n;
+ sid = CTID_DOUBLE;
+ flags |= CCF_FROMTV;
+ } else if (tviscdata(o)) {
+ sp = cdataptr(cdataV(o));
+ sid = cdataV(o)->ctypeid;
+ s = ctype_get(cts, sid);
+ if (ctype_isref(s->info)) { /* Resolve reference for value. */
+ lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized");
+ sp = *(void **)sp;
+ sid = ctype_cid(s->info);
+ }
+ s = ctype_raw(cts, sid);
+ if (ctype_isfunc(s->info)) {
+ CTypeID did = ctype_typeid(cts, d);
+ sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
+ d = ctype_get(cts, did); /* cts->tab may have been reallocated. */
+ } else {
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ goto doconv;
+ }
+ } else if (tvisstr(o)) {
+ GCstr *str = strV(o);
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ if (!cct || !ctype_isconstval(cct->info))
+ goto err_conv;
+ lj_assertCTS(d->size == 4, "only 32 bit enum supported"); /* NYI */
+ sp = (uint8_t *)&cct->size;
+ sid = ctype_cid(cct->info);
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ CType *dc = ctype_rawchild(cts, d);
+ CTSize sz = str->len+1;
+ if (!ctype_isinteger(dc->info) || dc->size != 1)
+ goto err_conv;
+ if (d->size != 0 && d->size < sz)
+ sz = d->size;
+ memcpy(dp, strdata(str), sz);
+ return;
+ } else { /* Otherwise pass it as a const char[]. */
+ sp = (uint8_t *)strdata(str);
+ sid = CTID_A_CCHAR;
+ flags |= CCF_FROMTV;
+ }
+ } else if (tvistab(o)) {
+ if (ctype_isarray(d->info)) {
+ cconv_array_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else if (ctype_isstruct(d->info)) {
+ cconv_struct_tab(cts, d, dp, tabV(o), flags);
+ return;
+ } else {
+ goto err_conv;
+ }
+ } else if (tvisbool(o)) {
+ tmpbool = boolV(o);
+ sp = &tmpbool;
+ sid = CTID_BOOL;
+ } else if (tvisnil(o)) {
+ tmpptr = (void *)0;
+ flags |= CCF_FROMTV;
+ } else if (tvisudata(o)) {
+ GCudata *ud = udataV(o);
+ tmpptr = uddata(ud);
+ if (ud->udtype == UDTYPE_IO_FILE)
+ tmpptr = *(void **)tmpptr;
+ else if (ud->udtype == UDTYPE_BUFFER)
+ tmpptr = ((SBufExt *)tmpptr)->r;
+ } else if (tvislightud(o)) {
+ tmpptr = lightudV(cts->g, o);
+ } else if (tvisfunc(o)) {
+ void *p = lj_ccallback_new(cts, d, funcV(o));
+ if (p) {
+ *(void **)dp = p;
+ return;
+ }
+ goto err_conv;
+ } else {
+ err_conv:
+ cconv_err_convtv(cts, d, o, flags);
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ lj_cconv_ct_ct(cts, d, s, dp, sp, flags);
+}
+
+/* Convert TValue to bitfield. */
+void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o)
+{
+ CTInfo info = d->info;
+ CTSize pos, bsz;
+ uint32_t val, mask;
+ lj_assertCTS(ctype_isbitfield(info), "bitfield expected");
+ if ((info & CTF_BOOL)) {
+ uint8_t tmpbool;
+ lj_assertCTS(ctype_bitbsz(info) == 1, "bad bool bitfield size");
+ lj_cconv_ct_tv(cts, ctype_get(cts, CTID_BOOL), &tmpbool, o, 0);
+ val = tmpbool;
+ } else {
+ CTypeID did = (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32;
+ lj_cconv_ct_tv(cts, ctype_get(cts, did), (uint8_t *)&val, o, 0);
+ }
+ pos = ctype_bitpos(info);
+ bsz = ctype_bitbsz(info);
+ lj_assertCTS(pos < 8*ctype_bitcsz(info), "bad bitfield position");
+ lj_assertCTS(bsz > 0 && bsz <= 8*ctype_bitcsz(info), "bad bitfield size");
+ /* Check if a packed bitfield crosses a container boundary. */
+ if (pos + bsz > 8*ctype_bitcsz(info))
+ lj_err_caller(cts->L, LJ_ERR_FFI_NYIPACKBIT);
+ mask = ((1u << bsz) - 1u) << pos;
+ val = (val << pos) & mask;
+ /* NYI: packed bitfields may cause misaligned reads/writes. */
+ switch (ctype_bitcsz(info)) {
+ case 4: *(uint32_t *)dp = (*(uint32_t *)dp & ~mask) | (uint32_t)val; break;
+ case 2: *(uint16_t *)dp = (*(uint16_t *)dp & ~mask) | (uint16_t)val; break;
+ case 1: *(uint8_t *)dp = (*(uint8_t *)dp & ~mask) | (uint8_t)val; break;
+ default:
+ lj_assertCTS(0, "bad bitfield container size %d", ctype_bitcsz(info));
+ break;
+ }
+}
+
+/* -- Initialize C type with TValues -------------------------------------- */
+
+/* Initialize an array with TValues. */
+static void cconv_array_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ MSize i;
+ if (len*esize > sz)
+ cconv_err_initov(cts, d);
+ for (i = 0, ofs = 0; i < len; i++, ofs += esize)
+ lj_cconv_ct_tv(cts, dc, dp + ofs, o + i, 0);
+ if (ofs == esize) { /* Replicate a single element. */
+ for (; ofs < sz; ofs += esize) memcpy(dp + ofs, dp, esize);
+ } else { /* Otherwise fill the remainder with zero. */
+ memset(dp + ofs, 0, sz - ofs);
+ }
+}
+
+/* Initialize a sub-struct/union with TValues. */
+static void cconv_substruct_init(CTState *cts, CType *d, uint8_t *dp,
+ TValue *o, MSize len, MSize *ip)
+{
+ CTypeID id = d->sib;
+ while (id) {
+ CType *df = ctype_get(cts, id);
+ id = df->sib;
+ if (ctype_isfield(df->info) || ctype_isbitfield(df->info)) {
+ MSize i = *ip;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ if (i >= len) break;
+ *ip = i + 1;
+ if (ctype_isfield(df->info))
+ lj_cconv_ct_tv(cts, ctype_rawchild(cts, df), dp+df->size, o + i, 0);
+ else
+ lj_cconv_bf_tv(cts, df, dp+df->size, o + i);
+ if ((d->info & CTF_UNION)) break;
+ } else if (ctype_isxattrib(df->info, CTA_SUBTYPE)) {
+ cconv_substruct_init(cts, ctype_rawchild(cts, df),
+ dp+df->size, o, len, ip);
+ if ((d->info & CTF_UNION)) break;
+ } /* Ignore all other entries in the chain. */
+ }
+}
+
+/* Initialize a struct/union with TValues. */
+static void cconv_struct_init(CTState *cts, CType *d, CTSize sz, uint8_t *dp,
+ TValue *o, MSize len)
+{
+ MSize i = 0;
+ memset(dp, 0, sz); /* Much simpler to clear the struct first. */
+ cconv_substruct_init(cts, d, dp, o, len, &i);
+ if (i < len)
+ cconv_err_initov(cts, d);
+}
+
+/* Check whether to use a multi-value initializer.
+** This is true if an aggregate is to be initialized with a value.
+** Valarrays are treated as values here so ct_tv handles (V|C, I|F).
+*/
+int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o)
+{
+ if (!(ctype_isrefarray(d->info) || ctype_isstruct(d->info)))
+ return 0; /* Destination is not an aggregate. */
+ if (tvistab(o) || (tvisstr(o) && !ctype_isstruct(d->info)))
+ return 0; /* Initializer is not a value. */
+ if (tviscdata(o) && lj_ctype_rawref(cts, cdataV(o)->ctypeid) == d)
+ return 0; /* Source and destination are identical aggregates. */
+ return 1; /* Otherwise the initializer is a value. */
+}
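+
+/* Informal examples: ffi.new("int[2]", 3, 4) is a multi-value init and
+** ffi.new("int[2]", {3, 4}) a table init, whereas initializing an
+** "int[2]" from another int[2] cdata is a plain value copy (returns 0).
+*/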
+
+/* Initialize C type with TValues. Caveat: expects to get the raw CType! */
+void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len)
+{
+ if (len == 0)
+ memset(dp, 0, sz);
+ else if (len == 1 && !lj_cconv_multi_init(cts, d, o))
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+ else if (ctype_isarray(d->info)) /* Also handles valarray init with len>1. */
+ cconv_array_init(cts, d, sz, dp, o, len);
+ else if (ctype_isstruct(d->info))
+ cconv_struct_init(cts, d, sz, dp, o, len);
+ else
+ cconv_err_initov(cts, d);
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cconv.h b/libs/luajit-cmake/luajit/src/lj_cconv.h
new file mode 100644
index 0000000..45b0ca1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cconv.h
@@ -0,0 +1,71 @@
+/*
+** C type conversions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CCONV_H
+#define _LJ_CCONV_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Compressed C type index. ORDER CCX. */
+enum {
+ CCX_B, /* Bool. */
+ CCX_I, /* Integer. */
+ CCX_F, /* Floating-point number. */
+ CCX_C, /* Complex. */
+ CCX_V, /* Vector. */
+ CCX_P, /* Pointer. */
+ CCX_A, /* Refarray. */
+ CCX_S /* Struct/union. */
+};
+
+/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */
+static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
+{
+ uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
+ lj_assertX(ctype_type(info) <= CT_MAYCONVERT,
+ "cannot convert ctype %08x", info);
+#if LJ_64
+ idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
+#else
+ idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5u) >> 4*(idx & 7u)) & 15u);
+#endif
+ lj_assertX(idx < 8, "cannot convert ctype %08x", info);
+ return idx;
+}
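+
+/* Informal decoding: the dispatch bits combine the ctype kind with the
+** CTF_BOOL/CTF_FP (alias CTF_VECTOR/CTF_COMPLEX) flag bits, and each
+** nibble of 0xf436fff5fff7f021 holds the CCX_* code for one of the 16
+** combinations, 0xf marking "not convertible". E.g. nibble 0 (plain
+** CT_NUM) is 1 = CCX_I and nibble 1 (CT_NUM|CTF_FP) is 2 = CCX_F.
+*/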
+
+#define cconv_idx2(dinfo, sinfo) \
+ ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo)))
+
+#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src)
+
+/* Conversion flags. */
+#define CCF_CAST 0x00000001u
+#define CCF_FROMTV 0x00000002u
+#define CCF_SAME 0x00000004u
+#define CCF_IGNQUAL 0x00000008u
+
+#define CCF_ARG_SHIFT 8
+#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT)
+#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT)
+
+LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags);
+LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
+ uint8_t *dp, uint8_t *sp, CTInfo flags);
+LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
+ TValue *o, uint8_t *sp);
+LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d,
+ uint8_t *dp, TValue *o, CTInfo flags);
+LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o);
+LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o);
+LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
+ uint8_t *dp, TValue *o, MSize len);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cdata.c b/libs/luajit-cmake/luajit/src/lj_cdata.c
new file mode 100644
index 0000000..01a74f5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cdata.c
@@ -0,0 +1,304 @@
+/*
+** C data management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+
+/* -- C data allocation --------------------------------------------------- */
+
+/* Allocate a new C data object holding a reference to another object. */
+GCcdata *lj_cdata_newref(CTState *cts, const void *p, CTypeID id)
+{
+ CTypeID refid = lj_ctype_intern(cts, CTINFO_REF(id), CTSIZE_PTR);
+ GCcdata *cd = lj_cdata_new(cts, refid, CTSIZE_PTR);
+ *(const void **)cdataptr(cd) = p;
+ return cd;
+}
+
+/* Allocate variable-sized or specially aligned C data object. */
+GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz, CTSize align)
+{
+ global_State *g;
+ MSize extra = sizeof(GCcdataVar) + sizeof(GCcdata) +
+ (align > CT_MEMALIGN ? (1u<<align) - (1u<<CT_MEMALIGN) : 0);
+ char *p = lj_mem_newt(L, extra + sz, char);
+ uintptr_t adata = (uintptr_t)p + sizeof(GCcdataVar) + sizeof(GCcdata);
+ uintptr_t almask = (1u << align) - 1u;
+ GCcdata *cd = (GCcdata *)(((adata + almask) & ~almask) - sizeof(GCcdata));
+ lj_assertL((char *)cd - p < 65536, "excessive cdata alignment");
+ cdatav(cd)->offset = (uint16_t)((char *)cd - p);
+ cdatav(cd)->extra = extra;
+ cdatav(cd)->len = sz;
+ g = G(L);
+ setgcrefr(cd->nextgc, g->gc.root);
+ setgcref(g->gc.root, obj2gco(cd));
+ newwhite(g, obj2gco(cd));
+ cd->marked |= 0x80;
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = id;
+ return cd;
+}
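+
+/* Worked example (informal, assuming CT_MEMALIGN == 3): for a 16-byte
+** alignment request (align = 4) the allocation is padded by 16-8 = 8
+** bytes, the payload start is rounded up to a 16-byte boundary, and the
+** GCcdata header is placed right before it; cdatav(cd)->offset records
+** the distance back to the raw allocation for lj_cdata_free().
+*/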
+
+/* Allocate arbitrary C data object. */
+GCcdata *lj_cdata_newx(CTState *cts, CTypeID id, CTSize sz, CTInfo info)
+{
+ if (!(info & CTF_VLA) && ctype_align(info) <= CT_MEMALIGN)
+ return lj_cdata_new(cts, id, sz);
+ else
+ return lj_cdata_newv(cts->L, id, sz, ctype_align(info));
+}
+
+/* Free a C data object. */
+void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd)
+{
+ if (LJ_UNLIKELY(cd->marked & LJ_GC_CDATA_FIN)) {
+ GCobj *root;
+ makewhite(g, obj2gco(cd));
+ markfinalized(obj2gco(cd));
+ if ((root = gcref(g->gc.mmudata)) != NULL) {
+ setgcrefr(cd->nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ } else {
+ setgcref(cd->nextgc, obj2gco(cd));
+ setgcref(g->gc.mmudata, obj2gco(cd));
+ }
+ } else if (LJ_LIKELY(!cdataisv(cd))) {
+ CType *ct = ctype_raw(ctype_ctsG(g), cd->ctypeid);
+ CTSize sz = ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR;
+ lj_assertG(ctype_hassize(ct->info) || ctype_isfunc(ct->info) ||
+ ctype_isextern(ct->info), "free of ctype without a size");
+ lj_mem_free(g, cd, sizeof(GCcdata) + sz);
+ } else {
+ lj_mem_free(g, memcdatav(cd), sizecdatav(cd));
+ }
+}
+
+void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj, uint32_t it)
+{
+ GCtab *t = ctype_ctsG(G(L))->finalizer;
+ if (gcref(t->metatable)) {
+ /* Add cdata to finalizer table, if still enabled. */
+ TValue *tv, tmp;
+ setcdataV(L, &tmp, cd);
+ lj_gc_anybarriert(L, t);
+ tv = lj_tab_set(L, t, &tmp);
+ if (it == LJ_TNIL) {
+ setnilV(tv);
+ cd->marked &= ~LJ_GC_CDATA_FIN;
+ } else {
+ setgcV(L, tv, obj, it);
+ cd->marked |= LJ_GC_CDATA_FIN;
+ }
+ }
+}
+
+/* -- C data indexing ----------------------------------------------------- */
+
+/* Index C data by a TValue. Return CType and pointer. */
+CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key, uint8_t **pp,
+ CTInfo *qual)
+{
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ CType *ct = ctype_get(cts, cd->ctypeid);
+ ptrdiff_t idx;
+
+ /* Resolve reference for cdata object. */
+ if (ctype_isref(ct->info)) {
+ lj_assertCTS(ct->size == CTSIZE_PTR, "ref is not pointer-sized");
+ p = *(uint8_t **)p;
+ ct = ctype_child(cts, ct);
+ }
+
+collect_attrib:
+ /* Skip attributes and collect qualifiers. */
+ while (ctype_isattrib(ct->info)) {
+ if (ctype_attrib(ct->info) == CTA_QUAL) *qual |= ct->size;
+ ct = ctype_child(cts, ct);
+ }
+ /* Interning rejects refs to refs. */
+ lj_assertCTS(!ctype_isref(ct->info), "bad ref of ref");
+
+ if (tvisint(key)) {
+ idx = (ptrdiff_t)intV(key);
+ goto integer_key;
+ } else if (tvisnum(key)) { /* Numeric key. */
+#ifdef _MSC_VER
+ /* Workaround for MSVC bug. */
+ volatile
+#endif
+ lua_Number n = numV(key);
+ idx = LJ_64 ? (ptrdiff_t)n : (ptrdiff_t)lj_num2int(n);
+ integer_key:
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ct->info)); /* Element size. */
+ if (sz == CTSIZE_INVALID)
+ lj_err_caller(cts->L, LJ_ERR_FFI_INVSIZE);
+ if (ctype_isptr(ct->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ } else if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
+ if ((ct->info & CTF_COMPLEX)) idx &= 1;
+ *qual |= CTF_CONST; /* Valarray elements are constant. */
+ }
+ *pp = p + idx*(int32_t)sz;
+ return ct;
+ }
+ } else if (tviscdata(key)) { /* Integer cdata key. */
+ GCcdata *cdk = cdataV(key);
+ CType *ctk = ctype_raw(cts, cdk->ctypeid);
+ if (ctype_isenum(ctk->info)) ctk = ctype_child(cts, ctk);
+ if (ctype_isinteger(ctk->info)) {
+ lj_cconv_ct_ct(cts, ctype_get(cts, CTID_INT_PSZ), ctk,
+ (uint8_t *)&idx, cdataptr(cdk), 0);
+ goto integer_key;
+ }
+ } else if (tvisstr(key)) { /* String key. */
+ GCstr *name = strV(key);
+ if (ctype_isstruct(ct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfieldq(cts, ct, name, &ofs, qual);
+ if (fct) {
+ *pp = p + ofs;
+ return fct;
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2) {
+ *qual |= CTF_CONST; /* Complex fields are constant. */
+ if (strdata(name)[0] == 'r' && strdata(name)[1] == 'e') {
+ *pp = p;
+ return ct;
+ } else if (strdata(name)[0] == 'i' && strdata(name)[1] == 'm') {
+ *pp = p + (ct->size >> 1);
+ return ct;
+ }
+ }
+ } else if (cd->ctypeid == CTID_CTYPEID) {
+ /* Allow indexing a (pointer to) struct constructor to get constants. */
+ CType *sct = ctype_raw(cts, *(CTypeID *)p);
+ if (ctype_isptr(sct->info))
+ sct = ctype_rawchild(cts, sct);
+ if (ctype_isstruct(sct->info)) {
+ CTSize ofs;
+ CType *fct = lj_ctype_getfield(cts, sct, name, &ofs);
+ if (fct && ctype_isconstval(fct->info))
+ return fct;
+ }
+ ct = sct; /* Allow resolving metamethods for constructors, too. */
+ }
+ }
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ if (ctype_isstruct(ctype_rawchild(cts, ct)->info)) {
+ p = (uint8_t *)cdata_getptr(p, ct->size);
+ ct = ctype_child(cts, ct);
+ goto collect_attrib;
+ }
+ }
+ *qual |= 1; /* Lookup failed. */
+ return ct; /* But return the resolved raw type. */
+}
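+
+/* Informal examples: p[3] on an "int *" returns the element type with
+** *pp = p + 3*sizeof(int); s.x on a struct resolves the field offset;
+** p.x on a pointer-to-struct dereferences first (the automatic '->'
+** above); z.re and z.im address the two halves of a complex number.
+*/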
+
+/* -- C data getters ------------------------------------------------------ */
+
+/* Get constant value and convert to TValue. */
+static void cdata_getconst(CTState *cts, TValue *o, CType *ct)
+{
+ CType *ctt = ctype_child(cts, ct);
+ lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4,
+ "only 32 bit const supported"); /* NYI */
+ /* Constants are already zero-extended/sign-extended to 32 bits. */
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(o, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(o, (int32_t)ct->size);
+}
+
+/* Get C data value and convert to TValue. */
+int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp)
+{
+ CTypeID sid;
+
+ if (ctype_isconstval(s->info)) {
+ cdata_getconst(cts, o, s);
+ return 0; /* No GC step needed. */
+ } else if (ctype_isbitfield(s->info)) {
+ return lj_cconv_tv_bf(cts, s, o, sp);
+ }
+
+ /* Get child type of pointer/array/field. */
+ lj_assertCTS(ctype_ispointer(s->info) || ctype_isfield(s->info),
+ "pointer or field expected");
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(s->info)) {
+ lj_assertCTS(s->size == CTSIZE_PTR, "ref is not pointer-sized");
+ sp = *(uint8_t **)sp;
+ sid = ctype_cid(s->info);
+ s = ctype_get(cts, sid);
+ }
+
+ /* Skip attributes. */
+ while (ctype_isattrib(s->info))
+ s = ctype_child(cts, s);
+
+ return lj_cconv_tv_ct(cts, s, sid, o, sp);
+}
+
+/* -- C data setters ------------------------------------------------------ */
+
+/* Convert TValue and set C data value. */
+void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o, CTInfo qual)
+{
+ if (ctype_isconstval(d->info)) {
+ goto err_const;
+ } else if (ctype_isbitfield(d->info)) {
+ if (((d->info|qual) & CTF_CONST)) goto err_const;
+ lj_cconv_bf_tv(cts, d, dp, o);
+ return;
+ }
+
+ /* Get child type of pointer/array/field. */
+ lj_assertCTS(ctype_ispointer(d->info) || ctype_isfield(d->info),
+ "pointer or field expected");
+ d = ctype_child(cts, d);
+
+ /* Resolve reference for field. */
+ if (ctype_isref(d->info)) {
+ lj_assertCTS(d->size == CTSIZE_PTR, "ref is not pointer-sized");
+ dp = *(uint8_t **)dp;
+ d = ctype_child(cts, d);
+ }
+
+ /* Skip attributes and collect qualifiers. */
+ for (;;) {
+ if (ctype_isattrib(d->info)) {
+ if (ctype_attrib(d->info) == CTA_QUAL) qual |= d->size;
+ } else {
+ break;
+ }
+ d = ctype_child(cts, d);
+ }
+
+ lj_assertCTS(ctype_hassize(d->info), "store to ctype without size");
+ lj_assertCTS(!ctype_isvoid(d->info), "store to void type");
+
+ if (((d->info|qual) & CTF_CONST)) {
+ err_const:
+ lj_err_caller(cts->L, LJ_ERR_FFI_WRCONST);
+ }
+
+ lj_cconv_ct_tv(cts, d, dp, o, 0);
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cdata.h b/libs/luajit-cmake/luajit/src/lj_cdata.h
new file mode 100644
index 0000000..de52e8a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cdata.h
@@ -0,0 +1,79 @@
+/*
+** C data management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CDATA_H
+#define _LJ_CDATA_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* Get C data pointer. */
+static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ return ((void *)(uintptr_t)*(uint32_t *)p);
+ } else {
+ lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz);
+ return *(void **)p;
+ }
+}
+
+/* Set C data pointer. */
+static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
+{
+ if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
+ *(uint32_t *)p = (uint32_t)(uintptr_t)v;
+ } else {
+ lj_assertX(sz == CTSIZE_PTR, "bad pointer size %d", sz);
+ *(void **)p = (void *)v;
+ }
+}
+
+/* Allocate fixed-size C data object. */
+static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
+{
+ GCcdata *cd;
+#ifdef LUA_USE_ASSERT
+ CType *ct = ctype_raw(cts, id);
+ lj_assertCTS((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz,
+ "inconsistent size of fixed-size cdata alloc");
+#endif
+ cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = ctype_check(cts, id);
+ return cd;
+}
+
+/* Variant which works without a valid CTState. */
+static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
+{
+ GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz);
+ cd->gct = ~LJ_TCDATA;
+ cd->ctypeid = id;
+ return cd;
+}
+
+LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
+LJ_FUNC GCcdata *lj_cdata_newv(lua_State *L, CTypeID id, CTSize sz,
+ CTSize align);
+LJ_FUNC GCcdata *lj_cdata_newx(CTState *cts, CTypeID id, CTSize sz,
+ CTInfo info);
+
+LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
+LJ_FUNC void lj_cdata_setfin(lua_State *L, GCcdata *cd, GCobj *obj,
+ uint32_t it);
+
+LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
+ uint8_t **pp, CTInfo *qual);
+LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp);
+LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o,
+ CTInfo qual);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_char.c b/libs/luajit-cmake/luajit/src/lj_char.c
new file mode 100644
index 0000000..11f23ef
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_char.c
@@ -0,0 +1,43 @@
+/*
+** Character types.
+** Donated to the public domain.
+**
+** This is intended to replace the problematic libc single-byte NLS functions.
+** These just don't make sense anymore with UTF-8 locales becoming the norm
+** on POSIX systems. It never worked too well on Windows systems since hardly
+** anyone bothered to call setlocale().
+**
+** This table is hardcoded for ASCII. Identifiers include the characters
+** 128-255, too. This allows for the use of all non-ASCII chars as identifiers
+** in the lexer. This is a broad definition, but works well in practice
+** for both UTF-8 locales and most single-byte locales (such as ISO-8859-*).
+**
+** If you really need proper character types for UTF-8 strings, please use
+** an add-on library such as slnunicode: http://luaforge.net/projects/sln/
+*/
+
+#define lj_char_c
+#define LUA_CORE
+
+#include "lj_char.h"
+
+LJ_DATADEF const uint8_t lj_char_bits[257] = {
+ 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 152,152,152,152,152,152,152,152,152,152, 4, 4, 4, 4, 4, 4,
+ 4,176,176,176,176,176,176,160,160,160,160,160,160,160,160,160,
+ 160,160,160,160,160,160,160,160,160,160,160, 4, 4, 4, 4,132,
+ 4,208,208,208,208,208,208,192,192,192,192,192,192,192,192,192,
+ 192,192,192,192,192,192,192,192,192,192,192, 4, 4, 4, 4, 1,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+};
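+
+/* Decoding examples: '0'..'9' = 152 = IDENT|XDIGIT|DIGIT, 'A'..'F' =
+** 176 = IDENT|UPPER|XDIGIT, '_' = 132 = IDENT|PUNCT, and bytes 128..255
+** = 128 = IDENT only. The extra leading 0 entry lets the macros index
+** the table with -1 (end of input), see lj_char_isa().
+*/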
+
diff --git a/libs/luajit-cmake/luajit/src/lj_char.h b/libs/luajit-cmake/luajit/src/lj_char.h
new file mode 100644
index 0000000..c3c86d3
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_char.h
@@ -0,0 +1,42 @@
+/*
+** Character types.
+** Donated to the public domain.
+*/
+
+#ifndef _LJ_CHAR_H
+#define _LJ_CHAR_H
+
+#include "lj_def.h"
+
+#define LJ_CHAR_CNTRL 0x01
+#define LJ_CHAR_SPACE 0x02
+#define LJ_CHAR_PUNCT 0x04
+#define LJ_CHAR_DIGIT 0x08
+#define LJ_CHAR_XDIGIT 0x10
+#define LJ_CHAR_UPPER 0x20
+#define LJ_CHAR_LOWER 0x40
+#define LJ_CHAR_IDENT 0x80
+#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER)
+#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT)
+#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT)
+
+/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */
+#define lj_char_isa(c, t) ((lj_char_bits+1)[(c)] & t)
+#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL)
+#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE)
+#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT)
+#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT)
+#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT)
+#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER)
+#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER)
+#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT)
+#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA)
+#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM)
+#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH)
+
+#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1))
+#define lj_char_tolower(c) ((c) + lj_char_isupper(c))
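+
+/* Branch-free case flips (informal): lj_char_islower() evaluates to
+** LJ_CHAR_LOWER (0x40) or 0, so toupper subtracts 0x40>>1 = 32 exactly
+** for lowercase letters; tolower adds LJ_CHAR_UPPER (0x20) = 32 only for
+** uppercase ones. E.g. lj_char_toupper('a') = 0x61 - 32 = 'A'.
+*/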
+
+LJ_DATA const uint8_t lj_char_bits[257];
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_clib.c b/libs/luajit-cmake/luajit/src/lj_clib.c
new file mode 100644
index 0000000..f0ef6ed
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_clib.c
@@ -0,0 +1,434 @@
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_str.h"
+#include "lj_udata.h"
+#include "lj_ctype.h"
+#include "lj_cconv.h"
+#include "lj_cdata.h"
+#include "lj_clib.h"
+#include "lj_strfmt.h"
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_TARGET_DLOPEN
+
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if defined(RTLD_DEFAULT) && !defined(NO_RTLD_DEFAULT)
+#define CLIB_DEFHANDLE RTLD_DEFAULT
+#elif LJ_TARGET_OSX || LJ_TARGET_BSD
+#define CLIB_DEFHANDLE ((void *)(intptr_t)-2)
+#else
+#define CLIB_DEFHANDLE NULL
+#endif
+
+LJ_NORET LJ_NOINLINE static void clib_error_(lua_State *L)
+{
+ lj_err_callermsg(L, dlerror());
+}
+
+#define clib_error(L, fmt, name) clib_error_(L)
+
+#if LJ_TARGET_CYGWIN
+#define CLIB_SOPREFIX "cyg"
+#else
+#define CLIB_SOPREFIX "lib"
+#endif
+
+#if LJ_TARGET_OSX
+#define CLIB_SOEXT "%s.dylib"
+#elif LJ_TARGET_CYGWIN
+#define CLIB_SOEXT "%s.dll"
+#else
+#define CLIB_SOEXT "%s.so"
+#endif
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (!strchr(name, '/')
+#if LJ_TARGET_CYGWIN
+ && !strchr(name, '\\')
+#endif
+ ) {
+ if (!strchr(name, '.')) {
+ name = lj_strfmt_pushf(L, CLIB_SOEXT, name);
+ L->top--;
+#if LJ_TARGET_CYGWIN
+ } else {
+ return name;
+#endif
+ }
+ if (!(name[0] == CLIB_SOPREFIX[0] && name[1] == CLIB_SOPREFIX[1] &&
+ name[2] == CLIB_SOPREFIX[2])) {
+ name = lj_strfmt_pushf(L, CLIB_SOPREFIX "%s", name);
+ L->top--;
+ }
+ }
+ return name;
+}
+
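+/* For illustration, with the defaults above: "z" maps to "libz.so" on
+** Linux ("libz.dylib" on OSX, "cygz.dll" on Cygwin); a name containing a
+** directory separator like "./foo.so" is passed through unchanged; and a
+** name with an extension such as "z.so.1" still gets the "lib" prefix
+** (except on Cygwin, which returns early). This is what lets
+** ffi.load("z") find the system zlib.
+*/
+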
+/* Check for a recognized ld script line. */
+static const char *clib_check_lds(lua_State *L, const char *buf)
+{
+ char *p, *e;
+ if ((!strncmp(buf, "GROUP", 5) || !strncmp(buf, "INPUT", 5)) &&
+ (p = strchr(buf, '('))) {
+ while (*++p == ' ') ;
+ for (e = p; *e && *e != ' ' && *e != ')'; e++) ;
+ return strdata(lj_str_new(L, p, e-p));
+ }
+ return NULL;
+}
+
+/* Quick and dirty solution to resolve shared library name from ld script. */
+static const char *clib_resolve_lds(lua_State *L, const char *name)
+{
+ FILE *fp = fopen(name, "r");
+ const char *p = NULL;
+ if (fp) {
+ char buf[256];
+ if (fgets(buf, sizeof(buf), fp)) {
+ if (!strncmp(buf, "/* GNU ld script", 16)) { /* ld script magic? */
+ while (fgets(buf, sizeof(buf), fp)) { /* Check all lines. */
+ p = clib_check_lds(L, buf);
+ if (p) break;
+ }
+ } else { /* Otherwise check only the first line. */
+ p = clib_check_lds(L, buf);
+ }
+ }
+ fclose(fp);
+ }
+ return p;
+}
+
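+/* Example of what clib_resolve_lds() copes with: on glibc systems libc.so
+** is often not an ELF file but a GNU ld script, i.e. a text file starting
+** with the "GNU ld script" magic comment and containing a line like
+**   GROUP ( /lib/x86_64-linux-gnu/libc.so.6 ... )
+** (path purely illustrative). The first name inside GROUP(...) or
+** INPUT(...) is extracted and handed back to dlopen().
+*/
+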
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ void *h = dlopen(clib_extname(L, name),
+ RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (!h) {
+ const char *e, *err = dlerror();
+ if (err && *err == '/' && (e = strchr(err, ':')) &&
+ (name = clib_resolve_lds(L, strdata(lj_str_new(L, err, e-err))))) {
+ h = dlopen(name, RTLD_LAZY | (global?RTLD_GLOBAL:RTLD_LOCAL));
+ if (h) return h;
+ err = dlerror();
+ }
+ if (!err) err = "dlopen failed";
+ lj_err_callermsg(L, err);
+ }
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle && cl->handle != CLIB_DEFHANDLE)
+ dlclose(cl->handle);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = dlsym(cl->handle, name);
+ return p;
+}
+
+#elif LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#ifndef GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS
+#define GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS 4
+#define GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT 2
+BOOL WINAPI GetModuleHandleExA(DWORD, LPCSTR, HMODULE*);
+#endif
+
+#define CLIB_DEFHANDLE ((void *)-1)
+
+/* Default libraries. */
+enum {
+ CLIB_HANDLE_EXE,
+#if !LJ_TARGET_UWP
+ CLIB_HANDLE_DLL,
+ CLIB_HANDLE_CRT,
+ CLIB_HANDLE_KERNEL32,
+ CLIB_HANDLE_USER32,
+ CLIB_HANDLE_GDI32,
+#endif
+ CLIB_HANDLE_MAX
+};
+
+static void *clib_def_handle[CLIB_HANDLE_MAX];
+
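+/* For illustration: on Windows the default namespace (ffi.C) has no
+** single dlopen()-style handle. Symbol lookup instead walks the fixed
+** list above (the running .exe, the DLL containing LuaJIT itself, the
+** CRT, then kernel32/user32/gdi32), with the handles resolved lazily on
+** first use in clib_getsym() below.
+*/
+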
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ DWORD err = GetLastError();
+#if LJ_TARGET_XBOXONE
+ wchar_t wbuf[128];
+ char buf[128*2];
+ if (!FormatMessageW(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, err, 0, wbuf, sizeof(wbuf)/sizeof(wchar_t), NULL) ||
+ !WideCharToMultiByte(CP_ACP, 0, wbuf, 128, buf, 128*2, NULL, NULL))
+#else
+ char buf[128];
+ if (!FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS|FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, err, 0, buf, sizeof(buf), NULL))
+#endif
+ buf[0] = '\0';
+ lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, buf));
+}
+
+static int clib_needext(const char *s)
+{
+ while (*s) {
+ if (*s == '/' || *s == '\\' || *s == '.') return 0;
+ s++;
+ }
+ return 1;
+}
+
+static const char *clib_extname(lua_State *L, const char *name)
+{
+ if (clib_needext(name)) {
+ name = lj_strfmt_pushf(L, "%s.dll", name);
+ L->top--;
+ }
+ return name;
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ DWORD oldwerr = GetLastError();
+ void *h = LJ_WIN_LOADLIBA(clib_extname(L, name));
+ if (!h) clib_error(L, "cannot load module " LUA_QS ": %s", name);
+ SetLastError(oldwerr);
+ UNUSED(global);
+ return h;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ if (cl->handle == CLIB_DEFHANDLE) {
+#if !LJ_TARGET_UWP
+ MSize i;
+ for (i = CLIB_HANDLE_KERNEL32; i < CLIB_HANDLE_MAX; i++) {
+ void *h = clib_def_handle[i];
+ if (h) {
+ clib_def_handle[i] = NULL;
+ FreeLibrary((HINSTANCE)h);
+ }
+ }
+#endif
+ } else if (cl->handle) {
+ FreeLibrary((HINSTANCE)cl->handle);
+ }
+}
+
+#if LJ_TARGET_UWP
+EXTERN_C IMAGE_DOS_HEADER __ImageBase;
+#endif
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ void *p = NULL;
+ if (cl->handle == CLIB_DEFHANDLE) { /* Search default libraries. */
+ MSize i;
+ for (i = 0; i < CLIB_HANDLE_MAX; i++) {
+ HINSTANCE h = (HINSTANCE)clib_def_handle[i];
+ if (!(void *)h) { /* Resolve default library handles (once). */
+#if LJ_TARGET_UWP
+ h = (HINSTANCE)&__ImageBase;
+#else
+ switch (i) {
+ case CLIB_HANDLE_EXE: GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, NULL, &h); break;
+ case CLIB_HANDLE_DLL:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)clib_def_handle, &h);
+ break;
+ case CLIB_HANDLE_CRT:
+ GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS|GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (const char *)&_fmode, &h);
+ break;
+ case CLIB_HANDLE_KERNEL32: h = LJ_WIN_LOADLIBA("kernel32.dll"); break;
+ case CLIB_HANDLE_USER32: h = LJ_WIN_LOADLIBA("user32.dll"); break;
+ case CLIB_HANDLE_GDI32: h = LJ_WIN_LOADLIBA("gdi32.dll"); break;
+ }
+ if (!h) continue;
+#endif
+ clib_def_handle[i] = (void *)h;
+ }
+ p = (void *)GetProcAddress(h, name);
+ if (p) break;
+ }
+ } else {
+ p = (void *)GetProcAddress((HINSTANCE)cl->handle, name);
+ }
+ return p;
+}
+
+#else
+
+#define CLIB_DEFHANDLE NULL
+
+LJ_NORET LJ_NOINLINE static void clib_error(lua_State *L, const char *fmt,
+ const char *name)
+{
+ lj_err_callermsg(L, lj_strfmt_pushf(L, fmt, name, "no support for this OS"));
+}
+
+static void *clib_loadlib(lua_State *L, const char *name, int global)
+{
+ lj_err_callermsg(L, "no support for loading dynamic libraries for this OS");
+ UNUSED(name); UNUSED(global);
+ return NULL;
+}
+
+static void clib_unloadlib(CLibrary *cl)
+{
+ UNUSED(cl);
+}
+
+static void *clib_getsym(CLibrary *cl, const char *name)
+{
+ UNUSED(cl); UNUSED(name);
+ return NULL;
+}
+
+#endif
+
+/* -- C library indexing -------------------------------------------------- */
+
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+/* Compute argument size for fastcall/stdcall functions. */
+static CTSize clib_func_argsize(CTState *cts, CType *ct)
+{
+ CTSize n = 0;
+ while (ct->sib) {
+ CType *d;
+ ct = ctype_get(cts, ct->sib);
+ if (ctype_isfield(ct->info)) {
+ d = ctype_rawchild(cts, ct);
+ n += ((d->size + 3) & ~3);
+ }
+ }
+ return n;
+}
+#endif
+
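+/* Example (x86/Windows only): for a declaration like
+**   int __stdcall f(int a, int b);
+** the argument size computed above is 8, and the symbol may actually be
+** exported as "_f@8" ("@f@8" for __fastcall). The retry logic in
+** lj_clib_index() below appends this decoration when the plain name
+** fails to resolve.
+*/
+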
+/* Get redirected or mangled external symbol. */
+static const char *clib_extsym(CTState *cts, CType *ct, GCstr *name)
+{
+ if (ct->sib) {
+ CType *ctf = ctype_get(cts, ct->sib);
+ if (ctype_isxattrib(ctf->info, CTA_REDIR))
+ return strdata(gco2str(gcref(ctf->name)));
+ }
+ return strdata(name);
+}
+
+/* Index a C library by name. */
+TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name)
+{
+ TValue *tv = lj_tab_setstr(L, cl->cache, name);
+ if (LJ_UNLIKELY(tvisnil(tv))) {
+ CTState *cts = ctype_cts(L);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ if (!id)
+ lj_err_callerv(L, LJ_ERR_FFI_NODECL, strdata(name));
+ if (ctype_isconstval(ct->info)) {
+ CType *ctt = ctype_child(cts, ct);
+ lj_assertCTS(ctype_isinteger(ctt->info) && ctt->size <= 4,
+ "only 32 bit const supported"); /* NYI */
+ if ((ctt->info & CTF_UNSIGNED) && (int32_t)ct->size < 0)
+ setnumV(tv, (lua_Number)(uint32_t)ct->size);
+ else
+ setintV(tv, (int32_t)ct->size);
+ } else {
+ const char *sym = clib_extsym(cts, ct, name);
+#if LJ_TARGET_WINDOWS
+ DWORD oldwerr = GetLastError();
+#endif
+ void *p = clib_getsym(cl, sym);
+ GCcdata *cd;
+ lj_assertCTS(ctype_isfunc(ct->info) || ctype_isextern(ct->info),
+ "unexpected ctype %08x in clib", ct->info);
+#if LJ_TARGET_X86 && LJ_ABI_WIN
+ /* Retry with decorated name for fastcall/stdcall functions. */
+ if (!p && ctype_isfunc(ct->info)) {
+ CTInfo cconv = ctype_cconv(ct->info);
+ if (cconv == CTCC_FASTCALL || cconv == CTCC_STDCALL) {
+ CTSize sz = clib_func_argsize(cts, ct);
+ const char *symd = lj_strfmt_pushf(L,
+ cconv == CTCC_FASTCALL ? "@%s@%d" : "_%s@%d",
+ sym, sz);
+ L->top--;
+ p = clib_getsym(cl, symd);
+ }
+ }
+#endif
+ if (!p)
+ clib_error(L, "cannot resolve symbol " LUA_QS ": %s", sym);
+#if LJ_TARGET_WINDOWS
+ SetLastError(oldwerr);
+#endif
+ cd = lj_cdata_new(cts, id, CTSIZE_PTR);
+ *(void **)cdataptr(cd) = p;
+ setcdataV(L, tv, cd);
+ lj_gc_anybarriert(L, cl->cache);
+ }
+ }
+ return tv;
+}
+
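+/* For illustration: the first access to ffi.C.printf misses cl->cache,
+** resolves "printf" via clib_getsym(), wraps the address in a fresh cdata
+** object and stores it in the cache table; subsequent accesses hit the
+** cache directly. Constants declared as e.g. "static const int" come
+** back as plain Lua numbers instead of cdata.
+*/
+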
+/* -- C library management ------------------------------------------------ */
+
+/* Create a new CLibrary object and push it on the stack. */
+static CLibrary *clib_new(lua_State *L, GCtab *mt)
+{
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCudata *ud = lj_udata_new(L, sizeof(CLibrary), t);
+ CLibrary *cl = (CLibrary *)uddata(ud);
+ cl->cache = t;
+ ud->udtype = UDTYPE_FFI_CLIB;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcref(ud->metatable, obj2gco(mt));
+ setudataV(L, L->top++, ud);
+ return cl;
+}
+
+/* Load a C library. */
+void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global)
+{
+ void *handle = clib_loadlib(L, strdata(name), global);
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = handle;
+}
+
+/* Unload a C library. */
+void lj_clib_unload(CLibrary *cl)
+{
+ clib_unloadlib(cl);
+ cl->handle = NULL;
+}
+
+/* Create the default C library object. */
+void lj_clib_default(lua_State *L, GCtab *mt)
+{
+ CLibrary *cl = clib_new(L, mt);
+ cl->handle = CLIB_DEFHANDLE;
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_clib.h b/libs/luajit-cmake/luajit/src/lj_clib.h
new file mode 100644
index 0000000..4429486
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_clib.h
@@ -0,0 +1,29 @@
+/*
+** FFI C library loader.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CLIB_H
+#define _LJ_CLIB_H
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+/* Namespace for C library indexing. */
+#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+
+/* C library namespace. */
+typedef struct CLibrary {
+ void *handle; /* Opaque handle for dynamic library loader. */
+ GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */
+} CLibrary;
+
+LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name);
+LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global);
+LJ_FUNC void lj_clib_unload(CLibrary *cl);
+LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cparse.c b/libs/luajit-cmake/luajit/src/lj_cparse.c
new file mode 100644
index 0000000..7fd8399
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cparse.c
@@ -0,0 +1,1927 @@
+/*
+** C declaration parser.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_ctype.h"
+#include "lj_cparse.h"
+#include "lj_frame.h"
+#include "lj_vm.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+
+/*
+** Important note: this is NOT a validating C parser! This is a minimal
+** C declaration parser, solely for use by the LuaJIT FFI.
+**
+** It ought to return correct results for properly formed C declarations,
+** but it may accept some invalid declarations, too (and return nonsense).
+** Also, it shows rather generic error messages to avoid unnecessary bloat.
+** If in doubt, please check the input against your favorite C compiler.
+*/
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertCP(c, ...) (lj_assertG_(G(cp->L), (c), __VA_ARGS__))
+#else
+#define lj_assertCP(c, ...) ((void)cp)
+#endif
+
+/* -- Miscellaneous ------------------------------------------------------- */
+
+/* Match string against a C literal. */
+#define cp_str_is(str, k) \
+ ((str)->len == sizeof(k)-1 && !memcmp(strdata(str), k, sizeof(k)-1))
+
+/* Check string against a linear list of matches. */
+int lj_cparse_case(GCstr *str, const char *match)
+{
+ MSize len;
+ int n;
+ for (n = 0; (len = (MSize)*match++); n++, match += len) {
+ if (str->len == len && !memcmp(match, strdata(str), len))
+ return n;
+ }
+ return -1;
+}
+
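+/* A hypothetical example of the match-string format: each literal is
+** prefixed with its length as an octal escape, so
+**   lj_cparse_case(str, "\005align" "\003foo")
+** returns 0 for "align", 1 for "foo" and -1 otherwise. See
+** cp_decl_gccattribute() below for the real attribute tables.
+*/
+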
+/* -- C lexer ------------------------------------------------------------- */
+
+/* C lexer token names. */
+static const char *const ctoknames[] = {
+#define CTOKSTR(name, str) str,
+CTOKDEF(CTOKSTR)
+#undef CTOKSTR
+ NULL
+};
+
+/* Forward declaration. */
+LJ_NORET static void cp_err(CPState *cp, ErrMsg em);
+
+static const char *cp_tok2str(CPState *cp, CPToken tok)
+{
+ lj_assertCP(tok < CTOK_FIRSTDECL, "bad CPToken %d", tok);
+ if (tok > CTOK_OFS)
+ return ctoknames[tok-CTOK_OFS-1];
+ else if (!lj_char_iscntrl(tok))
+ return lj_strfmt_pushf(cp->L, "%c", tok);
+ else
+ return lj_strfmt_pushf(cp->L, "char(%d)", tok);
+}
+
+/* End-of-line? */
+static LJ_AINLINE int cp_iseol(CPChar c)
+{
+ return (c == '\n' || c == '\r');
+}
+
+/* Peek next raw character. */
+static LJ_AINLINE CPChar cp_rawpeek(CPState *cp)
+{
+ return (CPChar)(uint8_t)(*cp->p);
+}
+
+static LJ_NOINLINE CPChar cp_get_bs(CPState *cp);
+
+/* Get next character. */
+static LJ_AINLINE CPChar cp_get(CPState *cp)
+{
+ cp->c = (CPChar)(uint8_t)(*cp->p++);
+ if (LJ_LIKELY(cp->c != '\\')) return cp->c;
+ return cp_get_bs(cp);
+}
+
+/* Transparently skip backslash-escaped line breaks. */
+static LJ_NOINLINE CPChar cp_get_bs(CPState *cp)
+{
+ CPChar c2, c = cp_rawpeek(cp);
+ if (!cp_iseol(c)) return cp->c;
+ cp->p++;
+ c2 = cp_rawpeek(cp);
+ if (cp_iseol(c2) && c2 != c) cp->p++;
+ cp->linenumber++;
+ return cp_get(cp);
+}
+
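+/* For illustration: a declaration split with a backslash-newline pair,
+** say "unsigned \<line break> int x;", is lexed exactly like
+** "unsigned int x;". The pair vanishes, cp->linenumber is bumped so
+** errors still point at the right line, and a backslash not followed by
+** a line break is returned as-is.
+*/
+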
+/* Save character in buffer. */
+static LJ_AINLINE void cp_save(CPState *cp, CPChar c)
+{
+ lj_buf_putb(&cp->sb, c);
+}
+
+/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */
+static void cp_newline(CPState *cp)
+{
+ CPChar c = cp_rawpeek(cp);
+ if (cp_iseol(c) && c != cp->c) cp->p++;
+ cp->linenumber++;
+}
+
+LJ_NORET static void cp_errmsg(CPState *cp, CPToken tok, ErrMsg em, ...)
+{
+ const char *msg, *tokstr;
+ lua_State *L;
+ va_list argp;
+ if (tok == 0) {
+ tokstr = NULL;
+ } else if (tok == CTOK_IDENT || tok == CTOK_INTEGER || tok == CTOK_STRING ||
+ tok >= CTOK_FIRSTDECL) {
+ if (cp->sb.w == cp->sb.b) cp_save(cp, '$');
+ cp_save(cp, '\0');
+ tokstr = cp->sb.b;
+ } else {
+ tokstr = cp_tok2str(cp, tok);
+ }
+ L = cp->L;
+ va_start(argp, em);
+ msg = lj_strfmt_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ if (tokstr)
+ msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tokstr);
+ if (cp->linenumber > 1)
+ msg = lj_strfmt_pushf(L, "%s at line %d", msg, cp->linenumber);
+ lj_err_callermsg(L, msg);
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err_token(CPState *cp, CPToken tok)
+{
+ cp_errmsg(cp, cp->tok, LJ_ERR_XTOKEN, cp_tok2str(cp, tok));
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err_badidx(CPState *cp, CType *ct)
+{
+ GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL);
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADIDX, strdata(s));
+}
+
+LJ_NORET LJ_NOINLINE static void cp_err(CPState *cp, ErrMsg em)
+{
+ cp_errmsg(cp, 0, em);
+}
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+/* Parse number literal. Only handles int32_t/uint32_t right now. */
+static CPToken cp_number(CPState *cp)
+{
+ StrScanFmt fmt;
+ TValue o;
+ do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp)));
+ cp_save(cp, '\0');
+ fmt = lj_strscan_scan((const uint8_t *)(cp->sb.b), sbuflen(&cp->sb)-1,
+ &o, STRSCAN_OPT_C);
+ if (fmt == STRSCAN_INT) cp->val.id = CTID_INT32;
+ else if (fmt == STRSCAN_U32) cp->val.id = CTID_UINT32;
+ else if (!(cp->mode & CPARSE_MODE_SKIP))
+ cp_errmsg(cp, CTOK_INTEGER, LJ_ERR_XNUMBER);
+ cp->val.u32 = (uint32_t)o.i;
+ return CTOK_INTEGER;
+}
+
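+/* Examples: "123" and "0x7f" come back as CTID_INT32; "0x80000000" no
+** longer fits int32_t and becomes CTID_UINT32 (C semantics via
+** STRSCAN_OPT_C); a 64 bit literal like "1LL" scans as int64_t and is
+** therefore rejected here, matching the int32_t/uint32_t-only note above.
+*/
+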
+/* Parse identifier or keyword. */
+static CPToken cp_ident(CPState *cp)
+{
+ do { cp_save(cp, cp->c); } while (lj_char_isident(cp_get(cp)));
+ cp->str = lj_buf_str(cp->L, &cp->sb);
+ cp->val.id = lj_ctype_getname(cp->cts, &cp->ct, cp->str, cp->tmask);
+ if (ctype_type(cp->ct->info) == CT_KW)
+ return ctype_cid(cp->ct->info);
+ return CTOK_IDENT;
+}
+
+/* Parse parameter. */
+static CPToken cp_param(CPState *cp)
+{
+ CPChar c = cp_get(cp);
+ TValue *o = cp->param;
+ if (lj_char_isident(c) || c == '$') /* Reserve $xyz for future extensions. */
+ cp_errmsg(cp, c, LJ_ERR_XSYNTAX);
+ if (!o || o >= cp->L->top)
+ cp_err(cp, LJ_ERR_FFI_NUMPARAM);
+ cp->param = o+1;
+ if (tvisstr(o)) {
+ cp->str = strV(o);
+ cp->val.id = 0;
+ cp->ct = &cp->cts->tab[0];
+ return CTOK_IDENT;
+ } else if (tvisnumber(o)) {
+ cp->val.i32 = numberVint(o);
+ cp->val.id = CTID_INT32;
+ return CTOK_INTEGER;
+ } else {
+ GCcdata *cd;
+ if (!tviscdata(o))
+ lj_err_argtype(cp->L, (int)(o-cp->L->base)+1, "type parameter");
+ cd = cdataV(o);
+ if (cd->ctypeid == CTID_CTYPEID)
+ cp->val.id = *(CTypeID *)cdataptr(cd);
+ else
+ cp->val.id = cd->ctypeid;
+ return '$';
+ }
+}
+
+/* Parse string or character constant. */
+static CPToken cp_string(CPState *cp)
+{
+ CPChar delim = cp->c;
+ cp_get(cp);
+ while (cp->c != delim) {
+ CPChar c = cp->c;
+ if (c == '\0') cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR);
+ if (c == '\\') {
+ c = cp_get(cp);
+ switch (c) {
+ case '\0': cp_errmsg(cp, CTOK_EOF, LJ_ERR_XSTR); break;
+ case 'a': c = '\a'; break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+ case 'e': c = 27; break;
+ case 'x':
+ c = 0;
+ while (lj_char_isxdigit(cp_get(cp)))
+ c = (c<<4) + (lj_char_isdigit(cp->c) ? cp->c-'0' : (cp->c&15)+9);
+ cp_save(cp, (c & 0xff));
+ continue;
+ default:
+ if (lj_char_isdigit(c)) {
+ c -= '0';
+ if (lj_char_isdigit(cp_get(cp))) {
+ c = c*8 + (cp->c - '0');
+ if (lj_char_isdigit(cp_get(cp))) {
+ c = c*8 + (cp->c - '0');
+ cp_get(cp);
+ }
+ }
+ cp_save(cp, (c & 0xff));
+ continue;
+ }
+ break;
+ }
+ }
+ cp_save(cp, c);
+ cp_get(cp);
+ }
+ cp_get(cp);
+ if (delim == '"') {
+ cp->str = lj_buf_str(cp->L, &cp->sb);
+ return CTOK_STRING;
+ } else {
+ if (sbuflen(&cp->sb) != 1) cp_err_token(cp, '\'');
+ cp->val.i32 = (int32_t)(char)*cp->sb.b;
+ cp->val.id = CTID_INT32;
+ return CTOK_INTEGER;
+ }
+}
+
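+/* Examples: "abc" yields CTOK_STRING; the char constant 'A' yields
+** CTOK_INTEGER with val.i32 == 65, and the escapes '\x41' and '\101'
+** decode to the same value. Multi-character constants are rejected via
+** cp_err_token().
+*/
+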
+/* Skip C comment. */
+static void cp_comment_c(CPState *cp)
+{
+ do {
+ if (cp_get(cp) == '*') {
+ do {
+ if (cp_get(cp) == '/') { cp_get(cp); return; }
+ } while (cp->c == '*');
+ }
+ if (cp_iseol(cp->c)) cp_newline(cp);
+ } while (cp->c != '\0');
+}
+
+/* Skip C++ comment. */
+static void cp_comment_cpp(CPState *cp)
+{
+ while (!cp_iseol(cp_get(cp)) && cp->c != '\0')
+ ;
+}
+
+/* Lexical scanner for C. Only a minimal subset is implemented. */
+static CPToken cp_next_(CPState *cp)
+{
+ lj_buf_reset(&cp->sb);
+ for (;;) {
+ if (lj_char_isident(cp->c))
+ return lj_char_isdigit(cp->c) ? cp_number(cp) : cp_ident(cp);
+ switch (cp->c) {
+ case '\n': case '\r': cp_newline(cp); /* fallthrough. */
+ case ' ': case '\t': case '\v': case '\f': cp_get(cp); break;
+ case '"': case '\'': return cp_string(cp);
+ case '/':
+ if (cp_get(cp) == '*') cp_comment_c(cp);
+ else if (cp->c == '/') cp_comment_cpp(cp);
+ else return '/';
+ break;
+ case '|':
+ if (cp_get(cp) != '|') return '|';
+ cp_get(cp); return CTOK_OROR;
+ case '&':
+ if (cp_get(cp) != '&') return '&';
+ cp_get(cp); return CTOK_ANDAND;
+ case '=':
+ if (cp_get(cp) != '=') return '=';
+ cp_get(cp); return CTOK_EQ;
+ case '!':
+ if (cp_get(cp) != '=') return '!';
+ cp_get(cp); return CTOK_NE;
+ case '<':
+ if (cp_get(cp) == '=') { cp_get(cp); return CTOK_LE; }
+ else if (cp->c == '<') { cp_get(cp); return CTOK_SHL; }
+ return '<';
+ case '>':
+ if (cp_get(cp) == '=') { cp_get(cp); return CTOK_GE; }
+ else if (cp->c == '>') { cp_get(cp); return CTOK_SHR; }
+ return '>';
+ case '-':
+ if (cp_get(cp) != '>') return '-';
+ cp_get(cp); return CTOK_DEREF;
+ case '$':
+ return cp_param(cp);
+ case '\0': return CTOK_EOF;
+ default: { CPToken c = cp->c; cp_get(cp); return c; }
+ }
+ }
+}
+
+static LJ_NOINLINE CPToken cp_next(CPState *cp)
+{
+ return (cp->tok = cp_next_(cp));
+}
+
+/* -- C parser ------------------------------------------------------------ */
+
+/* Namespaces for resolving identifiers. */
+#define CPNS_DEFAULT \
+ ((1u<<CT_KW)|(1u<<CT_TYPEDEF)|(1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
+#define CPNS_STRUCT ((1u<<CT_KW)|(1u<<CT_STRUCT)|(1u<<CT_ENUM))
+
+typedef CTypeID CPDeclIdx; /* Index into declaration stack. */
+typedef uint32_t CPscl; /* Storage class flags. */
+
+/* Type declaration context. */
+typedef struct CPDecl {
+ CPDeclIdx top; /* Top of declaration stack. */
+ CPDeclIdx pos; /* Insertion position in declaration chain. */
+ CPDeclIdx specpos; /* Saved position for declaration specifier. */
+ uint32_t mode; /* Declarator mode. */
+ CPState *cp; /* C parser state. */
+ GCstr *name; /* Name of declared identifier (if direct). */
+ GCstr *redir; /* Redirected symbol name. */
+ CTypeID nameid; /* Existing typedef for declared identifier. */
+ CTInfo attr; /* Attributes. */
+ CTInfo fattr; /* Function attributes. */
+ CTInfo specattr; /* Saved attributes. */
+ CTInfo specfattr; /* Saved function attributes. */
+ CTSize bits; /* Field size in bits (if any). */
+ CType stack[CPARSE_MAX_DECLSTACK]; /* Type declaration stack. */
+} CPDecl;
+
+/* Forward declarations. */
+static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl);
+static void cp_declarator(CPState *cp, CPDecl *decl);
+static CTypeID cp_decl_abstract(CPState *cp);
+
+/* Initialize C parser state. Caller must set up: L, p, srcname, mode. */
+static void cp_init(CPState *cp)
+{
+ cp->linenumber = 1;
+ cp->depth = 0;
+ cp->curpack = 0;
+ cp->packstack[0] = 255;
+ lj_buf_init(cp->L, &cp->sb);
+ lj_assertCP(cp->p != NULL, "uninitialized cp->p");
+ cp_get(cp); /* Read-ahead first char. */
+ cp->tok = 0;
+ cp->tmask = CPNS_DEFAULT;
+ cp_next(cp); /* Read-ahead first token. */
+}
+
+/* Cleanup C parser state. */
+static void cp_cleanup(CPState *cp)
+{
+ global_State *g = G(cp->L);
+ lj_buf_free(g, &cp->sb);
+}
+
+/* Check and consume optional token. */
+static int cp_opt(CPState *cp, CPToken tok)
+{
+ if (cp->tok == tok) { cp_next(cp); return 1; }
+ return 0;
+}
+
+/* Check and consume token. */
+static void cp_check(CPState *cp, CPToken tok)
+{
+ if (cp->tok != tok) cp_err_token(cp, tok);
+ cp_next(cp);
+}
+
+/* Check if the next token may start a type declaration. */
+static int cp_istypedecl(CPState *cp)
+{
+ if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECL) return 1;
+ if (cp->tok == CTOK_IDENT && ctype_istypedef(cp->ct->info)) return 1;
+ if (cp->tok == '$') return 1;
+ return 0;
+}
+
+/* -- Constant expression evaluator --------------------------------------- */
+
+/* Forward declarations. */
+static void cp_expr_unary(CPState *cp, CPValue *k);
+static void cp_expr_sub(CPState *cp, CPValue *k, int pri);
+
+/* Please note that type handling is very weak here. Most ops simply
+** assume integer operands. Accessors are only needed to compute types and
+** return synthetic values. The only purpose of the expression evaluator
+** is to compute the values of constant expressions one would typically
+** find in C header files. And again: this is NOT a validating C parser!
+*/
+
+/* Parse comma separated expression and return last result. */
+static void cp_expr_comma(CPState *cp, CPValue *k)
+{
+ do { cp_expr_sub(cp, k, 0); } while (cp_opt(cp, ','));
+}
+
+/* Parse sizeof/alignof operator. */
+static void cp_expr_sizeof(CPState *cp, CPValue *k, int wantsz)
+{
+ CTSize sz;
+ CTInfo info;
+ if (cp_opt(cp, '(')) {
+ if (cp_istypedecl(cp))
+ k->id = cp_decl_abstract(cp);
+ else
+ cp_expr_comma(cp, k);
+ cp_check(cp, ')');
+ } else {
+ cp_expr_unary(cp, k);
+ }
+ info = lj_ctype_info_raw(cp->cts, k->id, &sz);
+ if (wantsz) {
+ if (sz != CTSIZE_INVALID)
+ k->u32 = sz;
+ else if (k->id != CTID_A_CCHAR) /* Special case for sizeof("string"). */
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ } else {
+ k->u32 = 1u << ctype_align(info);
+ }
+ k->id = CTID_UINT32; /* Really size_t. */
+}
+
+/* Parse prefix operators. */
+static void cp_expr_prefix(CPState *cp, CPValue *k)
+{
+ if (cp->tok == CTOK_INTEGER) {
+ *k = cp->val; cp_next(cp);
+ } else if (cp_opt(cp, '+')) {
+ cp_expr_unary(cp, k); /* Nothing to do (well, integer promotion). */
+ } else if (cp_opt(cp, '-')) {
+ cp_expr_unary(cp, k); k->i32 = -k->i32;
+ } else if (cp_opt(cp, '~')) {
+ cp_expr_unary(cp, k); k->i32 = ~k->i32;
+ } else if (cp_opt(cp, '!')) {
+ cp_expr_unary(cp, k); k->i32 = !k->i32; k->id = CTID_INT32;
+ } else if (cp_opt(cp, '(')) {
+ if (cp_istypedecl(cp)) { /* Cast operator. */
+ CTypeID id = cp_decl_abstract(cp);
+ cp_check(cp, ')');
+ cp_expr_unary(cp, k);
+ k->id = id; /* No conversion performed. */
+ } else { /* Sub-expression. */
+ cp_expr_comma(cp, k);
+ cp_check(cp, ')');
+ }
+ } else if (cp_opt(cp, '*')) { /* Indirection. */
+ CType *ct;
+ cp_expr_unary(cp, k);
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ k->u32 = 0; k->id = ctype_cid(ct->info);
+ } else if (cp_opt(cp, '&')) { /* Address operator. */
+ cp_expr_unary(cp, k);
+ k->id = lj_ctype_intern(cp->cts, CTINFO(CT_PTR, CTALIGN_PTR+k->id),
+ CTSIZE_PTR);
+ } else if (cp_opt(cp, CTOK_SIZEOF)) {
+ cp_expr_sizeof(cp, k, 1);
+ } else if (cp_opt(cp, CTOK_ALIGNOF)) {
+ cp_expr_sizeof(cp, k, 0);
+ } else if (cp->tok == CTOK_IDENT) {
+ if (ctype_type(cp->ct->info) == CT_CONSTVAL) {
+ k->u32 = cp->ct->size; k->id = ctype_cid(cp->ct->info);
+ } else if (ctype_type(cp->ct->info) == CT_EXTERN) {
+ k->u32 = cp->val.id; k->id = ctype_cid(cp->ct->info);
+ } else if (ctype_type(cp->ct->info) == CT_FUNC) {
+ k->u32 = cp->val.id; k->id = cp->val.id;
+ } else {
+ goto err_expr;
+ }
+ cp_next(cp);
+ } else if (cp->tok == CTOK_STRING) {
+ CTSize sz = cp->str->len;
+ while (cp_next(cp) == CTOK_STRING)
+ sz += cp->str->len;
+ k->u32 = sz + 1;
+ k->id = CTID_A_CCHAR;
+ } else {
+ err_expr:
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ }
+}
+
+/* Parse postfix operators. */
+static void cp_expr_postfix(CPState *cp, CPValue *k)
+{
+ for (;;) {
+ CType *ct;
+ if (cp_opt(cp, '[')) { /* Array/pointer index. */
+ CPValue k2;
+ cp_expr_comma(cp, &k2);
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (!ctype_ispointer(ct->info)) {
+ ct = lj_ctype_rawref(cp->cts, k2.id);
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ }
+ cp_check(cp, ']');
+ k->u32 = 0;
+ } else if (cp->tok == '.' || cp->tok == CTOK_DEREF) { /* Struct deref. */
+ CTSize ofs;
+ CType *fct;
+ ct = lj_ctype_rawref(cp->cts, k->id);
+ if (cp->tok == CTOK_DEREF) {
+ if (!ctype_ispointer(ct->info))
+ cp_err_badidx(cp, ct);
+ ct = lj_ctype_rawref(cp->cts, ctype_cid(ct->info));
+ }
+ cp_next(cp);
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (!ctype_isstruct(ct->info) || ct->size == CTSIZE_INVALID ||
+ !(fct = lj_ctype_getfield(cp->cts, ct, cp->str, &ofs)) ||
+ ctype_isbitfield(fct->info)) {
+ GCstr *s = lj_ctype_repr(cp->cts->L, ctype_typeid(cp->cts, ct), NULL);
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADMEMBER, strdata(s), strdata(cp->str));
+ }
+ ct = fct;
+ k->u32 = ctype_isconstval(ct->info) ? ct->size : 0;
+ cp_next(cp);
+ } else {
+ return;
+ }
+ k->id = ctype_cid(ct->info);
+ }
+}
+
+/* Parse infix operators. */
+static void cp_expr_infix(CPState *cp, CPValue *k, int pri)
+{
+ CPValue k2;
+ k2.u32 = 0; k2.id = 0; /* Silence the compiler. */
+ for (;;) {
+ switch (pri) {
+ case 0:
+ if (cp_opt(cp, '?')) {
+ CPValue k3;
+ cp_expr_comma(cp, &k2); /* Right-associative. */
+ cp_check(cp, ':');
+ cp_expr_sub(cp, &k3, 0);
+ k->u32 = k->u32 ? k2.u32 : k3.u32;
+ k->id = k2.id > k3.id ? k2.id : k3.id;
+ continue;
+ }
+ /* fallthrough */
+ case 1:
+ if (cp_opt(cp, CTOK_OROR)) {
+ cp_expr_sub(cp, &k2, 2); k->i32 = k->u32 || k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ /* fallthrough */
+ case 2:
+ if (cp_opt(cp, CTOK_ANDAND)) {
+ cp_expr_sub(cp, &k2, 3); k->i32 = k->u32 && k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ /* fallthrough */
+ case 3:
+ if (cp_opt(cp, '|')) {
+ cp_expr_sub(cp, &k2, 4); k->u32 = k->u32 | k2.u32; goto arith_result;
+ }
+ /* fallthrough */
+ case 4:
+ if (cp_opt(cp, '^')) {
+ cp_expr_sub(cp, &k2, 5); k->u32 = k->u32 ^ k2.u32; goto arith_result;
+ }
+ /* fallthrough */
+ case 5:
+ if (cp_opt(cp, '&')) {
+ cp_expr_sub(cp, &k2, 6); k->u32 = k->u32 & k2.u32; goto arith_result;
+ }
+ /* fallthrough */
+ case 6:
+ if (cp_opt(cp, CTOK_EQ)) {
+ cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 == k2.u32; k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_NE)) {
+ cp_expr_sub(cp, &k2, 7); k->i32 = k->u32 != k2.u32; k->id = CTID_INT32;
+ continue;
+ }
+ /* fallthrough */
+ case 7:
+ if (cp_opt(cp, '<')) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 < k2.i32;
+ else
+ k->i32 = k->u32 < k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, '>')) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 > k2.i32;
+ else
+ k->i32 = k->u32 > k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_LE)) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 <= k2.i32;
+ else
+ k->i32 = k->u32 <= k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ } else if (cp_opt(cp, CTOK_GE)) {
+ cp_expr_sub(cp, &k2, 8);
+ if (k->id == CTID_INT32 && k2.id == CTID_INT32)
+ k->i32 = k->i32 >= k2.i32;
+ else
+ k->i32 = k->u32 >= k2.u32;
+ k->id = CTID_INT32;
+ continue;
+ }
+ /* fallthrough */
+ case 8:
+ if (cp_opt(cp, CTOK_SHL)) {
+ cp_expr_sub(cp, &k2, 9); k->u32 = k->u32 << k2.u32;
+ continue;
+ } else if (cp_opt(cp, CTOK_SHR)) {
+ cp_expr_sub(cp, &k2, 9);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 >> k2.i32;
+ else
+ k->u32 = k->u32 >> k2.u32;
+ continue;
+ }
+ /* fallthrough */
+ case 9:
+ if (cp_opt(cp, '+')) {
+ cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 + k2.u32;
+ arith_result:
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ continue;
+ } else if (cp_opt(cp, '-')) {
+ cp_expr_sub(cp, &k2, 10); k->u32 = k->u32 - k2.u32; goto arith_result;
+ }
+ /* fallthrough */
+ case 10:
+ if (cp_opt(cp, '*')) {
+ cp_expr_unary(cp, &k2); k->u32 = k->u32 * k2.u32; goto arith_result;
+ } else if (cp_opt(cp, '/')) {
+ cp_expr_unary(cp, &k2);
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ if (k2.u32 == 0 ||
+ (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1))
+ cp_err(cp, LJ_ERR_BADVAL);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 / k2.i32;
+ else
+ k->u32 = k->u32 / k2.u32;
+ continue;
+ } else if (cp_opt(cp, '%')) {
+ cp_expr_unary(cp, &k2);
+ if (k2.id > k->id) k->id = k2.id; /* Trivial promotion to unsigned. */
+ if (k2.u32 == 0 ||
+ (k->id == CTID_INT32 && k->u32 == 0x80000000u && k2.i32 == -1))
+ cp_err(cp, LJ_ERR_BADVAL);
+ if (k->id == CTID_INT32)
+ k->i32 = k->i32 % k2.i32;
+ else
+ k->u32 = k->u32 % k2.u32;
+ continue;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* Parse and evaluate unary expression. */
+static void cp_expr_unary(CPState *cp, CPValue *k)
+{
+ if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS);
+ cp_expr_prefix(cp, k);
+ cp_expr_postfix(cp, k);
+ cp->depth--;
+}
+
+/* Parse and evaluate sub-expression. */
+static void cp_expr_sub(CPState *cp, CPValue *k, int pri)
+{
+ cp_expr_unary(cp, k);
+ cp_expr_infix(cp, k, pri);
+}
+
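+/* A concrete trace of the precedence-climbing scheme above: for "1+2*3",
+** cp_expr_sub(cp, k, 0) parses "1", the '+' case (priority 9) recurses
+** with priority 10 for its right operand, so '*' binds tighter there and
+** the result is 7, not 9.
+*/
+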
+/* Parse constant integer expression. */
+static void cp_expr_kint(CPState *cp, CPValue *k)
+{
+ CType *ct;
+ cp_expr_sub(cp, k, 0);
+ ct = ctype_raw(cp->cts, k->id);
+ if (!ctype_isinteger(ct->info)) cp_err(cp, LJ_ERR_BADVAL);
+}
+
+/* Parse (non-negative) size expression. */
+static CTSize cp_expr_ksize(CPState *cp)
+{
+ CPValue k;
+ cp_expr_kint(cp, &k);
+ if (k.u32 >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ return k.u32;
+}
+
+/* -- Type declaration stack management ----------------------------------- */
+
+/* Add declaration element behind the insertion position. */
+static CPDeclIdx cp_add(CPDecl *decl, CTInfo info, CTSize size)
+{
+ CPDeclIdx top = decl->top;
+ if (top >= CPARSE_MAX_DECLSTACK) cp_err(decl->cp, LJ_ERR_XLEVELS);
+ decl->stack[top].info = info;
+ decl->stack[top].size = size;
+ decl->stack[top].sib = 0;
+ setgcrefnull(decl->stack[top].name);
+ decl->stack[top].next = decl->stack[decl->pos].next;
+ decl->stack[decl->pos].next = (CTypeID1)top;
+ decl->top = top+1;
+ return top;
+}
+
+/* Push declaration element before the insertion position. */
+static CPDeclIdx cp_push(CPDecl *decl, CTInfo info, CTSize size)
+{
+ return (decl->pos = cp_add(decl, info, size));
+}
+
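+/* For illustration: parsing "int *a[10]" leaves the chain
+** int -> pointer -> array on this stack, base type first. cp_push()
+** inserts before the insertion position (used for pointers), cp_add()
+** behind it (arrays), and cp_decl_intern() below then interns the chain
+** inside-out into "array of 10 pointers to int".
+*/
+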
+/* Push or merge attributes. */
+static void cp_push_attributes(CPDecl *decl)
+{
+ CType *ct = &decl->stack[decl->pos];
+ if (ctype_isfunc(ct->info)) { /* Ok to modify in-place. */
+#if LJ_TARGET_X86
+ if ((decl->fattr & CTFP_CCONV))
+ ct->info = (ct->info & (CTMASK_NUM|CTF_VARARG|CTMASK_CID)) +
+ (decl->fattr & ~CTMASK_CID);
+#endif
+ } else {
+ if ((decl->attr & CTFP_ALIGNED) && !(decl->mode & CPARSE_MODE_FIELD))
+ cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_ALIGN)),
+ ctype_align(decl->attr));
+ }
+}
+
+/* Push unrolled type to declaration stack and merge qualifiers. */
+static void cp_push_type(CPDecl *decl, CTypeID id)
+{
+ CType *ct = ctype_get(decl->cp->cts, id);
+ CTInfo info = ct->info;
+ CTSize size = ct->size;
+ switch (ctype_type(info)) {
+ case CT_STRUCT: case CT_ENUM:
+ cp_push(decl, CTINFO(CT_TYPEDEF, id), 0); /* Don't copy unique types. */
+ if ((decl->attr & CTF_QUAL)) { /* Push unmerged qualifiers. */
+ cp_push(decl, CTINFO(CT_ATTRIB, CTATTRIB(CTA_QUAL)),
+ (decl->attr & CTF_QUAL));
+ decl->attr &= ~CTF_QUAL;
+ }
+ break;
+ case CT_ATTRIB:
+ if (ctype_isxattrib(info, CTA_QUAL))
+ decl->attr &= ~size; /* Remove redundant qualifiers. */
+ cp_push_type(decl, ctype_cid(info)); /* Unroll. */
+ cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */
+ break;
+ case CT_ARRAY:
+ if ((ct->info & (CTF_VECTOR|CTF_COMPLEX))) {
+ info |= (decl->attr & CTF_QUAL);
+ decl->attr &= ~CTF_QUAL;
+ }
+ cp_push_type(decl, ctype_cid(info)); /* Unroll. */
+ cp_push(decl, info & ~CTMASK_CID, size); /* Copy type. */
+ decl->stack[decl->pos].sib = 1; /* Mark as already checked and sized. */
+ /* Note: this is not copied to the ct->sib in the C type table. */
+ break;
+ case CT_FUNC:
+ /* Copy type, link parameters (shared). */
+ decl->stack[cp_push(decl, info, size)].sib = ct->sib;
+ break;
+ default:
+ /* Copy type, merge common qualifiers. */
+ cp_push(decl, info|(decl->attr & CTF_QUAL), size);
+ decl->attr &= ~CTF_QUAL;
+ break;
+ }
+}
+
+/* Consume the declaration element chain and intern the C type. */
+static CTypeID cp_decl_intern(CPState *cp, CPDecl *decl)
+{
+ CTypeID id = 0;
+ CPDeclIdx idx = 0;
+ CTSize csize = CTSIZE_INVALID;
+ CTSize cinfo = 0;
+ do {
+ CType *ct = &decl->stack[idx];
+ CTInfo info = ct->info;
+ CTInfo size = ct->size;
+ /* The cid is already part of info for copies of pointers/functions. */
+ idx = ct->next;
+ if (ctype_istypedef(info)) {
+ lj_assertCP(id == 0, "typedef not at toplevel");
+ id = ctype_cid(info);
+ /* Always refetch info/size, since struct/enum may have been completed. */
+ cinfo = ctype_get(cp->cts, id)->info;
+ csize = ctype_get(cp->cts, id)->size;
+ lj_assertCP(ctype_isstruct(cinfo) || ctype_isenum(cinfo),
+ "typedef of bad type");
+ } else if (ctype_isfunc(info)) { /* Intern function. */
+ CType *fct;
+ CTypeID fid;
+ CTypeID sib;
+ if (id) {
+ CType *refct = ctype_raw(cp->cts, id);
+ /* Reject function or refarray return types. */
+ if (ctype_isfunc(refct->info) || ctype_isrefarray(refct->info))
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ }
+ /* No intervening attributes allowed, skip forward. */
+ while (idx) {
+ CType *ctn = &decl->stack[idx];
+ if (!ctype_isattrib(ctn->info)) break;
+ idx = ctn->next; /* Skip attribute. */
+ }
+ sib = ct->sib; /* Next line may reallocate the C type table. */
+ fid = lj_ctype_new(cp->cts, &fct);
+ csize = CTSIZE_INVALID;
+ fct->info = cinfo = info + id;
+ fct->size = size;
+ fct->sib = sib;
+ id = fid;
+ } else if (ctype_isattrib(info)) {
+ if (ctype_isxattrib(info, CTA_QUAL))
+ cinfo |= size;
+ else if (ctype_isxattrib(info, CTA_ALIGN))
+ CTF_INSERT(cinfo, ALIGN, size);
+ id = lj_ctype_intern(cp->cts, info+id, size);
+ /* Inherit csize/cinfo from original type. */
+ } else {
+ if (ctype_isnum(info)) { /* Handle mode/vector-size attributes. */
+ lj_assertCP(id == 0, "number not at toplevel");
+ if (!(info & CTF_BOOL)) {
+ CTSize msize = ctype_msizeP(decl->attr);
+ CTSize vsize = ctype_vsizeP(decl->attr);
+ if (msize && (!(info & CTF_FP) || (msize == 4 || msize == 8))) {
+ CTSize malign = lj_fls(msize);
+ if (malign > 4) malign = 4; /* Limit alignment. */
+ CTF_INSERT(info, ALIGN, malign);
+ size = msize; /* Override size via mode. */
+ }
+ if (vsize) { /* Vector size set? */
+ CTSize esize = lj_fls(size);
+ if (vsize >= esize) {
+ /* Intern the element type first. */
+ id = lj_ctype_intern(cp->cts, info, size);
+ /* Then create a vector (array) with vsize alignment. */
+ size = (1u << vsize);
+ if (vsize > 4) vsize = 4; /* Limit alignment. */
+ if (ctype_align(info) > vsize) vsize = ctype_align(info);
+ info = CTINFO(CT_ARRAY, (info & CTF_QUAL) + CTF_VECTOR +
+ CTALIGN(vsize));
+ }
+ }
+ }
+ } else if (ctype_isptr(info)) {
+ /* Reject pointer/ref to ref. */
+ if (id && ctype_isref(ctype_raw(cp->cts, id)->info))
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ if (ctype_isref(info)) {
+ info &= ~CTF_VOLATILE; /* Refs are always const, never volatile. */
+ /* No intervening attributes allowed, skip forward. */
+ while (idx) {
+ CType *ctn = &decl->stack[idx];
+ if (!ctype_isattrib(ctn->info)) break;
+ idx = ctn->next; /* Skip attribute. */
+ }
+ }
+ } else if (ctype_isarray(info)) { /* Check for valid array size etc. */
+ if (ct->sib == 0) { /* Only check/size arrays not copied by unroll. */
+ if (ctype_isref(cinfo)) /* Reject arrays of refs. */
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ /* Reject VLS or unknown-sized types. */
+ if (ctype_isvltype(cinfo) || csize == CTSIZE_INVALID)
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ /* a[] and a[?] keep their invalid size. */
+ if (size != CTSIZE_INVALID) {
+ uint64_t xsz = (uint64_t)size * csize;
+ if (xsz >= 0x80000000u) cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ size = (CTSize)xsz;
+ }
+ }
+ if ((cinfo & CTF_ALIGN) > (info & CTF_ALIGN)) /* Find max. align. */
+ info = (info & ~CTF_ALIGN) | (cinfo & CTF_ALIGN);
+ info |= (cinfo & CTF_QUAL); /* Inherit qual. */
+ } else {
+ lj_assertCP(ctype_isvoid(info), "bad ctype %08x", info);
+ }
+ csize = size;
+ cinfo = info+id;
+ id = lj_ctype_intern(cp->cts, info+id, size);
+ }
+ } while (idx);
+ return id;
+}
+
+/* -- C declaration parser ------------------------------------------------ */
+
+/* Reset declaration state to declaration specifier. */
+static void cp_decl_reset(CPDecl *decl)
+{
+ decl->pos = decl->specpos;
+ decl->top = decl->specpos+1;
+ decl->stack[decl->specpos].next = 0;
+ decl->attr = decl->specattr;
+ decl->fattr = decl->specfattr;
+ decl->name = NULL;
+ decl->redir = NULL;
+}
+
+/* Parse constant initializer. */
+/* NYI: FP constants and strings as initializers. */
+static CTypeID cp_decl_constinit(CPState *cp, CType **ctp, CTypeID ctypeid)
+{
+ CType *ctt = ctype_get(cp->cts, ctypeid);
+ CTInfo info;
+ CTSize size;
+ CPValue k;
+ CTypeID constid;
+ while (ctype_isattrib(ctt->info)) { /* Skip attributes. */
+ ctypeid = ctype_cid(ctt->info); /* Update ID, too. */
+ ctt = ctype_get(cp->cts, ctypeid);
+ }
+ info = ctt->info;
+ size = ctt->size;
+ if (!ctype_isinteger(info) || !(info & CTF_CONST) || size > 4)
+ cp_err(cp, LJ_ERR_FFI_INVTYPE);
+ cp_check(cp, '=');
+ cp_expr_sub(cp, &k, 0);
+ constid = lj_ctype_new(cp->cts, ctp);
+ (*ctp)->info = CTINFO(CT_CONSTVAL, CTF_CONST|ctypeid);
+ k.u32 <<= 8*(4-size);
+ if ((info & CTF_UNSIGNED))
+ k.u32 >>= 8*(4-size);
+ else
+ k.u32 = (uint32_t)((int32_t)k.u32 >> 8*(4-size));
+ (*ctp)->size = k.u32;
+ return constid;
+}
+
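+/* A worked example of the truncation above: for a (hypothetical)
+** "static const int8_t x = 0x1ff;" the size is 1, so k.u32 is shifted
+** left by 24 bits and arithmetically back right, storing 0xffffffff
+** (-1); with uint8_t the logical shift stores 0xff instead.
+*/
+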
+/* Parse size in parentheses as part of attribute. */
+static CTSize cp_decl_sizeattr(CPState *cp)
+{
+ CTSize sz;
+ uint32_t oldtmask = cp->tmask;
+ cp->tmask = CPNS_DEFAULT; /* Required for expression evaluator. */
+ cp_check(cp, '(');
+ sz = cp_expr_ksize(cp);
+ cp->tmask = oldtmask;
+ cp_check(cp, ')');
+ return sz;
+}
+
+/* Parse alignment attribute. */
+static void cp_decl_align(CPState *cp, CPDecl *decl)
+{
+ CTSize al = 4; /* Unspecified alignment is 16 bytes. */
+ if (cp->tok == '(') {
+ al = cp_decl_sizeattr(cp);
+ al = al ? lj_fls(al) : 0;
+ }
+ CTF_INSERT(decl->attr, ALIGN, al);
+ decl->attr |= CTFP_ALIGNED;
+}
+
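+/* Example: __attribute__((aligned(8))) or __declspec(align(8)) stores
+** lj_fls(8) == 3, i.e. alignments are kept as log2 values; a bare
+** "aligned" without parentheses defaults to al = 4, i.e. 2^4 = 16 bytes.
+*/
+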
+/* Parse GCC asm("name") redirect. */
+static void cp_decl_asm(CPState *cp, CPDecl *decl)
+{
+ UNUSED(decl);
+ cp_next(cp);
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_STRING) {
+ GCstr *str = cp->str;
+ while (cp_next(cp) == CTOK_STRING) {
+ lj_strfmt_pushf(cp->L, "%s%s", strdata(str), strdata(cp->str));
+ cp->L->top--;
+ str = strV(cp->L->top);
+ }
+ decl->redir = str;
+ }
+ cp_check(cp, ')');
+}
+
+/* Parse GCC __attribute__((mode(...))). */
+static void cp_decl_mode(CPState *cp, CPDecl *decl)
+{
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_IDENT) {
+ const char *s = strdata(cp->str);
+ CTSize sz = 0, vlen = 0;
+ if (s[0] == '_' && s[1] == '_') s += 2;
+ if (*s == 'V') {
+ s++;
+ vlen = *s++ - '0';
+ if (*s >= '0' && *s <= '9')
+ vlen = vlen*10 + (*s++ - '0');
+ }
+ switch (*s++) {
+ case 'Q': sz = 1; break;
+ case 'H': sz = 2; break;
+ case 'S': sz = 4; break;
+ case 'D': sz = 8; break;
+ case 'T': sz = 16; break;
+ case 'O': sz = 32; break;
+ default: goto bad_size;
+ }
+ if (*s == 'I' || *s == 'F') {
+ CTF_INSERT(decl->attr, MSIZEP, sz);
+ if (vlen) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vlen*sz));
+ }
+ bad_size:
+ cp_next(cp);
+ }
+ cp_check(cp, ')');
+}
+
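+/* Examples: __attribute__((mode(DI))) requests an 8 byte integer
+** (sz = 8 from 'D'); mode(V4SI) requests a vector of four 4 byte ints
+** (vlen = 4, sz = 4, so VSIZEP becomes lj_fls(16) == 4). Unrecognized
+** mode letters are silently skipped via bad_size.
+*/
+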
+/* Parse GCC __attribute__((...)). */
+static void cp_decl_gccattribute(CPState *cp, CPDecl *decl)
+{
+ cp_next(cp);
+ cp_check(cp, '(');
+ cp_check(cp, '(');
+ while (cp->tok != ')') {
+ if (cp->tok == CTOK_IDENT) {
+ GCstr *attrstr = cp->str;
+ cp_next(cp);
+ switch (lj_cparse_case(attrstr,
+ "\007aligned" "\013__aligned__"
+ "\006packed" "\012__packed__"
+ "\004mode" "\010__mode__"
+ "\013vector_size" "\017__vector_size__"
+#if LJ_TARGET_X86
+ "\007regparm" "\013__regparm__"
+ "\005cdecl" "\011__cdecl__"
+ "\010thiscall" "\014__thiscall__"
+ "\010fastcall" "\014__fastcall__"
+ "\007stdcall" "\013__stdcall__"
+ "\012sseregparm" "\016__sseregparm__"
+#endif
+ )) {
+ case 0: case 1: /* aligned */
+ cp_decl_align(cp, decl);
+ break;
+ case 2: case 3: /* packed */
+ decl->attr |= CTFP_PACKED;
+ break;
+ case 4: case 5: /* mode */
+ cp_decl_mode(cp, decl);
+ break;
+ case 6: case 7: /* vector_size */
+ {
+ CTSize vsize = cp_decl_sizeattr(cp);
+ if (vsize) CTF_INSERT(decl->attr, VSIZEP, lj_fls(vsize));
+ }
+ break;
+#if LJ_TARGET_X86
+ case 8: case 9: /* regparm */
+ CTF_INSERT(decl->fattr, REGPARM, cp_decl_sizeattr(cp));
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case 10: case 11: /* cdecl */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_CDECL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case 12: case 13: /* thiscall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_THISCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case 14: case 15: /* fastcall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_FASTCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case 16: case 17: /* stdcall */
+ CTF_INSERT(decl->fattr, CCONV, CTCC_STDCALL);
+ decl->fattr |= CTFP_CCONV;
+ break;
+ case 18: case 19: /* sseregparm */
+ decl->fattr |= CTF_SSEREGPARM;
+ decl->fattr |= CTFP_CCONV;
+ break;
+#endif
+ default: /* Skip all other attributes. */
+ goto skip_attr;
+ }
+ } else if (cp->tok >= CTOK_FIRSTDECL) { /* For __attribute((const)) etc. */
+ cp_next(cp);
+ skip_attr:
+ if (cp_opt(cp, '(')) {
+ while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp);
+ cp_check(cp, ')');
+ }
+ } else {
+ break;
+ }
+ if (!cp_opt(cp, ',')) break;
+ }
+ cp_check(cp, ')');
+ cp_check(cp, ')');
+}
+
+/* Parse MSVC __declspec(...). */
+static void cp_decl_msvcattribute(CPState *cp, CPDecl *decl)
+{
+ cp_next(cp);
+ cp_check(cp, '(');
+ while (cp->tok == CTOK_IDENT) {
+ GCstr *attrstr = cp->str;
+ cp_next(cp);
+ if (cp_str_is(attrstr, "align")) {
+ cp_decl_align(cp, decl);
+ } else { /* Ignore all other attributes. */
+ if (cp_opt(cp, '(')) {
+ while (cp->tok != ')' && cp->tok != CTOK_EOF) cp_next(cp);
+ cp_check(cp, ')');
+ }
+ }
+ }
+ cp_check(cp, ')');
+}
+
+/* Parse declaration attributes (and common qualifiers). */
+static void cp_decl_attributes(CPState *cp, CPDecl *decl)
+{
+ for (;;) {
+ switch (cp->tok) {
+ case CTOK_CONST: decl->attr |= CTF_CONST; break;
+ case CTOK_VOLATILE: decl->attr |= CTF_VOLATILE; break;
+ case CTOK_RESTRICT: break; /* Ignore. */
+ case CTOK_EXTENSION: break; /* Ignore. */
+ case CTOK_ATTRIBUTE: cp_decl_gccattribute(cp, decl); continue;
+ case CTOK_ASM: cp_decl_asm(cp, decl); continue;
+ case CTOK_DECLSPEC: cp_decl_msvcattribute(cp, decl); continue;
+ case CTOK_CCDECL:
+#if LJ_TARGET_X86
+ CTF_INSERT(decl->fattr, CCONV, cp->ct->size);
+ decl->fattr |= CTFP_CCONV;
+#endif
+ break;
+ case CTOK_PTRSZ:
+#if LJ_64
+ CTF_INSERT(decl->attr, MSIZEP, cp->ct->size);
+#endif
+ break;
+ default: return;
+ }
+ cp_next(cp);
+ }
+}
+
+/* Parse struct/union/enum name. */
+static CTypeID cp_struct_name(CPState *cp, CPDecl *sdecl, CTInfo info)
+{
+ CTypeID sid;
+ CType *ct;
+ cp->tmask = CPNS_STRUCT;
+ cp_next(cp);
+ cp_decl_attributes(cp, sdecl);
+ cp->tmask = CPNS_DEFAULT;
+ if (cp->tok != '{') {
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (cp->val.id) { /* Name of existing struct/union/enum. */
+ sid = cp->val.id;
+ ct = cp->ct;
+ if ((ct->info ^ info) & (CTMASK_NUM|CTF_UNION)) /* Wrong type. */
+ cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name))));
+ } else { /* Create named, incomplete struct/union/enum. */
+ if ((cp->mode & CPARSE_MODE_NOIMPLICIT))
+ cp_errmsg(cp, 0, LJ_ERR_FFI_BADTAG, strdata(cp->str));
+ sid = lj_ctype_new(cp->cts, &ct);
+ ct->info = info;
+ ct->size = CTSIZE_INVALID;
+ ctype_setname(ct, cp->str);
+ lj_ctype_addname(cp->cts, ct, sid);
+ }
+ cp_next(cp);
+ } else { /* Create anonymous, incomplete struct/union/enum. */
+ sid = lj_ctype_new(cp->cts, &ct);
+ ct->info = info;
+ ct->size = CTSIZE_INVALID;
+ }
+ if (cp->tok == '{') {
+ if (ct->size != CTSIZE_INVALID || ct->sib)
+ cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(gco2str(gcref(ct->name))));
+ ct->sib = 1; /* Indicate the type is currently being defined. */
+ }
+ return sid;
+}
+
+/* Determine field alignment. */
+static CTSize cp_field_align(CPState *cp, CType *ct, CTInfo info)
+{
+ CTSize align = ctype_align(info);
+ UNUSED(cp); UNUSED(ct);
+#if (LJ_TARGET_X86 && !LJ_ABI_WIN) || (LJ_TARGET_ARM && __APPLE__)
+ /* The SYSV i386 and iOS ABIs limit alignment of non-vector fields to 2^2. */
+ if (align > 2 && !(info & CTFP_ALIGNED)) {
+ if (ctype_isarray(info) && !(info & CTF_VECTOR)) {
+ do {
+ ct = ctype_rawchild(cp->cts, ct);
+ info = ct->info;
+ } while (ctype_isarray(info) && !(info & CTF_VECTOR));
+ }
+ if (ctype_isnum(info) || ctype_isenum(info))
+ align = 2;
+ }
+#endif
+ return align;
+}
+
+/* Layout struct/union fields. */
+static void cp_struct_layout(CPState *cp, CTypeID sid, CTInfo sattr)
+{
+ CTSize bofs = 0, bmaxofs = 0; /* Bit offset and max. bit offset. */
+ CTSize maxalign = ctype_align(sattr);
+ CType *sct = ctype_get(cp->cts, sid);
+ CTInfo sinfo = sct->info;
+ CTypeID fieldid = sct->sib;
+ while (fieldid) {
+ CType *ct = ctype_get(cp->cts, fieldid);
+ CTInfo attr = ct->size; /* Field declaration attributes (temp.). */
+
+ if (ctype_isfield(ct->info) ||
+ (ctype_isxattrib(ct->info, CTA_SUBTYPE) && attr)) {
+ CTSize align, amask; /* Alignment (pow2) and alignment mask (bits). */
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cp->cts, ctype_cid(ct->info), &sz);
+ CTSize bsz, csz = 8*sz; /* Field size and container size (in bits). */
+ sinfo |= (info & (CTF_QUAL|CTF_VLA)); /* Merge pseudo-qualifiers. */
+
+ /* Check for size overflow and determine alignment. */
+ if (sz >= 0x20000000u || bofs + csz < bofs || (info & CTF_VLA)) {
+ if (!(sz == CTSIZE_INVALID && ctype_isarray(info) &&
+ !(sinfo & CTF_UNION)))
+ cp_err(cp, LJ_ERR_FFI_INVSIZE);
+ csz = sz = 0; /* Treat a[] and a[?] as zero-sized. */
+ }
+ align = cp_field_align(cp, ct, info);
+ if (((attr|sattr) & CTFP_PACKED) ||
+ ((attr & CTFP_ALIGNED) && ctype_align(attr) > align))
+ align = ctype_align(attr);
+ if (cp->packstack[cp->curpack] < align)
+ align = cp->packstack[cp->curpack];
+ if (align > maxalign) maxalign = align;
+ amask = (8u << align) - 1;
+
+ bsz = ctype_bitcsz(ct->info); /* Bitfield size (temp.). */
+ if (bsz == CTBSZ_FIELD || !ctype_isfield(ct->info)) {
+ bsz = csz; /* Regular fields or subtypes always fill the container. */
+ bofs = (bofs + amask) & ~amask; /* Start new aligned field. */
+ ct->size = (bofs >> 3); /* Store field offset. */
+ } else { /* Bitfield. */
+ if (bsz == 0 || (attr & CTFP_ALIGNED) ||
+ (!((attr|sattr) & CTFP_PACKED) && (bofs & amask) + bsz > csz))
+ bofs = (bofs + amask) & ~amask; /* Start new aligned field. */
+
+ /* Prefer regular field over bitfield. */
+ if (bsz == csz && (bofs & amask) == 0) {
+ ct->info = CTINFO(CT_FIELD, ctype_cid(ct->info));
+ ct->size = (bofs >> 3); /* Store field offset. */
+ } else {
+ ct->info = CTINFO(CT_BITFIELD,
+ (info & (CTF_QUAL|CTF_UNSIGNED|CTF_BOOL)) +
+ (csz << (CTSHIFT_BITCSZ-3)) + (bsz << CTSHIFT_BITBSZ));
+#if LJ_BE
+ ct->info += ((csz - (bofs & (csz-1)) - bsz) << CTSHIFT_BITPOS);
+#else
+ ct->info += ((bofs & (csz-1)) << CTSHIFT_BITPOS);
+#endif
+ ct->size = ((bofs & ~(csz-1)) >> 3); /* Store container offset. */
+ }
+ }
+
+ /* Determine next offset or max. offset. */
+ if ((sinfo & CTF_UNION)) {
+ if (bsz > bmaxofs) bmaxofs = bsz;
+ } else {
+ bofs += bsz;
+ }
+ } /* All other fields in the chain are already set up. */
+
+ fieldid = ct->sib;
+ }
+
+ /* Complete struct/union. */
+ sct->info = sinfo + CTALIGN(maxalign);
+ bofs = (sinfo & CTF_UNION) ? bmaxofs : bofs;
+ maxalign = (8u << maxalign) - 1;
+ sct->size = (((bofs + maxalign) & ~maxalign) >> 3);
+}
+
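+/* A worked example: for "struct { char a; int b; }" the loop above
+** places a at bit offset 0, rounds bofs up to b's 32 bit alignment mask
+** (byte offset 4), and completes the struct with size 8 and CTALIGN(2).
+** For a union, bmaxofs tracks the largest member instead and all members
+** start at offset 0.
+*/
+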
+/* Parse struct/union declaration. */
+static CTypeID cp_decl_struct(CPState *cp, CPDecl *sdecl, CTInfo sinfo)
+{
+ CTypeID sid = cp_struct_name(cp, sdecl, sinfo);
+ if (cp_opt(cp, '{')) { /* Struct/union definition. */
+ CTypeID lastid = sid;
+ int lastdecl = 0;
+ while (cp->tok != '}') {
+ CPDecl decl;
+ CPscl scl = cp_decl_spec(cp, &decl, CDF_STATIC);
+ decl.mode = scl ? CPARSE_MODE_DIRECT :
+ CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT|CPARSE_MODE_FIELD;
+
+ for (;;) {
+ CTypeID ctypeid;
+
+ if (lastdecl) cp_err_token(cp, '}');
+
+ /* Parse field declarator. */
+ decl.bits = CTSIZE_INVALID;
+ cp_declarator(cp, &decl);
+ ctypeid = cp_decl_intern(cp, &decl);
+
+ if ((scl & CDF_STATIC)) { /* Static constant in struct namespace. */
+ CType *ct;
+ CTypeID fieldid = cp_decl_constinit(cp, &ct, ctypeid);
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ lastid = fieldid;
+ ctype_setname(ct, decl.name);
+ } else {
+ CTSize bsz = CTBSZ_FIELD; /* Temp. for layout phase. */
+ CType *ct;
+ CTypeID fieldid = lj_ctype_new(cp->cts, &ct); /* Do this first. */
+ CType *tct = ctype_raw(cp->cts, ctypeid);
+
+ if (decl.bits == CTSIZE_INVALID) { /* Regular field. */
+ if (ctype_isarray(tct->info) && tct->size == CTSIZE_INVALID)
+ lastdecl = 1; /* a[] or a[?] must be the last declared field. */
+
+ /* Accept transparent struct/union/enum. */
+ if (!decl.name) {
+ if (!((ctype_isstruct(tct->info) && !(tct->info & CTF_VLA)) ||
+ ctype_isenum(tct->info)))
+ cp_err_token(cp, CTOK_IDENT);
+ ct->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_SUBTYPE) + ctypeid);
+ ct->size = ctype_isstruct(tct->info) ?
+ (decl.attr|0x80000000u) : 0; /* For layout phase. */
+ goto add_field;
+ }
+ } else { /* Bitfield. */
+ bsz = decl.bits;
+ if (!ctype_isinteger_or_bool(tct->info) ||
+ (bsz == 0 && decl.name) || 8*tct->size > CTBSZ_MAX ||
+ bsz > ((tct->info & CTF_BOOL) ? 1 : 8*tct->size))
+ cp_errmsg(cp, ':', LJ_ERR_BADVAL);
+ }
+
+ /* Create temporary field for layout phase. */
+ ct->info = CTINFO(CT_FIELD, ctypeid + (bsz << CTSHIFT_BITCSZ));
+ ct->size = decl.attr;
+ if (decl.name) ctype_setname(ct, decl.name);
+
+ add_field:
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ lastid = fieldid;
+ }
+ if (!cp_opt(cp, ',')) break;
+ cp_decl_reset(&decl);
+ }
+ cp_check(cp, ';');
+ }
+ cp_check(cp, '}');
+ ctype_get(cp->cts, lastid)->sib = 0; /* Drop sib = 1 for empty structs. */
+ cp_decl_attributes(cp, sdecl); /* Layout phase needs postfix attributes. */
+ cp_struct_layout(cp, sid, sdecl->attr);
+ }
+ return sid;
+}
+
+/* Parse enum declaration. */
+static CTypeID cp_decl_enum(CPState *cp, CPDecl *sdecl)
+{
+ CTypeID eid = cp_struct_name(cp, sdecl, CTINFO(CT_ENUM, CTID_VOID));
+ CTInfo einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_UINT32);
+ CTSize esize = 4; /* Only 32 bit enums are supported. */
+ if (cp_opt(cp, '{')) { /* Enum definition. */
+ CPValue k;
+ CTypeID lastid = eid;
+ k.u32 = 0;
+ k.id = CTID_INT32;
+ do {
+ GCstr *name = cp->str;
+ if (cp->tok != CTOK_IDENT) cp_err_token(cp, CTOK_IDENT);
+ if (cp->val.id) cp_errmsg(cp, 0, LJ_ERR_FFI_REDEF, strdata(name));
+ cp_next(cp);
+ if (cp_opt(cp, '=')) {
+ cp_expr_kint(cp, &k);
+ if (k.id == CTID_UINT32) {
+ /* C99 says that enum constants are always (signed) integers.
+ ** But since unsigned constants like 0x80000000 are quite common,
+ ** those are left as uint32_t.
+ */
+ if (k.i32 >= 0) k.id = CTID_INT32;
+ } else {
+ /* OTOH it's common practice and even mandated by some ABIs
+ ** that the enum type itself is unsigned, unless there are any
+ ** negative constants.
+ */
+ k.id = CTID_INT32;
+ if (k.i32 < 0) einfo = CTINFO(CT_ENUM, CTALIGN(2) + CTID_INT32);
+ }
+ }
+ /* Add named enum constant. */
+ {
+ CType *ct;
+ CTypeID constid = lj_ctype_new(cp->cts, &ct);
+ ctype_get(cp->cts, lastid)->sib = constid;
+ lastid = constid;
+ ctype_setname(ct, name);
+ ct->info = CTINFO(CT_CONSTVAL, CTF_CONST|k.id);
+ ct->size = k.u32++;
+ if (k.u32 == 0x80000000u) k.id = CTID_UINT32;
+ lj_ctype_addname(cp->cts, ct, constid);
+ }
+ if (!cp_opt(cp, ',')) break;
+ } while (cp->tok != '}'); /* Trailing ',' is ok. */
+ cp_check(cp, '}');
+ /* Complete enum. */
+ ctype_get(cp->cts, eid)->info = einfo;
+ ctype_get(cp->cts, eid)->size = esize;
+ }
+ return eid;
+}
+
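+/* Example: "enum { A, B = 0x80000000, C }" yields A = 0 as int32_t,
+** B kept as uint32_t per the comment above, and C = 0x80000001 (also
+** uint32_t); any negative constant flips the enum type itself to
+** CTID_INT32.
+*/
+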
+/* Parse declaration specifiers. */
+static CPscl cp_decl_spec(CPState *cp, CPDecl *decl, CPscl scl)
+{
+ uint32_t cds = 0, sz = 0;
+ CTypeID tdef = 0;
+
+ decl->cp = cp;
+ decl->mode = cp->mode;
+ decl->name = NULL;
+ decl->redir = NULL;
+ decl->attr = 0;
+ decl->fattr = 0;
+ decl->pos = decl->top = 0;
+ decl->stack[0].next = 0;
+
+ for (;;) { /* Parse basic types. */
+ cp_decl_attributes(cp, decl);
+ if (cp->tok >= CTOK_FIRSTDECL && cp->tok <= CTOK_LASTDECLFLAG) {
+ uint32_t cbit;
+ if (cp->ct->size) {
+ if (sz) goto end_decl;
+ sz = cp->ct->size;
+ }
+ cbit = (1u << (cp->tok - CTOK_FIRSTDECL));
+ cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1);
+ if (cp->tok >= CTOK_FIRSTSCL) {
+ if (!(scl & cbit)) cp_errmsg(cp, cp->tok, LJ_ERR_FFI_BADSCL);
+ } else if (tdef) {
+ goto end_decl;
+ }
+ cp_next(cp);
+ continue;
+ }
+ if (sz || tdef ||
+ (cds & (CDF_SHORT|CDF_LONG|CDF_SIGNED|CDF_UNSIGNED|CDF_COMPLEX)))
+ break;
+ switch (cp->tok) {
+ case CTOK_STRUCT:
+ tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, 0));
+ continue;
+ case CTOK_UNION:
+ tdef = cp_decl_struct(cp, decl, CTINFO(CT_STRUCT, CTF_UNION));
+ continue;
+ case CTOK_ENUM:
+ tdef = cp_decl_enum(cp, decl);
+ continue;
+ case CTOK_IDENT:
+ if (ctype_istypedef(cp->ct->info)) {
+ tdef = ctype_cid(cp->ct->info); /* Get typedef. */
+ cp_next(cp);
+ continue;
+ }
+ break;
+ case '$':
+ tdef = cp->val.id;
+ cp_next(cp);
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+end_decl:
+
+ if ((cds & CDF_COMPLEX)) /* Use predefined complex types. */
+ tdef = sz == 4 ? CTID_COMPLEX_FLOAT : CTID_COMPLEX_DOUBLE;
+
+ if (tdef) {
+ cp_push_type(decl, tdef);
+ } else if ((cds & CDF_VOID)) {
+ cp_push(decl, CTINFO(CT_VOID, (decl->attr & CTF_QUAL)), CTSIZE_INVALID);
+ decl->attr &= ~CTF_QUAL;
+ } else {
+ /* Determine type info and size. */
+ CTInfo info = CTINFO(CT_NUM, (cds & CDF_UNSIGNED) ? CTF_UNSIGNED : 0);
+ if ((cds & CDF_BOOL)) {
+ if ((cds & ~(CDF_SCL|CDF_BOOL|CDF_INT|CDF_SIGNED|CDF_UNSIGNED)))
+ cp_errmsg(cp, 0, LJ_ERR_FFI_INVTYPE);
+ info |= CTF_BOOL;
+ if (!(cds & CDF_SIGNED)) info |= CTF_UNSIGNED;
+ if (!sz) {
+ sz = 1;
+ }
+ } else if ((cds & CDF_FP)) {
+ info = CTINFO(CT_NUM, CTF_FP);
+ if ((cds & CDF_LONG)) sz = sizeof(long double);
+ } else if ((cds & CDF_CHAR)) {
+ if ((cds & (CDF_CHAR|CDF_SIGNED|CDF_UNSIGNED)) == CDF_CHAR)
+ info |= CTF_UCHAR; /* Handle platforms where char is unsigned. */
+ } else if ((cds & CDF_SHORT)) {
+ sz = sizeof(short);
+ } else if ((cds & CDF_LONGLONG)) {
+ sz = 8;
+ } else if ((cds & CDF_LONG)) {
+ info |= CTF_LONG;
+ sz = sizeof(long);
+ } else if (!sz) {
+ if (!(cds & (CDF_SIGNED|CDF_UNSIGNED)))
+ cp_errmsg(cp, cp->tok, LJ_ERR_FFI_DECLSPEC);
+ sz = sizeof(int);
+ }
+ lj_assertCP(sz != 0, "basic ctype with zero size");
+ info += CTALIGN(lj_fls(sz)); /* Use natural alignment. */
+ info += (decl->attr & CTF_QUAL); /* Merge qualifiers. */
+ cp_push(decl, info, sz);
+ decl->attr &= ~CTF_QUAL;
+ }
+ decl->specpos = decl->pos;
+ decl->specattr = decl->attr;
+ decl->specfattr = decl->fattr;
+ return (cds & CDF_SCL); /* Return storage class. */
+}
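+
+/* Illustrative sketch (commentary only): "unsigned long long" accumulates
+** CDF_UNSIGNED and, via the doubled-long trick in
+**   cds = cds | cbit | ((cbit & cds & CDF_LONG) << 1);
+** CDF_LONGLONG, yielding an 8 byte CT_NUM. A bare "signed" or "unsigned"
+** without a size specifier defaults to sizeof(int) in the final branch.
+*/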
+
+/* Parse array declaration. */
+static void cp_decl_array(CPState *cp, CPDecl *decl)
+{
+ CTInfo info = CTINFO(CT_ARRAY, 0);
+ CTSize nelem = CTSIZE_INVALID; /* Default size for a[] or a[?]. */
+ cp_decl_attributes(cp, decl);
+ if (cp_opt(cp, '?'))
+ info |= CTF_VLA; /* Create variable-length array a[?]. */
+ else if (cp->tok != ']')
+ nelem = cp_expr_ksize(cp);
+ cp_check(cp, ']');
+ cp_add(decl, info, nelem);
+}
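+
+/* Illustrative sketch (commentary only): this accepts
+**   int a[10];  fixed size, nelem = 10.
+**   int a[];    flexible array, nelem stays CTSIZE_INVALID.
+**   int a[?];   CTF_VLA set; the size is supplied at runtime, e.g. by
+**               ffi.new("int[?]", n) from Lua.
+*/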
+
+/* Parse function declaration. */
+static void cp_decl_func(CPState *cp, CPDecl *fdecl)
+{
+ CTSize nargs = 0;
+ CTInfo info = CTINFO(CT_FUNC, 0);
+ CTypeID lastid = 0, anchor = 0;
+ if (cp->tok != ')') {
+ do {
+ CPDecl decl;
+ CTypeID ctypeid, fieldid;
+ CType *ct;
+ if (cp_opt(cp, '.')) { /* Vararg function. */
+ cp_check(cp, '.'); /* Workaround for the minimalistic lexer. */
+ cp_check(cp, '.');
+ info |= CTF_VARARG;
+ break;
+ }
+ cp_decl_spec(cp, &decl, CDF_REGISTER);
+ decl.mode = CPARSE_MODE_DIRECT|CPARSE_MODE_ABSTRACT;
+ cp_declarator(cp, &decl);
+ ctypeid = cp_decl_intern(cp, &decl);
+ ct = ctype_raw(cp->cts, ctypeid);
+ if (ctype_isvoid(ct->info))
+ break;
+ else if (ctype_isrefarray(ct->info))
+ ctypeid = lj_ctype_intern(cp->cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ct->info)), CTSIZE_PTR);
+ else if (ctype_isfunc(ct->info))
+ ctypeid = lj_ctype_intern(cp->cts,
+ CTINFO(CT_PTR, CTALIGN_PTR|ctypeid), CTSIZE_PTR);
+ /* Add new parameter. */
+ fieldid = lj_ctype_new(cp->cts, &ct);
+ if (anchor)
+ ctype_get(cp->cts, lastid)->sib = fieldid;
+ else
+ anchor = fieldid;
+ lastid = fieldid;
+ if (decl.name) ctype_setname(ct, decl.name);
+ ct->info = CTINFO(CT_FIELD, ctypeid);
+ ct->size = nargs++;
+ } while (cp_opt(cp, ','));
+ }
+ cp_check(cp, ')');
+ if (cp_opt(cp, '{')) { /* Skip function definition. */
+ int level = 1;
+ cp->mode |= CPARSE_MODE_SKIP;
+ for (;;) {
+ if (cp->tok == '{') level++;
+ else if (cp->tok == '}' && --level == 0) break;
+ else if (cp->tok == CTOK_EOF) cp_err_token(cp, '}');
+ cp_next(cp);
+ }
+ cp->mode &= ~CPARSE_MODE_SKIP;
+ cp->tok = ';'; /* Ok for cp_decl_multi(), error in cp_decl_single(). */
+ }
+ info |= (fdecl->fattr & ~CTMASK_CID);
+ fdecl->fattr = 0;
+ fdecl->stack[cp_add(fdecl, info, nargs)].sib = anchor;
+}
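+
+/* Illustrative sketch (commentary only): for
+**   int printf(const char *fmt, ...);
+** one CT_FIELD parameter is chained from the anchor, the '...' branch
+** sets CTF_VARARG, and array or function parameters decay to pointers,
+** mirroring C semantics.
+*/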
+
+/* Parse declarator. */
+static void cp_declarator(CPState *cp, CPDecl *decl)
+{
+ if (++cp->depth > CPARSE_MAX_DECLDEPTH) cp_err(cp, LJ_ERR_XLEVELS);
+
+ for (;;) { /* Head of declarator. */
+ if (cp_opt(cp, '*')) { /* Pointer. */
+ CTSize sz;
+ CTInfo info;
+ cp_decl_attributes(cp, decl);
+ sz = CTSIZE_PTR;
+ info = CTINFO(CT_PTR, CTALIGN_PTR);
+#if LJ_64
+ if (ctype_msizeP(decl->attr) == 4) {
+ sz = 4;
+ info = CTINFO(CT_PTR, CTALIGN(2));
+ }
+#endif
+ info += (decl->attr & (CTF_QUAL|CTF_REF));
+ decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP));
+ cp_push(decl, info, sz);
+ } else if (cp_opt(cp, '&') || cp_opt(cp, CTOK_ANDAND)) { /* Reference. */
+ decl->attr &= ~(CTF_QUAL|(CTMASK_MSIZEP<<CTSHIFT_MSIZEP));
+ cp_push(decl, CTINFO_REF(0), CTSIZE_PTR);
+ } else {
+ break;
+ }
+ }
+
+ if (cp_opt(cp, '(')) { /* Inner declarator. */
+ CPDeclIdx pos;
+ cp_decl_attributes(cp, decl);
+ /* Resolve ambiguity between inner declarator and 1st function parameter. */
+ if ((decl->mode & CPARSE_MODE_ABSTRACT) &&
+ (cp->tok == ')' || cp_istypedecl(cp))) goto func_decl;
+ pos = decl->pos;
+ cp_declarator(cp, decl);
+ cp_check(cp, ')');
+ decl->pos = pos;
+ } else if (cp->tok == CTOK_IDENT) { /* Direct declarator. */
+ if (!(decl->mode & CPARSE_MODE_DIRECT)) cp_err_token(cp, CTOK_EOF);
+ decl->name = cp->str;
+ decl->nameid = cp->val.id;
+ cp_next(cp);
+ } else { /* Abstract declarator. */
+ if (!(decl->mode & CPARSE_MODE_ABSTRACT)) cp_err_token(cp, CTOK_IDENT);
+ }
+
+ for (;;) { /* Tail of declarator. */
+ if (cp_opt(cp, '[')) { /* Array. */
+ cp_decl_array(cp, decl);
+ } else if (cp_opt(cp, '(')) { /* Function. */
+ func_decl:
+ cp_decl_func(cp, decl);
+ } else {
+ break;
+ }
+ }
+
+ if ((decl->mode & CPARSE_MODE_FIELD) && cp_opt(cp, ':')) /* Field width. */
+ decl->bits = cp_expr_ksize(cp);
+
+ /* Process postfix attributes. */
+ cp_decl_attributes(cp, decl);
+ cp_push_attributes(decl);
+
+ cp->depth--;
+}
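+
+/* Illustrative sketch (commentary only): the head/tail split above yields
+** the usual C precedence, e.g. "int *a[10]" parses as an array of ten
+** pointers, while the inner declarator path handles "int (*a)[10]", a
+** pointer to an array of ten ints.
+*/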
+
+/* Parse an abstract type declaration and return its C type ID. */
+static CTypeID cp_decl_abstract(CPState *cp)
+{
+ CPDecl decl;
+ cp_decl_spec(cp, &decl, 0);
+ decl.mode = CPARSE_MODE_ABSTRACT;
+ cp_declarator(cp, &decl);
+ return cp_decl_intern(cp, &decl);
+}
+
+/* Handle pragmas. */
+static void cp_pragma(CPState *cp, BCLine pragmaline)
+{
+ cp_next(cp);
+ if (cp->tok == CTOK_IDENT && cp_str_is(cp->str, "pack")) {
+ cp_next(cp);
+ cp_check(cp, '(');
+ if (cp->tok == CTOK_IDENT) {
+ if (cp_str_is(cp->str, "push")) {
+ if (cp->curpack < CPARSE_MAX_PACKSTACK) {
+ cp->packstack[cp->curpack+1] = cp->packstack[cp->curpack];
+ cp->curpack++;
+ }
+ } else if (cp_str_is(cp->str, "pop")) {
+ if (cp->curpack > 0) cp->curpack--;
+ } else {
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ }
+ cp_next(cp);
+ if (!cp_opt(cp, ',')) goto end_pack;
+ }
+ if (cp->tok == CTOK_INTEGER) {
+ cp->packstack[cp->curpack] = cp->val.u32 ? lj_fls(cp->val.u32) : 0;
+ cp_next(cp);
+ } else {
+ cp->packstack[cp->curpack] = 255;
+ }
+ end_pack:
+ cp_check(cp, ')');
+ } else { /* Ignore all other pragmas. */
+ while (cp->tok != CTOK_EOF && cp->linenumber == pragmaline)
+ cp_next(cp);
+ }
+}
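+
+/* Illustrative sketch (commentary only): inside ffi.cdef
+**   #pragma pack(push, 1)   pushes, then stores lj_fls(1) = 0, i.e.
+**                           a 2^0 = 1 byte packing limit.
+**   #pragma pack(pop)       restores the previous packing.
+**   #pragma pack()          resets to the 255 sentinel (default packing).
+** Any other pragma is skipped up to the end of its source line.
+*/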
+
+/* Handle line number. */
+static void cp_line(CPState *cp, BCLine hashline)
+{
+ BCLine newline = cp->val.u32;
+ /* TODO: Handle file name and include it in error messages. */
+ while (cp->tok != CTOK_EOF && cp->linenumber == hashline)
+ cp_next(cp);
+ cp->linenumber = newline;
+}
+
+/* Parse multiple C declarations of types or extern identifiers. */
+static void cp_decl_multi(CPState *cp)
+{
+ int first = 1;
+ while (cp->tok != CTOK_EOF) {
+ CPDecl decl;
+ CPscl scl;
+ if (cp_opt(cp, ';')) { /* Skip empty statements. */
+ first = 0;
+ continue;
+ }
+ if (cp->tok == '#') { /* Workaround, since we have no preprocessor, yet. */
+ BCLine hashline = cp->linenumber;
+ CPToken tok = cp_next(cp);
+ if (tok == CTOK_INTEGER) {
+ cp_line(cp, hashline);
+ continue;
+ } else if (tok == CTOK_IDENT && cp_str_is(cp->str, "line")) {
+ if (cp_next(cp) != CTOK_INTEGER) cp_err_token(cp, tok);
+ cp_line(cp, hashline);
+ continue;
+ } else if (tok == CTOK_IDENT && cp_str_is(cp->str, "pragma")) {
+ cp_pragma(cp, hashline);
+ continue;
+ } else {
+ cp_errmsg(cp, cp->tok, LJ_ERR_XSYMBOL);
+ }
+ }
+ scl = cp_decl_spec(cp, &decl, CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC);
+ if ((cp->tok == ';' || cp->tok == CTOK_EOF) &&
+ ctype_istypedef(decl.stack[0].info)) {
+ CTInfo info = ctype_rawchild(cp->cts, &decl.stack[0])->info;
+ if (ctype_isstruct(info) || ctype_isenum(info))
+ goto decl_end; /* Accept empty declaration of struct/union/enum. */
+ }
+ for (;;) {
+ CTypeID ctypeid;
+ cp_declarator(cp, &decl);
+ ctypeid = cp_decl_intern(cp, &decl);
+ if (decl.name && !decl.nameid) { /* NYI: redeclarations are ignored. */
+ CType *ct;
+ CTypeID id;
+ if ((scl & CDF_TYPEDEF)) { /* Create new typedef. */
+ id = lj_ctype_new(cp->cts, &ct);
+ ct->info = CTINFO(CT_TYPEDEF, ctypeid);
+ goto noredir;
+ } else if (ctype_isfunc(ctype_get(cp->cts, ctypeid)->info)) {
+ /* Treat both static and extern function declarations as extern. */
+ ct = ctype_get(cp->cts, ctypeid);
+ /* We always get new anonymous functions (typedefs are copied). */
+ lj_assertCP(gcref(ct->name) == NULL, "unexpected named function");
+ id = ctypeid; /* Just name it. */
+ } else if ((scl & CDF_STATIC)) { /* Accept static constants. */
+ id = cp_decl_constinit(cp, &ct, ctypeid);
+ goto noredir;
+ } else { /* External references have extern or no storage class. */
+ id = lj_ctype_new(cp->cts, &ct);
+ ct->info = CTINFO(CT_EXTERN, ctypeid);
+ }
+ if (decl.redir) { /* Add attribute for redirected symbol name. */
+ CType *cta;
+ CTypeID aid = lj_ctype_new(cp->cts, &cta);
+ ct = ctype_get(cp->cts, id); /* Table may have been reallocated. */
+ cta->info = CTINFO(CT_ATTRIB, CTATTRIB(CTA_REDIR));
+ cta->sib = ct->sib;
+ ct->sib = aid;
+ ctype_setname(cta, decl.redir);
+ }
+ noredir:
+ ctype_setname(ct, decl.name);
+ lj_ctype_addname(cp->cts, ct, id);
+ }
+ if (!cp_opt(cp, ',')) break;
+ cp_decl_reset(&decl);
+ }
+ decl_end:
+ if (cp->tok == CTOK_EOF && first) break; /* May omit ';' for 1 decl. */
+ first = 0;
+ cp_check(cp, ';');
+ }
+}
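+
+/* Illustrative sketch (commentary only): one ffi.cdef may thus mix
+**   typedef struct point { int x, y; } point_t;   new typedef
+**   extern int errno;                             CT_EXTERN reference
+**   static const int FOO = 42;                    cp_decl_constinit()
+** separated by ';' (omittable for a single declaration at EOF).
+*/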
+
+/* Parse a single C type declaration. */
+static void cp_decl_single(CPState *cp)
+{
+ CPDecl decl;
+ cp_decl_spec(cp, &decl, 0);
+ cp_declarator(cp, &decl);
+ cp->val.id = cp_decl_intern(cp, &decl);
+ if (cp->tok != CTOK_EOF) cp_err_token(cp, CTOK_EOF);
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Protected callback for C parser. */
+static TValue *cpcparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ CPState *cp = (CPState *)ud;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ cp_init(cp);
+ if ((cp->mode & CPARSE_MODE_MULTI))
+ cp_decl_multi(cp);
+ else
+ cp_decl_single(cp);
+ if (cp->param && cp->param != cp->L->top)
+ cp_err(cp, LJ_ERR_FFI_NUMPARAM);
+ lj_assertCP(cp->depth == 0, "unbalanced cparser declaration depth");
+ return NULL;
+}
+
+/* C parser. */
+int lj_cparse(CPState *cp)
+{
+ LJ_CTYPE_SAVE(cp->cts);
+ int errcode = lj_vm_cpcall(cp->L, NULL, cp, cpcparser);
+ if (errcode)
+ LJ_CTYPE_RESTORE(cp->cts);
+ cp_cleanup(cp);
+ return errcode;
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_cparse.h b/libs/luajit-cmake/luajit/src/lj_cparse.h
new file mode 100644
index 0000000..c0f61ed
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_cparse.h
@@ -0,0 +1,67 @@
+/*
+** C declaration parser.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CPARSE_H
+#define _LJ_CPARSE_H
+
+#include "lj_obj.h"
+#include "lj_ctype.h"
+
+#if LJ_HASFFI
+
+/* C parser limits. */
+#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */
+#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */
+#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */
+#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */
+
+/* Flags for C parser mode. */
+#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */
+#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */
+#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */
+#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */
+#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */
+#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */
+
+typedef int CPChar; /* C parser character. Unsigned ext. from char. */
+typedef int CPToken; /* C parser token. */
+
+/* C parser internal value representation. */
+typedef struct CPValue {
+ union {
+ int32_t i32; /* Value for CTID_INT32. */
+ uint32_t u32; /* Value for CTID_UINT32. */
+ };
+ CTypeID id; /* C type ID of the value. */
+} CPValue;
+
+/* C parser state. */
+typedef struct CPState {
+ CPChar c; /* Current character. */
+ CPToken tok; /* Current token. */
+ CPValue val; /* Token value. */
+ GCstr *str; /* Interned string of identifier/keyword. */
+ CType *ct; /* C type table entry. */
+ const char *p; /* Current position in input buffer. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_State *L; /* Lua state. */
+ CTState *cts; /* C type state. */
+ TValue *param; /* C type parameters. */
+ const char *srcname; /* Current source name. */
+ BCLine linenumber; /* Input line counter. */
+ int depth; /* Recursive declaration depth. */
+ uint32_t tmask; /* Type mask for next identifier. */
+ uint32_t mode; /* C parser mode. */
+ uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */
+ uint8_t curpack; /* Current position in pack pragma stack. */
+} CPState;
+
+LJ_FUNC int lj_cparse(CPState *cp);
+
+LJ_FUNC int lj_cparse_case(GCstr *str, const char *match);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_crecord.c b/libs/luajit-cmake/luajit/src/lj_crecord.c
new file mode 100644
index 0000000..bc21d85
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_crecord.c
@@ -0,0 +1,1944 @@
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ffrecord_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+
+#include "lj_err.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_cparse.h"
+#include "lj_cconv.h"
+#include "lj_carith.h"
+#include "lj_clib.h"
+#include "lj_ccall.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_crecord.h"
+#include "lj_dispatch.h"
+#include "lj_strfmt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+#define emitconv(a, dt, st, flags) \
+ emitir(IRT(IR_CONV, (dt)), (a), (st)|((dt) << 5)|(flags))
+
+/* -- C type checks ------------------------------------------------------- */
+
+static GCcdata *argv2cdata(jit_State *J, TRef tr, cTValue *o)
+{
+ GCcdata *cd;
+ TRef trtypeid;
+ if (!tref_iscdata(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ cd = cdataV(o);
+ /* Specialize to the CTypeID. */
+ trtypeid = emitir(IRT(IR_FLOAD, IRT_U16), tr, IRFL_CDATA_CTYPEID);
+ emitir(IRTG(IR_EQ, IRT_INT), trtypeid, lj_ir_kint(J, (int32_t)cd->ctypeid));
+ return cd;
+}
+
+/* Specialize to the CTypeID held by a cdata constructor. */
+static CTypeID crec_constructor(jit_State *J, GCcdata *cd, TRef tr)
+{
+ CTypeID id;
+ lj_assertJ(tref_iscdata(tr) && cd->ctypeid == CTID_CTYPEID,
+ "expected CTypeID cdata");
+ id = *(CTypeID *)cdataptr(cd);
+ tr = emitir(IRT(IR_FLOAD, IRT_INT), tr, IRFL_CDATA_INT);
+ emitir(IRTG(IR_EQ, IRT_INT), tr, lj_ir_kint(J, (int32_t)id));
+ return id;
+}
+
+static CTypeID argv2ctype(jit_State *J, TRef tr, cTValue *o)
+{
+ if (tref_isstr(tr)) {
+ GCstr *s = strV(o);
+ CPState cp;
+ CTypeID oldtop;
+ /* Specialize to the string containing the C type declaration. */
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, s));
+ cp.L = J->L;
+ cp.cts = ctype_cts(J->L);
+ oldtop = cp.cts->top;
+ cp.srcname = strdata(s);
+ cp.p = strdata(s);
+ cp.param = NULL;
+ cp.mode = CPARSE_MODE_ABSTRACT|CPARSE_MODE_NOIMPLICIT;
+ if (lj_cparse(&cp) || cp.cts->top > oldtop) /* Avoid new struct defs. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return cp.val.id;
+ } else {
+ GCcdata *cd = argv2cdata(J, tr, o);
+ return cd->ctypeid == CTID_CTYPEID ? crec_constructor(J, cd, tr) :
+ cd->ctypeid;
+ }
+}
+
+/* Convert CType to IRType (if possible). */
+static IRType crec_ct2irt(CTState *cts, CType *ct)
+{
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (LJ_LIKELY(ctype_isnum(ct->info))) {
+ if ((ct->info & CTF_FP)) {
+ if (ct->size == sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == sizeof(float))
+ return IRT_FLOAT;
+ } else {
+ uint32_t b = lj_fls(ct->size);
+ if (b <= 3)
+ return IRT_I8 + 2*b + ((ct->info & CTF_UNSIGNED) ? 1 : 0);
+ }
+ } else if (ctype_isptr(ct->info)) {
+ return (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ } else if (ctype_iscomplex(ct->info)) {
+ if (ct->size == 2*sizeof(double))
+ return IRT_NUM;
+ else if (ct->size == 2*sizeof(float))
+ return IRT_FLOAT;
+ }
+ return IRT_CDATA;
+}
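+
+/* Illustrative sketch of the mapping above (commentary only):
+**   uint8_t -> IRT_U8, int32_t -> IRT_INT, float -> IRT_FLOAT,
+**   double -> IRT_NUM, pointers -> IRT_P32/IRT_P64 by size.
+** Anything unhandled yields IRT_CDATA, which callers treat as NYI.
+*/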
+
+/* -- Optimized memory fill and copy -------------------------------------- */
+
+/* Maximum length and unroll of inlined copy/fill. */
+#define CREC_COPY_MAXUNROLL 16
+#define CREC_COPY_MAXLEN 128
+
+#define CREC_FILL_MAXUNROLL 16
+
+/* Number of windowed registers used for optimized memory copy. */
+#if LJ_TARGET_X86
+#define CREC_COPY_REGWIN 2
+#elif LJ_TARGET_PPC || LJ_TARGET_MIPS
+#define CREC_COPY_REGWIN 8
+#else
+#define CREC_COPY_REGWIN 4
+#endif
+
+/* List of memory offsets for copy/fill. */
+typedef struct CRecMemList {
+ CTSize ofs; /* Offset in bytes. */
+ IRType tp; /* Type of load/store. */
+ TRef trofs; /* TRef of interned offset. */
+ TRef trval; /* TRef of load value. */
+} CRecMemList;
+
+/* Generate copy list for element-wise struct copy. */
+static MSize crec_copy_struct(CRecMemList *ml, CTState *cts, CType *ct)
+{
+ CTypeID fid = ct->sib;
+ MSize mlp = 0;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *cct;
+ IRType tp;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ cct = ctype_rawchild(cts, df); /* Field type. */
+ tp = crec_ct2irt(cts, cct);
+ if (tp == IRT_CDATA) return 0; /* NYI: aggregates. */
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = df->size;
+ ml[mlp].tp = tp;
+ mlp++;
+ if (ctype_iscomplex(cct->info)) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = df->size + (cct->size >> 1);
+ ml[mlp].tp = tp;
+ mlp++;
+ }
+ } else if (!ctype_isconstval(df->info)) {
+ /* NYI: bitfields and sub-structures. */
+ return 0;
+ }
+ }
+ return mlp;
+}
+
+/* Generate unrolled copy list, from highest to lowest step size/alignment. */
+static MSize crec_copy_unroll(CRecMemList *ml, CTSize len, CTSize step,
+ IRType tp)
+{
+ CTSize ofs = 0;
+ MSize mlp = 0;
+ if (tp == IRT_CDATA) tp = IRT_U8 + 2*lj_fls(step);
+ do {
+ while (ofs + step <= len) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = ofs;
+ ml[mlp].tp = tp;
+ mlp++;
+ ofs += step;
+ }
+ step >>= 1;
+ tp -= 2;
+ } while (ofs < len);
+ return mlp;
+}
+
+/*
+** Emit copy list with windowed loads/stores.
+** LJ_TARGET_UNALIGNED: may emit unaligned loads/stores (not marked as such).
+*/
+static void crec_copy_emit(jit_State *J, CRecMemList *ml, MSize mlp,
+ TRef trdst, TRef trsrc)
+{
+ MSize i, j, rwin = 0;
+ for (i = 0, j = 0; i < mlp; ) {
+ TRef trofs = lj_ir_kintp(J, ml[i].ofs);
+ TRef trsptr = emitir(IRT(IR_ADD, IRT_PTR), trsrc, trofs);
+ ml[i].trval = emitir(IRT(IR_XLOAD, ml[i].tp), trsptr, 0);
+ ml[i].trofs = trofs;
+ i++;
+ rwin += (LJ_SOFTFP32 && ml[i].tp == IRT_NUM) ? 2 : 1;
+ if (rwin >= CREC_COPY_REGWIN || i >= mlp) { /* Flush buffered stores. */
+ rwin = 0;
+ for ( ; j < i; j++) {
+ TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, ml[j].trofs);
+ emitir(IRT(IR_XSTORE, ml[j].tp), trdptr, ml[j].trval);
+ }
+ }
+ }
+}
+
+/* Optimized memory copy. */
+static void crec_copy(jit_State *J, TRef trdst, TRef trsrc, TRef trlen,
+ CType *ct)
+{
+ if (tref_isk(trlen)) { /* Length must be constant. */
+ CRecMemList ml[CREC_COPY_MAXUNROLL];
+ MSize mlp = 0;
+ CTSize step = 1, len = (CTSize)IR(tref_ref(trlen))->i;
+ IRType tp = IRT_CDATA;
+ int needxbar = 0;
+ if (len == 0) return; /* Shortcut. */
+ if (len > CREC_COPY_MAXLEN) goto fallback;
+ if (ct) {
+ CTState *cts = ctype_ctsG(J2G(J));
+ lj_assertJ(ctype_isarray(ct->info) || ctype_isstruct(ct->info),
+ "copy of non-aggregate");
+ if (ctype_isarray(ct->info)) {
+ CType *cct = ctype_rawchild(cts, ct);
+ tp = crec_ct2irt(cts, cct);
+ if (tp == IRT_CDATA) goto rawcopy;
+ step = lj_ir_type_size[tp];
+ lj_assertJ((len & (step-1)) == 0, "copy of fractional size");
+ } else if ((ct->info & CTF_UNION)) {
+ step = (1u << ctype_align(ct->info));
+ goto rawcopy;
+ } else {
+ mlp = crec_copy_struct(ml, cts, ct);
+ goto emitcopy;
+ }
+ } else {
+ rawcopy:
+ needxbar = 1;
+ if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
+ step = CTSIZE_PTR;
+ }
+ mlp = crec_copy_unroll(ml, len, step, tp);
+ emitcopy:
+ if (mlp) {
+ crec_copy_emit(J, ml, mlp, trdst, trsrc);
+ if (needxbar)
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+ return;
+ }
+ }
+fallback:
+ /* Call memcpy. Always needs a barrier to disable alias analysis. */
+ lj_ir_call(J, IRCALL_memcpy, trdst, trsrc, trlen);
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+}
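+
+/* Illustrative sketch (commentary only): a constant 12 byte array copy
+** with 4 byte elements unrolls to three XLOAD/XSTORE pairs at offsets
+** 0, 4 and 8, buffered in windows of CREC_COPY_REGWIN; longer or
+** non-constant lengths fall back to memcpy plus XBAR.
+*/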
+
+/* Generate unrolled fill list, from highest to lowest step size/alignment. */
+static MSize crec_fill_unroll(CRecMemList *ml, CTSize len, CTSize step)
+{
+ CTSize ofs = 0;
+ MSize mlp = 0;
+ IRType tp = IRT_U8 + 2*lj_fls(step);
+ do {
+ while (ofs + step <= len) {
+ if (mlp >= CREC_COPY_MAXUNROLL) return 0;
+ ml[mlp].ofs = ofs;
+ ml[mlp].tp = tp;
+ mlp++;
+ ofs += step;
+ }
+ step >>= 1;
+ tp -= 2;
+ } while (ofs < len);
+ return mlp;
+}
+
+/*
+** Emit stores for fill list.
+** LJ_TARGET_UNALIGNED: may emit unaligned stores (not marked as such).
+*/
+static void crec_fill_emit(jit_State *J, CRecMemList *ml, MSize mlp,
+ TRef trdst, TRef trfill)
+{
+ MSize i;
+ for (i = 0; i < mlp; i++) {
+ TRef trofs = lj_ir_kintp(J, ml[i].ofs);
+ TRef trdptr = emitir(IRT(IR_ADD, IRT_PTR), trdst, trofs);
+ emitir(IRT(IR_XSTORE, ml[i].tp), trdptr, trfill);
+ }
+}
+
+/* Optimized memory fill. */
+static void crec_fill(jit_State *J, TRef trdst, TRef trlen, TRef trfill,
+ CTSize step)
+{
+ if (tref_isk(trlen)) { /* Length must be constant. */
+ CRecMemList ml[CREC_FILL_MAXUNROLL];
+ MSize mlp;
+ CTSize len = (CTSize)IR(tref_ref(trlen))->i;
+ if (len == 0) return; /* Shortcut. */
+ if (LJ_TARGET_UNALIGNED || step >= CTSIZE_PTR)
+ step = CTSIZE_PTR;
+ if (step * CREC_FILL_MAXUNROLL < len) goto fallback;
+ mlp = crec_fill_unroll(ml, len, step);
+ if (!mlp) goto fallback;
+ if (tref_isk(trfill) || ml[0].tp != IRT_U8)
+ trfill = emitconv(trfill, IRT_INT, IRT_U8, 0);
+ if (ml[0].tp != IRT_U8) { /* Scatter U8 to U16/U32/U64. */
+ if (CTSIZE_PTR == 8 && ml[0].tp == IRT_U64) {
+ if (tref_isk(trfill)) /* Pointless on x64 with zero-extended regs. */
+ trfill = emitconv(trfill, IRT_U64, IRT_U32, 0);
+ trfill = emitir(IRT(IR_MUL, IRT_U64), trfill,
+ lj_ir_kint64(J, U64x(01010101,01010101)));
+ } else {
+ trfill = emitir(IRTI(IR_MUL), trfill,
+ lj_ir_kint(J, ml[0].tp == IRT_U16 ? 0x0101 : 0x01010101));
+ }
+ }
+ crec_fill_emit(J, ml, mlp, trdst, trfill);
+ } else {
+fallback:
+ /* Call memset. Always needs a barrier to disable alias analysis. */
+ lj_ir_call(J, IRCALL_memset, trdst, trfill, trlen); /* Note: arg order! */
+ }
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+}
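+
+/* Illustrative sketch of the scatter above (commentary only): the fill
+** byte is replicated by multiplication, e.g.
+**   0xab * 0x01010101         = 0xabababab          for U32 stores,
+**   0xab * 0x0101010101010101 = 0xabababababababab  for U64 stores,
+** so each XSTORE fills a whole step-sized chunk at once.
+*/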
+
+/* -- Convert C type to C type -------------------------------------------- */
+
+/*
+** This code mirrors the code in lj_cconv.c. It performs the same steps
+** for the trace recorder that lj_cconv.c does for the interpreter.
+**
+** One major difference is that we can get away with far fewer checks
+** here. E.g. checks for casts, constness or correct types can often be
+** omitted, even if they might fail. The interpreter subsequently throws
+** an error, which aborts the trace.
+**
+** All operations are specialized to their C types, so the on-trace
+** outcome must be the same as the outcome in the interpreter. If the
+** interpreter doesn't throw an error, then the trace is correct, too.
+** Care must be taken not to generate invalid (temporary) IR or to
+** trigger asserts.
+*/
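+
+/* Illustrative sketch (commentary only): storing a Lua number into an
+** int8_t field emits the narrowing CONV but no range or constness check;
+** if the interpreter would raise an error for the same operation, that
+** error aborts the trace, so the omitted checks stay sound.
+*/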
+
+/* Determine whether a passed number or cdata number is non-zero. */
+static int crec_isnonzero(CType *s, void *p)
+{
+ if (p == (void *)0)
+ return 0;
+ if (p == (void *)1)
+ return 1;
+ if ((s->info & CTF_FP)) {
+ if (s->size == sizeof(float))
+ return (*(float *)p != 0);
+ else
+ return (*(double *)p != 0);
+ } else {
+ if (s->size == 1)
+ return (*(uint8_t *)p != 0);
+ else if (s->size == 2)
+ return (*(uint16_t *)p != 0);
+ else if (s->size == 4)
+ return (*(uint32_t *)p != 0);
+ else
+ return (*(uint64_t *)p != 0);
+ }
+}
+
+static TRef crec_ct_ct(jit_State *J, CType *d, CType *s, TRef dp, TRef sp,
+ void *svisnz)
+{
+ IRType dt = crec_ct2irt(ctype_ctsG(J2G(J)), d);
+ IRType st = crec_ct2irt(ctype_ctsG(J2G(J)), s);
+ CTSize dsize = d->size, ssize = s->size;
+ CTInfo dinfo = d->info, sinfo = s->info;
+
+ if (ctype_type(dinfo) > CT_MAYCONVERT || ctype_type(sinfo) > CT_MAYCONVERT)
+ goto err_conv;
+
+ /*
+ ** Note: Unlike lj_cconv_ct_ct(), sp holds the _value_ of pointers and
+ ** numbers up to 8 bytes. Otherwise sp holds a pointer.
+ */
+
+ switch (cconv_idx2(dinfo, sinfo)) {
+ /* Destination is a bool. */
+ case CCX(B, B):
+ goto xstore; /* Source operand is already normalized. */
+ case CCX(B, I):
+ case CCX(B, F):
+ if (st != IRT_CDATA) {
+ /* Specialize to the result of a comparison against 0. */
+ TRef zero = (st == IRT_NUM || st == IRT_FLOAT) ? lj_ir_knum(J, 0) :
+ (st == IRT_I64 || st == IRT_U64) ? lj_ir_kint64(J, 0) :
+ lj_ir_kint(J, 0);
+ int isnz = crec_isnonzero(s, svisnz);
+ emitir(IRTG(isnz ? IR_NE : IR_EQ, st), sp, zero);
+ sp = lj_ir_kint(J, isnz);
+ goto xstore;
+ }
+ goto err_nyi;
+
+ /* Destination is an integer. */
+ case CCX(I, B):
+ case CCX(I, I):
+ conv_I_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ /* Extend 32 to 64 bit integer. */
+ if (dsize == 8 && ssize < 8 && !(LJ_64 && (sinfo & CTF_UNSIGNED)))
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st,
+ (sinfo & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
+ else if (dsize < 8 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, 0);
+ else if (st == IRT_INT)
+ sp = lj_opt_narrow_toint(J, sp);
+ xstore:
+ if (dt == IRT_I64 || dt == IRT_U64) lj_needsplit(J);
+ if (dp == 0) return sp;
+ emitir(IRT(IR_XSTORE, dt), dp, sp);
+ break;
+ case CCX(I, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(I, F):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, st, IRCONV_ANY);
+ goto xstore;
+ case CCX(I, P):
+ case CCX(I, A):
+ sinfo = CTINFO(CT_NUM, CTF_UNSIGNED);
+ ssize = CTSIZE_PTR;
+ st = IRT_UINTP;
+ if (((dsize ^ ssize) & 8) == 0) { /* Must insert no-op type conversion. */
+ sp = emitconv(sp, dsize < 4 ? IRT_INT : dt, IRT_PTR, 0);
+ goto xstore;
+ }
+ goto conv_I_I;
+
+ /* Destination is a floating-point number. */
+ case CCX(F, B):
+ case CCX(F, I):
+ conv_F_I:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ sp = emitconv(sp, dt, ssize < 4 ? IRT_INT : st, 0);
+ goto xstore;
+ case CCX(F, C):
+ sp = emitir(IRT(IR_XLOAD, st), sp, 0); /* Load re. */
+ /* fallthrough */
+ case CCX(F, F):
+ conv_F_F:
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ if (dt != st) sp = emitconv(sp, dt, st, 0);
+ goto xstore;
+
+ /* Destination is a complex number. */
+ case CCX(C, I):
+ case CCX(C, F):
+ { /* Clear im. */
+ TRef ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, lj_ir_knum(J, 0));
+ }
+ /* Convert to re. */
+ if ((sinfo & CTF_FP)) goto conv_F_F; else goto conv_F_I;
+
+ case CCX(C, C):
+ if (dt == IRT_CDATA || st == IRT_CDATA) goto err_nyi;
+ {
+ TRef re, im, ptr;
+ re = emitir(IRT(IR_XLOAD, st), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, (ssize >> 1)));
+ im = emitir(IRT(IR_XLOAD, st), ptr, 0);
+ if (dt != st) {
+ re = emitconv(re, dt, st, 0);
+ im = emitconv(im, dt, st, 0);
+ }
+ emitir(IRT(IR_XSTORE, dt), dp, re);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, (dsize >> 1)));
+ emitir(IRT(IR_XSTORE, dt), ptr, im);
+ }
+ break;
+
+ /* Destination is a vector. */
+ case CCX(V, I):
+ case CCX(V, F):
+ case CCX(V, C):
+ case CCX(V, V):
+ goto err_nyi;
+
+ /* Destination is a pointer. */
+ case CCX(P, P):
+ case CCX(P, A):
+ case CCX(P, S):
+ /* There are only 32 bit pointers/addresses on 32 bit machines.
+ ** Also ok on x64, since all 32 bit ops clear the upper part of the reg.
+ */
+ goto xstore;
+ case CCX(P, I):
+ if (st == IRT_CDATA) goto err_nyi;
+ if (!LJ_64 && ssize == 8) /* Truncate from 64 bit integer. */
+ sp = emitconv(sp, IRT_U32, st, 0);
+ goto xstore;
+ case CCX(P, F):
+ if (st == IRT_CDATA) goto err_nyi;
+ /* The signed conversion is cheaper. x64 really has 47 bit pointers. */
+ sp = emitconv(sp, (LJ_64 && dsize == 8) ? IRT_I64 : IRT_U32,
+ st, IRCONV_ANY);
+ goto xstore;
+
+ /* Destination is an array. */
+ case CCX(A, A):
+ /* Destination is a struct/union. */
+ case CCX(S, S):
+ if (dp == 0) goto err_conv;
+ crec_copy(J, dp, sp, lj_ir_kint(J, dsize), d);
+ break;
+
+ default:
+ err_conv:
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ break;
+ }
+ return 0;
+}
+
+/* -- Convert C type to TValue (load) ------------------------------------- */
+
+static TRef crec_tv_ct(jit_State *J, CType *s, CTypeID sid, TRef sp)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ IRType t = crec_ct2irt(cts, s);
+ CTInfo sinfo = s->info;
+ if (ctype_isnum(sinfo)) {
+ TRef tr;
+ if (t == IRT_CDATA)
+ goto err_nyi; /* NYI: copyval of >64 bit integers. */
+ tr = emitir(IRT(IR_XLOAD, t), sp, 0);
+ if (t == IRT_FLOAT || t == IRT_U32) { /* Keep uint32_t/float as numbers. */
+ return emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I64 || t == IRT_U64) { /* Box 64 bit integer. */
+ sp = tr;
+ lj_needsplit(J);
+ } else if ((sinfo & CTF_BOOL)) {
+ /* Assume not equal to zero. Fixup and emit pending guard later. */
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ return tr;
+ }
+ } else if (ctype_isptr(sinfo) || ctype_isenum(sinfo)) {
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Box pointers and enums. */
+ } else if (ctype_isrefarray(sinfo) || ctype_isstruct(sinfo)) {
+ cts->L = J->L;
+ sid = lj_ctype_intern(cts, CTINFO_REF(sid), CTSIZE_PTR); /* Create ref. */
+ } else if (ctype_iscomplex(sinfo)) { /* Unbox/box complex. */
+ ptrdiff_t esz = (ptrdiff_t)(s->size >> 1);
+ TRef ptr, tr1, tr2, dp;
+ dp = emitir(IRTG(IR_CNEW, IRT_CDATA), lj_ir_kint(J, sid), TREF_NIL);
+ tr1 = emitir(IRT(IR_XLOAD, t), sp, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, esz));
+ tr2 = emitir(IRT(IR_XLOAD, t), ptr, 0);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)));
+ emitir(IRT(IR_XSTORE, t), ptr, tr1);
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), dp, lj_ir_kintp(J, sizeof(GCcdata)+esz));
+ emitir(IRT(IR_XSTORE, t), ptr, tr2);
+ return dp;
+ } else {
+ /* NYI: copyval of vectors. */
+ err_nyi:
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ /* Box pointer, ref, enum or 64 bit integer. */
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, sid), sp);
+}
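+
+/* Illustrative sketch (commentary only): loads keep uint32_t and float
+** values as Lua numbers via CONV to IRT_NUM, box pointers, enums and
+** 64 bit integers as cdata via CNEWI, and return complex values as a
+** freshly allocated cdata copy via CNEW.
+*/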
+
+/* -- Convert TValue to C type (store) ------------------------------------ */
+
+static TRef crec_ct_tv(jit_State *J, CType *d, TRef dp, TRef sp, cTValue *sval)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID sid = CTID_P_VOID;
+ void *svisnz = 0;
+ CType *s;
+ if (LJ_LIKELY(tref_isinteger(sp))) {
+ sid = CTID_INT32;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isnum(sp)) {
+ sid = CTID_DOUBLE;
+ svisnz = (void *)(intptr_t)(tvisint(sval)?(intV(sval)!=0):!tviszero(sval));
+ } else if (tref_isbool(sp)) {
+ sp = lj_ir_kint(J, tref_istrue(sp) ? 1 : 0);
+ sid = CTID_BOOL;
+ } else if (tref_isnil(sp)) {
+ sp = lj_ir_kptr(J, NULL);
+ } else if (tref_isudata(sp)) {
+ GCudata *ud = udataV(sval);
+ if (ud->udtype == UDTYPE_IO_FILE || ud->udtype == UDTYPE_BUFFER) {
+ TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), sp, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, ud->udtype));
+ sp = emitir(IRT(IR_FLOAD, IRT_PTR), sp,
+ ud->udtype == UDTYPE_IO_FILE ? IRFL_UDATA_FILE :
+ IRFL_SBUF_R);
+ } else {
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCudata)));
+ }
+ } else if (tref_isstr(sp)) {
+ if (ctype_isenum(d->info)) { /* Match string against enum constant. */
+ GCstr *str = strV(sval);
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, d, str, &ofs);
+ /* Specialize to the name of the enum constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), sp, lj_ir_kstr(J, str));
+ if (cct && ctype_isconstval(cct->info)) {
+ lj_assertJ(ctype_child(cts, cct)->size == 4,
+ "only 32 bit const supported"); /* NYI */
+ svisnz = (void *)(intptr_t)(ofs != 0);
+ sp = lj_ir_kint(J, (int32_t)ofs);
+ sid = ctype_cid(cct->info);
+ } /* else: interpreter will throw. */
+ } else if (ctype_isrefarray(d->info)) { /* Copy string to array. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE); /* NYI */
+ } else { /* Otherwise pass the string data as a const char[]. */
+ /* Don't use STRREF. It folds with SNEW, which loses the trailing NUL. */
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCstr)));
+ sid = CTID_A_CCHAR;
+ }
+ } else if (tref_islightud(sp)) {
+#if LJ_64
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+#endif
+ } else { /* NYI: tref_istab(sp). */
+ IRType t;
+ sid = argv2cdata(J, sp, sval)->ctypeid;
+ s = ctype_raw(cts, sid);
+ svisnz = cdataptr(cdataV(sval));
+ if (ctype_isfunc(s->info)) {
+ sid = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|sid), CTSIZE_PTR);
+ s = ctype_get(cts, sid);
+ t = IRT_PTR;
+ } else {
+ t = crec_ct2irt(cts, s);
+ }
+ if (ctype_isptr(s->info)) {
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_PTR);
+ if (ctype_isref(s->info)) {
+ svisnz = *(void **)svisnz;
+ s = ctype_rawchild(cts, s);
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ t = crec_ct2irt(cts, s);
+ } else {
+ goto doconv;
+ }
+ } else if (t == IRT_I64 || t == IRT_U64) {
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto doconv;
+ } else if (t == IRT_INT || t == IRT_U32) {
+ if (ctype_isenum(s->info)) s = ctype_child(cts, s);
+ sp = emitir(IRT(IR_FLOAD, t), sp, IRFL_CDATA_INT);
+ goto doconv;
+ } else {
+ sp = emitir(IRT(IR_ADD, IRT_PTR), sp, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isnum(s->info) && t != IRT_CDATA)
+ sp = emitir(IRT(IR_XLOAD, t), sp, 0); /* Load number value. */
+ goto doconv;
+ }
+ s = ctype_get(cts, sid);
+doconv:
+ if (ctype_isenum(d->info)) d = ctype_child(cts, d);
+ return crec_ct_ct(J, d, s, dp, sp, svisnz);
+}
+
+/* -- C data metamethods -------------------------------------------------- */
+
+/* This would be rather difficult in FOLD, so do it here:
+** (base+k)+(idx*sz)+ofs ==> (base+idx*sz)+(ofs+k)
+** (base+(idx+k)*sz)+ofs ==> (base+idx*sz)+(ofs+k*sz)
+*/
+static TRef crec_reassoc_ofs(jit_State *J, TRef tr, ptrdiff_t *ofsp, MSize sz)
+{
+ IRIns *ir = IR(tref_ref(tr));
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && irref_isk(ir->op2) &&
+ (ir->o == IR_ADD || ir->o == IR_ADDOV || ir->o == IR_SUBOV)) {
+ IRIns *irk = IR(ir->op2);
+ ptrdiff_t k;
+ if (LJ_64 && irk->o == IR_KINT64)
+ k = (ptrdiff_t)ir_kint64(irk)->u64 * sz;
+ else
+ k = (ptrdiff_t)irk->i * sz;
+ if (ir->o == IR_SUBOV) *ofsp -= k; else *ofsp += k;
+ tr = ir->op1; /* Not a TRef, but the caller doesn't care. */
+ }
+ return tr;
+}
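+
+/* Illustrative sketch (commentary only): for p[i+1] with 4 byte elements
+** the recorder sees (base + (idx+1)*4) + ofs and rewrites it to
+** (base + idx*4) + (ofs + 4), so the constant part of the index ends up
+** in the displacement instead of a separate add.
+*/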
+
+/* Tailcall to function. */
+static void crec_tailcall(jit_State *J, RecordFFData *rd, cTValue *tv)
+{
+ TRef kfunc = lj_ir_kfunc(J, funcV(tv));
+#if LJ_FR2
+ J->base[-2] = kfunc;
+ J->base[-1] = TREF_FRAME;
+#else
+ J->base[-1] = kfunc | TREF_FRAME;
+#endif
+ rd->nres = -1; /* Pending tailcall. */
+}
+
+/* Record ctype __index/__newindex metamethods. */
+static void crec_index_meta(jit_State *J, CTState *cts, CType *ct,
+ RecordFFData *rd)
+{
+ CTypeID id = ctype_typeid(cts, ct);
+ cTValue *tv = lj_ctype_meta(cts, id, rd->data ? MM_newindex : MM_index);
+ if (!tv)
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ if (tvisfunc(tv)) {
+ crec_tailcall(J, rd, tv);
+ } else if (rd->data == 0 && tvistab(tv) && tref_isstr(J->base[1])) {
+ /* Specialize to result of __index lookup. */
+ cTValue *o = lj_tab_get(J->L, tabV(tv), &rd->argv[1]);
+ J->base[0] = lj_record_constify(J, o);
+ if (!J->base[0])
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /* Always specialize to the key. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
+ } else {
+ /* NYI: resolving of non-function metamethods. */
+ /* NYI: non-string keys for __index table. */
+ /* NYI: stores to __newindex table. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+/* Record bitfield load/store. */
+static void crec_index_bf(jit_State *J, RecordFFData *rd, TRef ptr, CTInfo info)
+{
+ IRType t = IRT_I8 + 2*lj_fls(ctype_bitcsz(info)) + ((info&CTF_UNSIGNED)?1:0);
+ TRef tr = emitir(IRT(IR_XLOAD, t), ptr, 0);
+ CTSize pos = ctype_bitpos(info), bsz = ctype_bitbsz(info), shift = 32 - bsz;
+ lj_assertJ(t <= IRT_U32, "only 32 bit bitfields supported"); /* NYI */
+ if (rd->data == 0) { /* __index metamethod. */
+ if ((info & CTF_BOOL)) {
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << pos))));
+ /* Assume not equal to zero. Fixup and emit pending guard later. */
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+ J->postproc = LJ_POST_FIXGUARD;
+ tr = TREF_TRUE;
+ } else if (!(info & CTF_UNSIGNED)) {
+ tr = emitir(IRTI(IR_BSHL), tr, lj_ir_kint(J, shift - pos));
+ tr = emitir(IRTI(IR_BSAR), tr, lj_ir_kint(J, shift));
+ } else {
+ lj_assertJ(bsz < 32, "unexpected full bitfield index");
+ tr = emitir(IRTI(IR_BSHR), tr, lj_ir_kint(J, pos));
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << bsz)-1)));
+ /* We can omit the U32 to NUM conversion, since bsz < 32. */
+ }
+ J->base[0] = tr;
+ } else { /* __newindex metamethod. */
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_get(cts,
+ (info & CTF_BOOL) ? CTID_BOOL :
+ (info & CTF_UNSIGNED) ? CTID_UINT32 : CTID_INT32);
+ int32_t mask = (int32_t)(((1u << bsz)-1) << pos);
+ TRef sp = crec_ct_tv(J, ct, 0, J->base[2], &rd->argv[2]);
+ sp = emitir(IRTI(IR_BSHL), sp, lj_ir_kint(J, pos));
+ /* Use of the target type avoids forwarding conversions. */
+ sp = emitir(IRT(IR_BAND, t), sp, lj_ir_kint(J, mask));
+ tr = emitir(IRT(IR_BAND, t), tr, lj_ir_kint(J, (int32_t)~mask));
+ tr = emitir(IRT(IR_BOR, t), tr, sp);
+ emitir(IRT(IR_XSTORE, t), ptr, tr);
+ rd->nres = 0;
+ J->needsnap = 1;
+ }
+}
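+
+/* Illustrative sketch (commentary only): for "unsigned f:3" at pos 0 a
+** load emits BSHR by pos, then BAND with (1<<3)-1 = 7; a signed field
+** instead shifts left by 32-bsz-pos and arithmetic-right by 32-bsz to
+** sign-extend. Stores clear the field with ~mask and BOR the new bits in.
+*/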
+
+void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd)
+{
+ TRef idx, ptr = J->base[0];
+ ptrdiff_t ofs = sizeof(GCcdata);
+ GCcdata *cd = argv2cdata(J, ptr, &rd->argv[0]);
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ CTypeID sid = 0;
+
+ /* Resolve pointer or reference for cdata object. */
+ if (ctype_isptr(ct->info)) {
+ IRType t = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ if (ctype_isref(ct->info)) ct = ctype_rawchild(cts, ct);
+ ptr = emitir(IRT(IR_FLOAD, t), ptr, IRFL_CDATA_PTR);
+ ofs = 0;
+ ptr = crec_reassoc_ofs(J, ptr, &ofs, 1);
+ }
+
+again:
+ idx = J->base[1];
+ if (tref_isnumber(idx)) {
+ idx = lj_opt_narrow_cindex(J, idx);
+ if (ctype_ispointer(ct->info)) {
+ CTSize sz;
+ integer_key:
+ if ((ct->info & CTF_COMPLEX))
+ idx = emitir(IRT(IR_BAND, IRT_INTP), idx, lj_ir_kintp(J, 1));
+ sz = lj_ctype_size(cts, (sid = ctype_cid(ct->info)));
+ idx = crec_reassoc_ofs(J, idx, &ofs, sz);
+#if LJ_TARGET_ARM || LJ_TARGET_PPC
+ /* Hoist base add to allow fusion of index/shift into operands. */
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_LOOP) && ofs
+#if LJ_TARGET_ARM
+ && (sz == 1 || sz == 4)
+#endif
+ ) {
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+ ofs = 0;
+ }
+#endif
+ idx = emitir(IRT(IR_MUL, IRT_INTP), idx, lj_ir_kintp(J, sz));
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), idx, ptr);
+ }
+ } else if (tref_iscdata(idx)) {
+ GCcdata *cdk = cdataV(&rd->argv[1]);
+ CType *ctk = ctype_raw(cts, cdk->ctypeid);
+ IRType t = crec_ct2irt(cts, ctk);
+ if (ctype_ispointer(ct->info) && t >= IRT_I8 && t <= IRT_U64) {
+ if (ctk->size == 8) {
+ idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT64);
+ } else if (ctk->size == 4) {
+ idx = emitir(IRT(IR_FLOAD, t), idx, IRFL_CDATA_INT);
+ } else {
+ idx = emitir(IRT(IR_ADD, IRT_PTR), idx,
+ lj_ir_kintp(J, sizeof(GCcdata)));
+ idx = emitir(IRT(IR_XLOAD, t), idx, 0);
+ }
+ if (LJ_64 && ctk->size < sizeof(intptr_t) && !(ctk->info & CTF_UNSIGNED))
+ idx = emitconv(idx, IRT_INTP, IRT_INT, IRCONV_SEXT);
+ if (!LJ_64 && ctk->size > sizeof(intptr_t)) {
+ idx = emitconv(idx, IRT_INTP, t, 0);
+ lj_needsplit(J);
+ }
+ goto integer_key;
+ }
+ } else if (tref_isstr(idx)) {
+ GCstr *name = strV(&rd->argv[1]);
+ if (cd && cd->ctypeid == CTID_CTYPEID)
+ ct = ctype_raw(cts, crec_constructor(J, cd, ptr));
+ if (ctype_isstruct(ct->info)) {
+ CTSize fofs;
+ CType *fct;
+ fct = lj_ctype_getfield(cts, ct, name, &fofs);
+ if (fct) {
+ ofs += (ptrdiff_t)fofs;
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (ctype_isconstval(fct->info)) {
+ if (fct->size >= 0x80000000u &&
+ (ctype_child(cts, fct)->info & CTF_UNSIGNED)) {
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)fct->size);
+ return;
+ }
+ J->base[0] = lj_ir_kint(J, (int32_t)fct->size);
+ return; /* Interpreter will throw for newindex. */
+ } else if (ctype_isbitfield(fct->info)) {
+ if (ofs)
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+ crec_index_bf(J, rd, ptr, fct->info);
+ return;
+ } else {
+ lj_assertJ(ctype_isfield(fct->info), "field expected");
+ sid = ctype_cid(fct->info);
+ }
+ }
+ } else if (ctype_iscomplex(ct->info)) {
+ if (name->len == 2 &&
+ ((strdata(name)[0] == 'r' && strdata(name)[1] == 'e') ||
+ (strdata(name)[0] == 'i' && strdata(name)[1] == 'm'))) {
+ /* Always specialize to the field name. */
+ emitir(IRTG(IR_EQ, IRT_STR), idx, lj_ir_kstr(J, name));
+ if (strdata(name)[0] == 'i') ofs += (ct->size >> 1);
+ sid = ctype_cid(ct->info);
+ }
+ }
+ }
+ if (!sid) {
+ if (ctype_isptr(ct->info)) { /* Automatically perform '->'. */
+ CType *cct = ctype_rawchild(cts, ct);
+ if (ctype_isstruct(cct->info)) {
+ ct = cct;
+ cd = NULL;
+ if (tref_isstr(idx)) goto again;
+ }
+ }
+ crec_index_meta(J, cts, ct, rd);
+ return;
+ }
+
+ if (ofs)
+ ptr = emitir(IRT(IR_ADD, IRT_PTR), ptr, lj_ir_kintp(J, ofs));
+
+ /* Resolve reference for field. */
+ ct = ctype_get(cts, sid);
+ if (ctype_isref(ct->info)) {
+ ptr = emitir(IRT(IR_XLOAD, IRT_PTR), ptr, 0);
+ sid = ctype_cid(ct->info);
+ ct = ctype_get(cts, sid);
+ }
+
+ while (ctype_isattrib(ct->info))
+ ct = ctype_child(cts, ct); /* Skip attributes. */
+
+ if (rd->data == 0) { /* __index metamethod. */
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else { /* __newindex metamethod. */
+ rd->nres = 0;
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+}
+
+/* Record setting a finalizer. */
+static void crec_finalizer(jit_State *J, TRef trcd, TRef trfin, cTValue *fin)
+{
+ if (tvisgcv(fin)) {
+ if (!trfin) trfin = lj_ir_kptr(J, gcval(fin));
+ } else if (tvisnil(fin)) {
+ trfin = lj_ir_kptr(J, NULL);
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ lj_ir_call(J, IRCALL_lj_cdata_setfin, trcd,
+ trfin, lj_ir_kint(J, (int32_t)itype(fin)));
+ J->needsnap = 1;
+}
+
+/* Record cdata allocation. */
+static void crec_alloc(jit_State *J, RecordFFData *rd, CTypeID id)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ CType *d = ctype_raw(cts, id);
+ TRef trcd, trid = lj_ir_kint(J, id);
+ cTValue *fin;
+ /* Use special instruction to box pointer or 32/64 bit integer. */
+ if (ctype_isptr(info) || (ctype_isinteger(info) && (sz == 4 || sz == 8))) {
+ TRef sp = J->base[1] ? crec_ct_tv(J, d, 0, J->base[1], &rd->argv[1]) :
+ ctype_isptr(info) ? lj_ir_kptr(J, NULL) :
+ sz == 4 ? lj_ir_kint(J, 0) :
+ (lj_needsplit(J), lj_ir_kint64(J, 0));
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, sp);
+ return;
+ } else {
+ TRef trsz = TREF_NIL;
+ if ((info & CTF_VLA)) { /* Calculate VLA/VLS size at runtime. */
+ CTSize sz0, sz1;
+ if (!J->base[1] || J->base[2])
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init VLA/VLS. */
+ trsz = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0,
+ J->base[1], &rd->argv[1]);
+ sz0 = lj_ctype_vlsize(cts, d, 0);
+ sz1 = lj_ctype_vlsize(cts, d, 1);
+ trsz = emitir(IRTGI(IR_MULOV), trsz, lj_ir_kint(J, (int32_t)(sz1-sz0)));
+ trsz = emitir(IRTGI(IR_ADDOV), trsz, lj_ir_kint(J, (int32_t)sz0));
+ J->base[1] = 0; /* Simplify logic below. */
+ } else if (ctype_align(info) > CT_MEMALIGN) {
+ trsz = lj_ir_kint(J, sz);
+ }
+ trcd = emitir(IRTG(IR_CNEW, IRT_CDATA), trid, trsz);
+ if (sz > 128 || (info & CTF_VLA)) {
+ TRef dp;
+ CTSize align;
+ special: /* Only handle bulk zero-fill for large/VLA/VLS types. */
+ if (J->base[1])
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init large/VLA/VLS types. */
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
+ if (trsz == TREF_NIL) trsz = lj_ir_kint(J, sz);
+ align = ctype_align(info);
+ if (align < CT_MEMALIGN) align = CT_MEMALIGN;
+ crec_fill(J, dp, trsz, lj_ir_kint(J, 0), (1u << align));
+ } else if (J->base[1] && !J->base[2] &&
+ !lj_cconv_multi_init(cts, d, &rd->argv[1])) {
+ goto single_init;
+ } else if (ctype_isarray(d->info)) {
+ CType *dc = ctype_rawchild(cts, d); /* Array element type. */
+ CTSize ofs, esize = dc->size;
+ TRef sp = 0;
+ TValue tv;
+ TValue *sval = &tv;
+ MSize i;
+ tv.u64 = 0;
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info)) ||
+ esize * CREC_FILL_MAXUNROLL < sz)
+ goto special;
+ for (i = 1, ofs = 0; ofs < sz; ofs += esize) {
+ TRef dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, ofs + sizeof(GCcdata)));
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else if (i != 2) {
+ sp = ctype_isnum(dc->info) ? lj_ir_kint(J, 0) : TREF_NIL;
+ }
+ crec_ct_tv(J, dc, dp, sp, sval);
+ }
+ } else if (ctype_isstruct(d->info)) {
+ CTypeID fid;
+ MSize i = 1;
+ if (!J->base[1]) { /* Handle zero-fill of struct-of-NYI. */
+ fid = d->sib;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *dc;
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ dc = ctype_rawchild(cts, df); /* Field type. */
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) ||
+ ctype_isenum(dc->info)))
+ goto special;
+ } else if (!ctype_isconstval(df->info)) {
+ goto special;
+ }
+ }
+ }
+ fid = d->sib;
+ while (fid) {
+ CType *df = ctype_get(cts, fid);
+ fid = df->sib;
+ if (ctype_isfield(df->info)) {
+ CType *dc;
+ TRef sp, dp;
+ TValue tv;
+ TValue *sval = &tv;
+ setintV(&tv, 0);
+ if (!gcref(df->name)) continue; /* Ignore unnamed fields. */
+ dc = ctype_rawchild(cts, df); /* Field type. */
+ if (!(ctype_isnum(dc->info) || ctype_isptr(dc->info) ||
+ ctype_isenum(dc->info)))
+ lj_trace_err(J, LJ_TRERR_NYICONV); /* NYI: init aggregates. */
+ if (J->base[i]) {
+ sp = J->base[i];
+ sval = &rd->argv[i];
+ i++;
+ } else {
+ sp = ctype_isptr(dc->info) ? TREF_NIL : lj_ir_kint(J, 0);
+ }
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd,
+ lj_ir_kintp(J, df->size + sizeof(GCcdata)));
+ crec_ct_tv(J, dc, dp, sp, sval);
+ if ((d->info & CTF_UNION)) {
+ if (d->size != dc->size) /* NYI: partial init of union. */
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ break;
+ }
+ } else if (!ctype_isconstval(df->info)) {
+ /* NYI: init bitfields and sub-structures. */
+ lj_trace_err(J, LJ_TRERR_NYICONV);
+ }
+ }
+ } else {
+ TRef dp;
+ single_init:
+ dp = emitir(IRT(IR_ADD, IRT_PTR), trcd, lj_ir_kintp(J, sizeof(GCcdata)));
+ if (J->base[1]) {
+ crec_ct_tv(J, d, dp, J->base[1], &rd->argv[1]);
+ } else {
+ TValue tv;
+ tv.u64 = 0;
+ crec_ct_tv(J, d, dp, lj_ir_kint(J, 0), &tv);
+ }
+ }
+ }
+ J->base[0] = trcd;
+ /* Handle __gc metamethod. */
+ fin = lj_ctype_meta(cts, id, MM_gc);
+ if (fin)
+ crec_finalizer(J, trcd, 0, fin);
+}
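+
+/* Illustrative sketch (commentary only): for a VLA like
+** ffi.new("int[?]", n) the runtime size is computed as
+**   sz0 + n*(sz1-sz0), with sz0/sz1 from lj_ctype_vlsize(d, 0|1),
+** using checked MULOV/ADDOV so an overflowing size exits the trace.
+*/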
+
+/* Record argument conversions. */
+static TRef crec_call_args(jit_State *J, RecordFFData *rd,
+ CTState *cts, CType *ct)
+{
+ TRef args[CCI_NARGS_MAX];
+ CTypeID fid;
+ MSize i, n;
+ TRef tr, *base;
+ cTValue *o;
+#if LJ_TARGET_X86
+#if LJ_ABI_WIN
+ TRef *arg0 = NULL, *arg1 = NULL;
+#endif
+ int ngpr = 0;
+ if (ctype_cconv(ct->info) == CTCC_THISCALL)
+ ngpr = 1;
+ else if (ctype_cconv(ct->info) == CTCC_FASTCALL)
+ ngpr = 2;
+#endif
+
+ /* Skip initial attributes. */
+ fid = ct->sib;
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (!ctype_isattrib(ctf->info)) break;
+ fid = ctf->sib;
+ }
+ args[0] = TREF_NIL;
+ for (n = 0, base = J->base+1, o = rd->argv+1; *base; n++, base++, o++) {
+ CTypeID did;
+ CType *d;
+
+ if (n >= CCI_NARGS_MAX)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+
+ if (fid) { /* Get argument type from field. */
+ CType *ctf = ctype_get(cts, fid);
+ fid = ctf->sib;
+ lj_assertJ(ctype_isfield(ctf->info), "field expected");
+ did = ctype_cid(ctf->info);
+ } else {
+ if (!(ct->info & CTF_VARARG))
+ lj_trace_err(J, LJ_TRERR_NYICALL); /* Too many arguments. */
+ did = lj_ccall_ctid_vararg(cts, o); /* Infer vararg type. */
+ }
+ d = ctype_raw(cts, did);
+ if (!(ctype_isnum(d->info) || ctype_isptr(d->info) ||
+ ctype_isenum(d->info)))
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ tr = crec_ct_tv(J, d, 0, *base, o);
+ if (ctype_isinteger_or_bool(d->info)) {
+ if (d->size < 4) {
+ if ((d->info & CTF_UNSIGNED))
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_U8 : IRT_U16, 0);
+ else
+ tr = emitconv(tr, IRT_INT, d->size==1 ? IRT_I8 : IRT_I16,IRCONV_SEXT);
+ }
+ } else if (LJ_SOFTFP32 && ctype_isfp(d->info) && d->size > 4) {
+ lj_needsplit(J);
+ }
+#if LJ_TARGET_X86
+ /* 64 bit args must not end up in registers for fastcall/thiscall. */
+#if LJ_ABI_WIN
+ if (!ctype_isfp(d->info)) {
+ /* Sigh, the Windows/x86 ABI allows reordering across 64 bit args. */
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ if (ngpr) {
+ arg0 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ if (ngpr) {
+ arg1 = &args[n]; args[n++] = TREF_NIL; ngpr--;
+ }
+ }
+ } else {
+ if (arg0) { *arg0 = tr; arg0 = NULL; n--; continue; }
+ if (arg1) { *arg1 = tr; arg1 = NULL; n--; continue; }
+ if (ngpr) ngpr--;
+ }
+ }
+#else
+ if (!ctype_isfp(d->info) && ngpr) {
+ if (tref_typerange(tr, IRT_I64, IRT_U64)) {
+ /* No reordering for other x86 ABIs. Simply add alignment args. */
+ do { args[n++] = TREF_NIL; } while (--ngpr);
+ } else {
+ ngpr--;
+ }
+ }
+#endif
+#endif
+ args[n] = tr;
+ }
+ tr = args[0];
+ for (i = 1; i < n; i++)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, args[i]);
+ return tr;
+}
+
+/* Create a snapshot for the caller, simulating a 'false' return value. */
+static void crec_snap_caller(jit_State *J)
+{
+ lua_State *L = J->L;
+ TValue *base = L->base, *top = L->top;
+ const BCIns *pc = J->pc;
+ TRef ftr = J->base[-1-LJ_FR2];
+ ptrdiff_t delta;
+ if (!frame_islua(base-1) || J->framedepth <= 0)
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->pc = frame_pc(base-1); delta = 1+LJ_FR2+bc_a(J->pc[-1]);
+ L->top = base; L->base = base - delta;
+ J->base[-1-LJ_FR2] = TREF_FALSE;
+ J->base -= delta; J->baseslot -= (BCReg)delta;
+ J->maxslot = (BCReg)delta-LJ_FR2; J->framedepth--;
+ lj_snap_add(J);
+ L->base = base; L->top = top;
+ J->framedepth++; J->maxslot = 1;
+ J->base += delta; J->baseslot += (BCReg)delta;
+ J->base[-1-LJ_FR2] = ftr; J->pc = pc;
+}
+
+/* Record function call. */
+static int crec_call(jit_State *J, RecordFFData *rd, GCcdata *cd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ct = ctype_raw(cts, cd->ctypeid);
+ IRType tp = IRT_PTR;
+ if (ctype_isptr(ct->info)) {
+ tp = (LJ_64 && ct->size == 8) ? IRT_P64 : IRT_P32;
+ ct = ctype_rawchild(cts, ct);
+ }
+ if (ctype_isfunc(ct->info)) {
+ TRef func = emitir(IRT(IR_FLOAD, tp), J->base[0], IRFL_CDATA_PTR);
+ CType *ctr = ctype_rawchild(cts, ct);
+ IRType t = crec_ct2irt(cts, ctr);
+ TRef tr;
+ TValue tv;
+ /* Check for blacklisted C functions that might call a callback. */
+ tv.u64 = ((uintptr_t)cdata_getptr(cdataptr(cd), (LJ_64 && tp == IRT_P64) ? 8 : 4) >> 2) | U64x(800000000, 00000000);
+ if (tvistrue(lj_tab_get(J->L, cts->miscmap, &tv)))
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ if (ctype_isvoid(ctr->info)) {
+ t = IRT_NIL;
+ rd->nres = 0;
+ } else if (!(ctype_isnum(ctr->info) || ctype_isptr(ctr->info) ||
+ ctype_isenum(ctr->info)) || t == IRT_CDATA) {
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ }
+ if ((ct->info & CTF_VARARG)
+#if LJ_TARGET_X86
+ || ctype_cconv(ct->info) != CTCC_CDECL
+#endif
+ )
+ func = emitir(IRT(IR_CARG, IRT_NIL), func,
+ lj_ir_kint(J, ctype_typeid(cts, ct)));
+ tr = emitir(IRT(IR_CALLXS, t), crec_call_args(J, rd, cts, ct), func);
+ if (ctype_isbool(ctr->info)) {
+ if (frame_islua(J->L->base-1) && bc_b(frame_pc(J->L->base-1)[-1]) == 1) {
+ /* Don't check result if ignored. */
+ tr = TREF_NIL;
+ } else {
+ crec_snap_caller(J);
+#if LJ_TARGET_X86ORX64
+ /* Note: only the x86/x64 backend supports U8 and only for EQ(tr, 0). */
+ lj_ir_set(J, IRTG(IR_NE, IRT_U8), tr, lj_ir_kint(J, 0));
+#else
+ lj_ir_set(J, IRTGI(IR_NE), tr, lj_ir_kint(J, 0));
+#endif
+ J->postproc = LJ_POST_FIXGUARDSNAP;
+ tr = TREF_TRUE;
+ }
+ } else if (t == IRT_PTR || (LJ_64 && t == IRT_P32) ||
+ t == IRT_I64 || t == IRT_U64 || ctype_isenum(ctr->info)) {
+ TRef trid = lj_ir_kint(J, ctype_cid(ct->info));
+ tr = emitir(IRTG(IR_CNEWI, IRT_CDATA), trid, tr);
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ } else if (t == IRT_FLOAT || t == IRT_U32) {
+ tr = emitconv(tr, IRT_NUM, t, 0);
+ } else if (t == IRT_I8 || t == IRT_I16) {
+ tr = emitconv(tr, IRT_INT, t, IRCONV_SEXT);
+ } else if (t == IRT_U8 || t == IRT_U16) {
+ tr = emitconv(tr, IRT_INT, t, 0);
+ }
+ J->base[0] = tr;
+ J->needsnap = 1;
+ return 1;
+ }
+ return 0;
+}
+
+void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ GCcdata *cd = argv2cdata(J, J->base[0], &rd->argv[0]);
+ CTypeID id = cd->ctypeid;
+ CType *ct;
+ cTValue *tv;
+ MMS mm = MM_call;
+ if (id == CTID_CTYPEID) {
+ id = crec_constructor(J, cd, J->base[0]);
+ mm = MM_new;
+ } else if (crec_call(J, rd, cd)) {
+ return;
+ }
+ /* Record ctype __call/__new metamethod. */
+ ct = ctype_raw(cts, id);
+ tv = lj_ctype_meta(cts, ctype_isptr(ct->info) ? ctype_cid(ct->info) : id, mm);
+ if (tv) {
+ if (tvisfunc(tv)) {
+ crec_tailcall(J, rd, tv);
+ return;
+ }
+ } else if (mm == MM_new) {
+ crec_alloc(J, rd, id);
+ return;
+ }
+ /* No metamethod or NYI: non-function metamethods. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+}
+
+static TRef crec_arith_int64(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ if (sp[0] && sp[1] && ctype_isnum(s[0]->info) && ctype_isnum(s[1]->info)) {
+ IRType dt;
+ CTypeID id;
+ TRef tr;
+ MSize i;
+ IROp op;
+ lj_needsplit(J);
+ if (((s[0]->info & CTF_UNSIGNED) && s[0]->size == 8) ||
+ ((s[1]->info & CTF_UNSIGNED) && s[1]->size == 8)) {
+ dt = IRT_U64; id = CTID_UINT64;
+ } else {
+ dt = IRT_I64; id = CTID_INT64;
+ if (mm < MM_add &&
+ !((s[0]->info | s[1]->info) & CTF_FP) &&
+ s[0]->size == 4 && s[1]->size == 4) { /* Try to narrow comparison. */
+ if (!((s[0]->info ^ s[1]->info) & CTF_UNSIGNED) ||
+ (tref_isk(sp[1]) && IR(tref_ref(sp[1]))->i >= 0)) {
+ dt = (s[0]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
+ goto comp;
+ } else if (tref_isk(sp[0]) && IR(tref_ref(sp[0]))->i >= 0) {
+ dt = (s[1]->info & CTF_UNSIGNED) ? IRT_U32 : IRT_INT;
+ goto comp;
+ }
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ IRType st = tref_type(sp[i]);
+ if (st == IRT_NUM || st == IRT_FLOAT)
+ sp[i] = emitconv(sp[i], dt, st, IRCONV_ANY);
+ else if (!(st == IRT_I64 || st == IRT_U64))
+ sp[i] = emitconv(sp[i], dt, IRT_INT,
+ (s[i]->info & CTF_UNSIGNED) ? 0 : IRCONV_SEXT);
+ }
+ if (mm < MM_add) {
+ comp:
+ /* Assume true comparison. Fixup and emit pending guard later. */
+ if (mm == MM_eq) {
+ op = IR_EQ;
+ } else {
+ op = mm == MM_lt ? IR_LT : IR_LE;
+ if (dt == IRT_U32 || dt == IRT_U64)
+ op += (IR_ULT-IR_LT);
+ }
+ lj_ir_set(J, IRTG(op, dt), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, dt), sp[0], sp[1]);
+ }
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+ return 0;
+}
+
+static TRef crec_arith_ptr(jit_State *J, TRef *sp, CType **s, MMS mm)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *ctp = s[0];
+ if (!(sp[0] && sp[1])) return 0;
+ if (ctype_isptr(ctp->info) || ctype_isrefarray(ctp->info)) {
+ if ((mm == MM_sub || mm == MM_eq || mm == MM_lt || mm == MM_le) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ if (mm == MM_sub) { /* Pointer difference. */
+ TRef tr;
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ if (sz == 0 || (sz & (sz-1)) != 0)
+ return 0; /* NYI: integer division. */
+ tr = emitir(IRT(IR_SUB, IRT_INTP), sp[0], sp[1]);
+ tr = emitir(IRT(IR_BSAR, IRT_INTP), tr, lj_ir_kint(J, lj_fls(sz)));
+#if LJ_64
+ tr = emitconv(tr, IRT_NUM, IRT_INTP, 0);
+#endif
+ return tr;
+ } else { /* Pointer comparison (unsigned). */
+ /* Assume true comparison. Fix up and emit the pending guard later. */
+ IROp op = mm == MM_eq ? IR_EQ : mm == MM_lt ? IR_ULT : IR_ULE;
+ lj_ir_set(J, IRTG(op, IRT_PTR), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ }
+ }
+ if (!((mm == MM_add || mm == MM_sub) && ctype_isnum(s[1]->info)))
+ return 0;
+ } else if (mm == MM_add && ctype_isnum(ctp->info) &&
+ (ctype_isptr(s[1]->info) || ctype_isrefarray(s[1]->info))) {
+ TRef tr = sp[0]; sp[0] = sp[1]; sp[1] = tr; /* Swap pointer and index. */
+ ctp = s[1];
+ } else {
+ return 0;
+ }
+ {
+ TRef tr = sp[1];
+ IRType t = tref_type(tr);
+ CTSize sz = lj_ctype_size(cts, ctype_cid(ctp->info));
+ CTypeID id;
+#if LJ_64
+ if (t == IRT_NUM || t == IRT_FLOAT)
+ tr = emitconv(tr, IRT_INTP, t, IRCONV_ANY);
+ else if (!(t == IRT_I64 || t == IRT_U64))
+ tr = emitconv(tr, IRT_INTP, IRT_INT,
+ ((t - IRT_I8) & 1) ? 0 : IRCONV_SEXT);
+#else
+ if (!tref_typerange(sp[1], IRT_I8, IRT_U32)) {
+ tr = emitconv(tr, IRT_INTP, t,
+ (t == IRT_NUM || t == IRT_FLOAT) ? IRCONV_ANY : 0);
+ }
+#endif
+ tr = emitir(IRT(IR_MUL, IRT_INTP), tr, lj_ir_kintp(J, sz));
+ tr = emitir(IRT(mm+(int)IR_ADD-(int)MM_add, IRT_PTR), sp[0], tr);
+ id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|ctype_cid(ctp->info)),
+ CTSIZE_PTR);
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ }
+}
+
+/* Record ctype arithmetic metamethods. */
+static TRef crec_arith_meta(jit_State *J, TRef *sp, CType **s, CTState *cts,
+ RecordFFData *rd)
+{
+ cTValue *tv = NULL;
+ if (J->base[0]) {
+ if (tviscdata(&rd->argv[0])) {
+ CTypeID id = argv2cdata(J, J->base[0], &rd->argv[0])->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, (MMS)rd->data);
+ }
+ if (!tv && J->base[1] && tviscdata(&rd->argv[1])) {
+ CTypeID id = argv2cdata(J, J->base[1], &rd->argv[1])->ctypeid;
+ CType *ct = ctype_raw(cts, id);
+ if (ctype_isptr(ct->info)) id = ctype_cid(ct->info);
+ tv = lj_ctype_meta(cts, id, (MMS)rd->data);
+ }
+ }
+ if (tv) {
+ if (tvisfunc(tv)) {
+ crec_tailcall(J, rd, tv);
+ return 0;
+ } /* NYI: non-function metamethods. */
+ } else if ((MMS)rd->data == MM_eq) { /* Fallback cdata pointer comparison. */
+ if (sp[0] && sp[1] && ctype_isnum(s[0]->info) == ctype_isnum(s[1]->info)) {
+ /* Assume true comparison. Fix up and emit the pending guard later. */
+ lj_ir_set(J, IRTG(IR_EQ, IRT_PTR), sp[0], sp[1]);
+ J->postproc = LJ_POST_FIXGUARD;
+ return TREF_TRUE;
+ } else {
+ return TREF_FALSE;
+ }
+ }
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return 0;
+}
+
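+/* Record arithmetic/comparison ops with at least one cdata operand. */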
+void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef sp[2];
+ CType *s[2];
+ MSize i;
+ for (i = 0; i < 2; i++) {
+ TRef tr = J->base[i];
+ CType *ct = ctype_get(cts, CTID_DOUBLE);
+ if (!tr) {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ } else if (tref_iscdata(tr)) {
+ CTypeID id = argv2cdata(J, tr, &rd->argv[i])->ctypeid;
+ IRType t;
+ ct = ctype_raw(cts, id);
+ t = crec_ct2irt(cts, ct);
+ if (ctype_isptr(ct->info)) { /* Resolve pointer or reference. */
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_PTR);
+ if (ctype_isref(ct->info)) {
+ ct = ctype_rawchild(cts, ct);
+ t = crec_ct2irt(cts, ct);
+ }
+ } else if (t == IRT_I64 || t == IRT_U64) {
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT64);
+ lj_needsplit(J);
+ goto ok;
+ } else if (t == IRT_INT || t == IRT_U32) {
+ tr = emitir(IRT(IR_FLOAD, t), tr, IRFL_CDATA_INT);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ goto ok;
+ } else if (ctype_isfunc(ct->info)) {
+ tr = emitir(IRT(IR_FLOAD, IRT_PTR), tr, IRFL_CDATA_PTR);
+ ct = ctype_get(cts,
+ lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|id), CTSIZE_PTR));
+ goto ok;
+ } else {
+ tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCcdata)));
+ }
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info)) {
+ if (t == IRT_CDATA) {
+ tr = 0;
+ } else {
+ if (t == IRT_I64 || t == IRT_U64) lj_needsplit(J);
+ tr = emitir(IRT(IR_XLOAD, t), tr, 0);
+ }
+ }
+ } else if (tref_isnil(tr)) {
+ tr = lj_ir_kptr(J, NULL);
+ ct = ctype_get(cts, CTID_P_VOID);
+ } else if (tref_isinteger(tr)) {
+ ct = ctype_get(cts, CTID_INT32);
+ } else if (tref_isstr(tr)) {
+ TRef tr2 = J->base[1-i];
+ CTypeID id = argv2cdata(J, tr2, &rd->argv[1-i])->ctypeid;
+ ct = ctype_raw(cts, id);
+ if (ctype_isenum(ct->info)) { /* Match string against enum constant. */
+ GCstr *str = strV(&rd->argv[i]);
+ CTSize ofs;
+ CType *cct = lj_ctype_getfield(cts, ct, str, &ofs);
+ if (cct && ctype_isconstval(cct->info)) {
+ /* Specialize to the name of the enum constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, str));
+ ct = ctype_child(cts, cct);
+ tr = lj_ir_kint(J, (int32_t)ofs);
+ } else { /* Interpreter will throw or return false. */
+ ct = ctype_get(cts, CTID_P_VOID);
+ }
+ } else if (ctype_isptr(ct->info)) {
+ tr = emitir(IRT(IR_ADD, IRT_PTR), tr, lj_ir_kintp(J, sizeof(GCstr)));
+ } else {
+ ct = ctype_get(cts, CTID_P_VOID);
+ }
+ } else if (!tref_isnum(tr)) {
+ tr = 0;
+ ct = ctype_get(cts, CTID_P_VOID);
+ }
+ ok:
+ s[i] = ct;
+ sp[i] = tr;
+ }
+ {
+ TRef tr;
+ MMS mm = (MMS)rd->data;
+ if ((mm == MM_len || mm == MM_concat ||
+ (!(tr = crec_arith_int64(J, sp, s, mm)) &&
+ !(tr = crec_arith_ptr(J, sp, s, mm)))) &&
+ !(tr = crec_arith_meta(J, sp, s, cts, rd)))
+ return;
+ J->base[0] = tr;
+ /* Fix up cdata comparisons, too. This avoids some cdata escapes. */
+ if (J->postproc == LJ_POST_FIXGUARD && frame_iscont(J->L->base-1) &&
+ !irt_isguard(J->guardemit)) {
+ const BCIns *pc = frame_contpc(J->L->base-1) - 1;
+ if (bc_op(*pc) <= BC_ISNEP) {
+ J2G(J)->tmptv.u64 = (uint64_t)(uintptr_t)pc;
+ J->postproc = LJ_POST_FIXCOMP;
+ }
+ }
+ }
+}
+
+/* -- C library namespace metamethods ------------------------------------- */
+
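+/* Record clib __index/__newindex: specialize to the symbol name. */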
+void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ if (tref_isudata(J->base[0]) && tref_isstr(J->base[1]) &&
+ udataV(&rd->argv[0])->udtype == UDTYPE_FFI_CLIB) {
+ CLibrary *cl = (CLibrary *)uddata(udataV(&rd->argv[0]));
+ GCstr *name = strV(&rd->argv[1]);
+ CType *ct;
+ CTypeID id = lj_ctype_getname(cts, &ct, name, CLNS_INDEX);
+ cTValue *tv = lj_tab_getstr(cl->cache, name);
+ rd->nres = rd->data;
+ if (id && tv && !tvisnil(tv)) {
+ /* Specialize to the symbol name and make the result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, name));
+ if (ctype_isconstval(ct->info)) {
+ if (ct->size >= 0x80000000u &&
+ (ctype_child(cts, ct)->info & CTF_UNSIGNED))
+ J->base[0] = lj_ir_knum(J, (lua_Number)(uint32_t)ct->size);
+ else
+ J->base[0] = lj_ir_kint(J, (int32_t)ct->size);
+ } else if (ctype_isextern(ct->info)) {
+ CTypeID sid = ctype_cid(ct->info);
+ void *sp = *(void **)cdataptr(cdataV(tv));
+ TRef ptr;
+ ct = ctype_raw(cts, sid);
+ if (LJ_64 && !checkptr32(sp))
+ ptr = lj_ir_kintp(J, (uintptr_t)sp);
+ else
+ ptr = lj_ir_kptr(J, sp);
+ if (rd->data) {
+ J->base[0] = crec_tv_ct(J, ct, sid, ptr);
+ } else {
+ J->needsnap = 1;
+ crec_ct_tv(J, ct, ptr, J->base[2], &rd->argv[2]);
+ }
+ } else {
+ J->base[0] = lj_ir_kgc(J, obj2gco(cdataV(tv)), IRT_CDATA);
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NOCACHE);
+ }
+ } /* else: interpreter will throw. */
+}
+
+/* -- FFI library functions ----------------------------------------------- */
+
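+/* Coerce an argument to int32_t (for lengths, fill values etc.). */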
+static TRef crec_toint(jit_State *J, CTState *cts, TRef sp, TValue *sval)
+{
+ return crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, sp, sval);
+}
+
+void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd)
+{
+ crec_alloc(J, rd, argv2ctype(J, J->base[0], &rd->argv[0]));
+}
+
+void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd)
+{
+ UNUSED(rd);
+ if (J->base[0])
+ lj_trace_err(J, LJ_TRERR_NYICALL);
+ J->base[0] = lj_ir_call(J, IRCALL_lj_vm_errno);
+}
+
+void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef tr = J->base[0];
+ if (tr) {
+ TRef trlen = J->base[1];
+ if (!tref_isnil(trlen)) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, &rd->argv[0]);
+ } else {
+ tr = crec_ct_tv(J, ctype_get(cts, CTID_P_CCHAR), 0, tr, &rd->argv[0]);
+ trlen = lj_ir_call(J, IRCALL_strlen, tr);
+ }
+ J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), tr, trlen);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef trdst = J->base[0], trsrc = J->base[1], trlen = J->base[2];
+ if (trdst && trsrc && (trlen || tref_isstr(trsrc))) {
+ trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
+ trsrc = crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, trsrc, &rd->argv[1]);
+ if (trlen) {
+ trlen = crec_toint(J, cts, trlen, &rd->argv[2]);
+ } else {
+ trlen = emitir(IRTI(IR_FLOAD), J->base[1], IRFL_STR_LEN);
+ trlen = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
+ }
+ rd->nres = 0;
+ crec_copy(J, trdst, trsrc, trlen, NULL);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef trdst = J->base[0], trlen = J->base[1], trfill = J->base[2];
+ if (trdst && trlen) {
+ CTSize step = 1;
+ if (tviscdata(&rd->argv[0])) { /* Get alignment of original destination. */
+ CTSize sz;
+ CType *ct = ctype_raw(cts, cdataV(&rd->argv[0])->ctypeid);
+ if (ctype_isptr(ct->info))
+ ct = ctype_rawchild(cts, ct);
+ step = (1u<<ctype_align(lj_ctype_info(cts, ctype_typeid(cts, ct), &sz)));
+ }
+ trdst = crec_ct_tv(J, ctype_get(cts, CTID_P_VOID), 0, trdst, &rd->argv[0]);
+ trlen = crec_toint(J, cts, trlen, &rd->argv[1]);
+ if (trfill)
+ trfill = crec_toint(J, cts, trfill, &rd->argv[2]);
+ else
+ trfill = lj_ir_kint(J, 0);
+ rd->nres = 0;
+ crec_fill(J, trdst, trlen, trfill, step);
+ } /* else: interpreter will throw. */
+}
+
+void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd)
+{
+ if (tref_iscdata(J->base[0])) {
+ TRef trid = lj_ir_kint(J, argv2ctype(J, J->base[0], &rd->argv[0]));
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA),
+ lj_ir_kint(J, CTID_CTYPEID), trid);
+ } else {
+ setfuncV(J->L, &J->errinfo, J->fn);
+ lj_trace_err_info(J, LJ_TRERR_NYIFFU);
+ }
+}
+
+void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd)
+{
+ argv2ctype(J, J->base[0], &rd->argv[0]);
+ if (tref_iscdata(J->base[1])) {
+ argv2ctype(J, J->base[1], &rd->argv[1]);
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } else {
+ J->base[0] = TREF_FALSE;
+ }
+}
+
+void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd)
+{
+ if (tref_isstr(J->base[0])) {
+ /* Specialize to the ABI string to make the boolean result a constant. */
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[0], lj_ir_kstr(J, strV(&rd->argv[0])));
+ J->postproc = LJ_POST_FIXBOOL;
+ J->base[0] = TREF_TRUE;
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+}
+
+/* Record ffi.sizeof(), ffi.alignof(), ffi.offsetof(). */
+void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd)
+{
+ CTypeID id = argv2ctype(J, J->base[0], &rd->argv[0]);
+ if (rd->data == FF_ffi_sizeof) {
+ CType *ct = lj_ctype_rawref(ctype_ctsG(J2G(J)), id);
+ if (ctype_isvltype(ct->info))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ } else if (rd->data == FF_ffi_offsetof) { /* Specialize to the field name. */
+ if (!tref_isstr(J->base[1]))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ emitir(IRTG(IR_EQ, IRT_STR), J->base[1], lj_ir_kstr(J, strV(&rd->argv[1])));
+ rd->nres = 3; /* ffi.offsetof() can return up to 3 results for bitfields. */
+ }
+ J->postproc = LJ_POST_FIXCONST;
+ J->base[0] = J->base[1] = J->base[2] = TREF_NIL;
+}
+
+void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd)
+{
+ argv2cdata(J, J->base[0], &rd->argv[0]);
+ if (!J->base[1])
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ crec_finalizer(J, J->base[0], J->base[1], &rd->argv[1]);
+}
+
+/* -- 64 bit bit.* library functions -------------------------------------- */
+
+/* Determine bit operation type from argument type. */
+static CTypeID crec_bit64_type(CTState *cts, cTValue *tv)
+{
+ if (tviscdata(tv)) {
+ CType *ct = lj_ctype_rawref(cts, cdataV(tv)->ctypeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if ((ct->info & (CTMASK_NUM|CTF_BOOL|CTF_FP|CTF_UNSIGNED)) ==
+ CTINFO(CT_NUM, CTF_UNSIGNED) && ct->size == 8)
+ return CTID_UINT64; /* Use uint64_t, since it has the highest rank. */
+ return CTID_INT64; /* Otherwise use int64_t. */
+ }
+ return 0; /* Use regular 32 bit ops. */
+}
+
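+/* Record bit.tobit(): convert via int64_t and truncate to a 32 bit int. */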
+void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ TRef tr = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0,
+ J->base[0], &rd->argv[0]);
+ if (!tref_isinteger(tr))
+ tr = emitconv(tr, IRT_INT, tref_type(tr), 0);
+ J->base[0] = tr;
+}
+
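+/* Record unary 64 bit bit.* op. Returns 0 to fall back to 32 bit ops. */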
+int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID id = crec_bit64_type(cts, &rd->argv[0]);
+ if (id) {
+ TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
+ tr = emitir(IRT(rd->data, id-CTID_INT64+IRT_I64), tr, 0);
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ return 1;
+ }
+ return 0;
+}
+
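+/* Record n-ary 64 bit bit.* op at the highest argument type rank. */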
+int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID id = 0;
+ MSize i;
+ for (i = 0; J->base[i] != 0; i++) {
+ CTypeID aid = crec_bit64_type(cts, &rd->argv[i]);
+ if (id < aid) id = aid; /* Determine highest type rank of all arguments. */
+ }
+ if (id) {
+ CType *ct = ctype_get(cts, id);
+ uint32_t ot = IRT(rd->data, id-CTID_INT64+IRT_I64);
+ TRef tr = crec_ct_tv(J, ct, 0, J->base[0], &rd->argv[0]);
+ for (i = 1; J->base[i] != 0; i++) {
+ TRef tr2 = crec_ct_tv(J, ct, 0, J->base[i], &rd->argv[i]);
+ tr = emitir(ot, tr, tr2);
+ }
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ return 1;
+ }
+ return 0;
+}
+
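+/* Record 64 bit shift/rotate. The shift count is masked where needed. */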
+int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID id;
+ TRef tsh = 0;
+ if (J->base[0] && tref_iscdata(J->base[1])) {
+ tsh = crec_ct_tv(J, ctype_get(cts, CTID_INT64), 0,
+ J->base[1], &rd->argv[1]);
+ if (!tref_isinteger(tsh))
+ tsh = emitconv(tsh, IRT_INT, tref_type(tsh), 0);
+ J->base[1] = tsh;
+ }
+ id = crec_bit64_type(cts, &rd->argv[0]);
+ if (id) {
+ TRef tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
+ uint32_t op = rd->data;
+ if (!tsh) tsh = lj_opt_narrow_tobit(J, J->base[1]);
+ if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ !tref_isk(tsh))
+ tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 63));
+#ifdef LJ_TARGET_UNIFYROT
+ if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) {
+ op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR;
+ tsh = emitir(IRTI(IR_NEG), tsh, tsh);
+ }
+#endif
+ tr = emitir(IRT(op, id-CTID_INT64+IRT_I64), tr, tsh);
+ J->base[0] = emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, id), tr);
+ return 1;
+ }
+ return 0;
+}
+
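+/* Record bit.tohex() for 32 or 64 bit arguments. */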
+TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CTypeID id = crec_bit64_type(cts, &rd->argv[0]);
+ TRef tr, trsf = J->base[1];
+ SFormat sf = (STRFMT_UINT|STRFMT_T_HEX);
+ int32_t n;
+ if (trsf) {
+ CTypeID id2 = 0;
+ n = (int32_t)lj_carith_check64(J->L, 2, &id2);
+ if (id2)
+ trsf = crec_ct_tv(J, ctype_get(cts, CTID_INT32), 0, trsf, &rd->argv[1]);
+ else
+ trsf = lj_opt_narrow_tobit(J, trsf);
+ emitir(IRTGI(IR_EQ), trsf, lj_ir_kint(J, n)); /* Specialize to n. */
+ } else {
+ n = id ? 16 : 8;
+ }
+ if (n < 0) { n = -n; sf |= STRFMT_F_UPPER; }
+ sf |= ((SFormat)((n+1)&255) << STRFMT_SH_PREC);
+ if (id) {
+ tr = crec_ct_tv(J, ctype_get(cts, id), 0, J->base[0], &rd->argv[0]);
+ if (n < 16)
+ tr = emitir(IRT(IR_BAND, IRT_U64), tr,
+ lj_ir_kint64(J, ((uint64_t)1 << 4*n)-1));
+ } else {
+ tr = lj_opt_narrow_tobit(J, J->base[0]);
+ if (n < 8)
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (int32_t)((1u << 4*n)-1)));
+ tr = emitconv(tr, IRT_U64, IRT_INT, 0); /* No sign-extension. */
+ lj_needsplit(J);
+ }
+ return lj_ir_call(J, IRCALL_lj_strfmt_putfxint, hdr, lj_ir_kint(J, sf), tr);
+}
+
+/* -- Miscellaneous library functions ------------------------------------- */
+
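+/* Record tonumber() for a cdata argument. */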
+void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ CType *d, *ct = lj_ctype_rawref(cts, cdataV(&rd->argv[0])->ctypeid);
+ if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct);
+ if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) {
+ if (ctype_isinteger_or_bool(ct->info) && ct->size <= 4 &&
+ !(ct->size == 4 && (ct->info & CTF_UNSIGNED)))
+ d = ctype_get(cts, CTID_INT32);
+ else
+ d = ctype_get(cts, CTID_DOUBLE);
+ J->base[0] = crec_ct_tv(J, d, 0, J->base[0], &rd->argv[0]);
+ } else {
+ /* Specialize to the ctype that couldn't be converted. */
+ argv2cdata(J, J->base[0], &rd->argv[0]);
+ J->base[0] = TREF_NIL;
+ }
+}
+
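+/* Load the 64 bit integer value of an int64_t/uint64_t cdata. */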
+TRef lj_crecord_loadiu64(jit_State *J, TRef tr, cTValue *o)
+{
+ CTypeID id = argv2cdata(J, tr, o)->ctypeid;
+ if (!(id == CTID_INT64 || id == CTID_UINT64))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ lj_needsplit(J);
+ return emitir(IRT(IR_FLOAD, id == CTID_INT64 ? IRT_I64 : IRT_U64), tr,
+ IRFL_CDATA_INT64);
+}
+
+#if LJ_HASBUFFER
+TRef lj_crecord_topcvoid(jit_State *J, TRef tr, cTValue *o)
+{
+ CTState *cts = ctype_ctsG(J2G(J));
+ if (!tref_iscdata(tr)) lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return crec_ct_tv(J, ctype_get(cts, CTID_P_CVOID), 0, tr, o);
+}
+
+TRef lj_crecord_topuint8(jit_State *J, TRef tr)
+{
+ return emitir(IRTG(IR_CNEWI, IRT_CDATA), lj_ir_kint(J, CTID_P_UINT8), tr);
+}
+#endif
+
+#undef IR
+#undef emitir
+#undef emitconv
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_crecord.h b/libs/luajit-cmake/luajit/src/lj_crecord.h
new file mode 100644
index 0000000..2c8cf05
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_crecord.h
@@ -0,0 +1,43 @@
+/*
+** Trace recorder for C data operations.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CRECORD_H
+#define _LJ_CRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+#include "lj_ffrecord.h"
+
+#if LJ_HASJIT && LJ_HASFFI
+LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd);
+LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd);
+
+LJ_FUNC void LJ_FASTCALL recff_bit64_tobit(jit_State *J, RecordFFData *rd);
+LJ_FUNC int LJ_FASTCALL recff_bit64_unary(jit_State *J, RecordFFData *rd);
+LJ_FUNC int LJ_FASTCALL recff_bit64_nary(jit_State *J, RecordFFData *rd);
+LJ_FUNC int LJ_FASTCALL recff_bit64_shift(jit_State *J, RecordFFData *rd);
+LJ_FUNC TRef recff_bit64_tohex(jit_State *J, RecordFFData *rd, TRef hdr);
+
+LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
+LJ_FUNC TRef lj_crecord_loadiu64(jit_State *J, TRef tr, cTValue *o);
+#if LJ_HASBUFFER
+LJ_FUNC TRef lj_crecord_topcvoid(jit_State *J, TRef tr, cTValue *o);
+LJ_FUNC TRef lj_crecord_topuint8(jit_State *J, TRef tr);
+#endif
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ctype.c b/libs/luajit-cmake/luajit/src/lj_ctype.c
new file mode 100644
index 0000000..204be03
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ctype.c
@@ -0,0 +1,646 @@
+/*
+** C type management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include "lj_obj.h"
+
+#if LJ_HASFFI
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_strfmt.h"
+#include "lj_ctype.h"
+#include "lj_ccallback.h"
+#include "lj_buf.h"
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* Predefined typedefs. */
+#define CTTDDEF(_) \
+ /* Vararg handling. */ \
+ _("va_list", P_VOID) \
+ _("__builtin_va_list", P_VOID) \
+ _("__gnuc_va_list", P_VOID) \
+ /* From stddef.h. */ \
+ _("ptrdiff_t", INT_PSZ) \
+ _("size_t", UINT_PSZ) \
+ _("wchar_t", WCHAR) \
+ /* Subset of stdint.h. */ \
+ _("int8_t", INT8) \
+ _("int16_t", INT16) \
+ _("int32_t", INT32) \
+ _("int64_t", INT64) \
+ _("uint8_t", UINT8) \
+ _("uint16_t", UINT16) \
+ _("uint32_t", UINT32) \
+ _("uint64_t", UINT64) \
+ _("intptr_t", INT_PSZ) \
+ _("uintptr_t", UINT_PSZ) \
+ /* From POSIX. */ \
+ _("ssize_t", INT_PSZ) \
+ /* End of typedef list. */
+
+/* Keywords (only the ones we actually care for). */
+#define CTKWDEF(_) \
+ /* Type specifiers. */ \
+ _("void", -1, CTOK_VOID) \
+ _("_Bool", 0, CTOK_BOOL) \
+ _("bool", 1, CTOK_BOOL) \
+ _("char", 1, CTOK_CHAR) \
+ _("int", 4, CTOK_INT) \
+ _("__int8", 1, CTOK_INT) \
+ _("__int16", 2, CTOK_INT) \
+ _("__int32", 4, CTOK_INT) \
+ _("__int64", 8, CTOK_INT) \
+ _("float", 4, CTOK_FP) \
+ _("double", 8, CTOK_FP) \
+ _("long", 0, CTOK_LONG) \
+ _("short", 0, CTOK_SHORT) \
+ _("_Complex", 0, CTOK_COMPLEX) \
+ _("complex", 0, CTOK_COMPLEX) \
+ _("__complex", 0, CTOK_COMPLEX) \
+ _("__complex__", 0, CTOK_COMPLEX) \
+ _("signed", 0, CTOK_SIGNED) \
+ _("__signed", 0, CTOK_SIGNED) \
+ _("__signed__", 0, CTOK_SIGNED) \
+ _("unsigned", 0, CTOK_UNSIGNED) \
+ /* Type qualifiers. */ \
+ _("const", 0, CTOK_CONST) \
+ _("__const", 0, CTOK_CONST) \
+ _("__const__", 0, CTOK_CONST) \
+ _("volatile", 0, CTOK_VOLATILE) \
+ _("__volatile", 0, CTOK_VOLATILE) \
+ _("__volatile__", 0, CTOK_VOLATILE) \
+ _("restrict", 0, CTOK_RESTRICT) \
+ _("__restrict", 0, CTOK_RESTRICT) \
+ _("__restrict__", 0, CTOK_RESTRICT) \
+ _("inline", 0, CTOK_INLINE) \
+ _("__inline", 0, CTOK_INLINE) \
+ _("__inline__", 0, CTOK_INLINE) \
+ /* Storage class specifiers. */ \
+ _("typedef", 0, CTOK_TYPEDEF) \
+ _("extern", 0, CTOK_EXTERN) \
+ _("static", 0, CTOK_STATIC) \
+ _("auto", 0, CTOK_AUTO) \
+ _("register", 0, CTOK_REGISTER) \
+ /* GCC Attributes. */ \
+ _("__extension__", 0, CTOK_EXTENSION) \
+ _("__attribute", 0, CTOK_ATTRIBUTE) \
+ _("__attribute__", 0, CTOK_ATTRIBUTE) \
+ _("asm", 0, CTOK_ASM) \
+ _("__asm", 0, CTOK_ASM) \
+ _("__asm__", 0, CTOK_ASM) \
+ /* MSVC Attributes. */ \
+ _("__declspec", 0, CTOK_DECLSPEC) \
+ _("__cdecl", CTCC_CDECL, CTOK_CCDECL) \
+ _("__thiscall", CTCC_THISCALL, CTOK_CCDECL) \
+ _("__fastcall", CTCC_FASTCALL, CTOK_CCDECL) \
+ _("__stdcall", CTCC_STDCALL, CTOK_CCDECL) \
+ _("__ptr32", 4, CTOK_PTRSZ) \
+ _("__ptr64", 8, CTOK_PTRSZ) \
+ /* Other type specifiers. */ \
+ _("struct", 0, CTOK_STRUCT) \
+ _("union", 0, CTOK_UNION) \
+ _("enum", 0, CTOK_ENUM) \
+ /* Operators. */ \
+ _("sizeof", 0, CTOK_SIZEOF) \
+ _("__alignof", 0, CTOK_ALIGNOF) \
+ _("__alignof__", 0, CTOK_ALIGNOF) \
+ /* End of keyword list. */
+
+/* Type info for predefined types. Size merged in. */
+static CTInfo lj_ctype_typeinfo[] = {
+#define CTTYINFODEF(id, sz, ct, info) CTINFO((ct),(((sz)&0x3fu)<<10)+(info)),
+#define CTTDINFODEF(name, id) CTINFO(CT_TYPEDEF, CTID_##id),
+#define CTKWINFODEF(name, sz, kw) CTINFO(CT_KW,(((sz)&0x3fu)<<10)+(kw)),
+CTTYDEF(CTTYINFODEF)
+CTTDDEF(CTTDINFODEF)
+CTKWDEF(CTKWINFODEF)
+#undef CTTYINFODEF
+#undef CTTDINFODEF
+#undef CTKWINFODEF
+ 0
+};
+
+/* Predefined type names collected in a single string. */
+static const char * const lj_ctype_typenames =
+#define CTTDNAMEDEF(name, id) name "\0"
+#define CTKWNAMEDEF(name, sz, cds) name "\0"
+CTTDDEF(CTTDNAMEDEF)
+CTKWDEF(CTKWNAMEDEF)
+#undef CTTDNAMEDEF
+#undef CTKWNAMEDEF
+;
+
+#define CTTYPEINFO_NUM (sizeof(lj_ctype_typeinfo)/sizeof(CTInfo)-1)
+#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
+#define CTTYPETAB_MIN CTTYPEINFO_NUM
+#else
+#define CTTYPETAB_MIN 128
+#endif
+
+/* -- C type interning ---------------------------------------------------- */
+
+#define ct_hashtype(info, size) (hashrot(info, size) & CTHASH_MASK)
+#define ct_hashname(name) \
+ (hashrot(u32ptr(name), u32ptr(name) + HASH_BIAS) & CTHASH_MASK)
+
+/* Create new type element. */
+CTypeID lj_ctype_new(CTState *cts, CType **ctp)
+{
+ CTypeID id = cts->top;
+ CType *ct;
+ lj_assertCTS(cts->L, "uninitialized cts->L");
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+#ifdef LUAJIT_CTYPE_CHECK_ANCHOR
+ ct = lj_mem_newvec(cts->L, id+1, CType);
+ memcpy(ct, cts->tab, id*sizeof(CType));
+ memset(cts->tab, 0, id*sizeof(CType));
+ lj_mem_freevec(cts->g, cts->tab, cts->sizetab, CType);
+ cts->tab = ct;
+ cts->sizetab = id+1;
+#else
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+#endif
+ }
+ cts->top = id+1;
+ *ctp = ct = &cts->tab[id];
+ ct->info = 0;
+ ct->size = 0;
+ ct->sib = 0;
+ ct->next = 0;
+ setgcrefnull(ct->name);
+ return id;
+}
+
+/* Intern a type element. */
+CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size)
+{
+ uint32_t h = ct_hashtype(info, size);
+ CTypeID id = cts->hash[h];
+ lj_assertCTS(cts->L, "uninitialized cts->L");
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (ct->info == info && ct->size == size)
+ return id;
+ id = ct->next;
+ }
+ id = cts->top;
+ if (LJ_UNLIKELY(id >= cts->sizetab)) {
+ if (id >= CTID_MAX) lj_err_msg(cts->L, LJ_ERR_TABOV);
+ lj_mem_growvec(cts->L, cts->tab, cts->sizetab, CTID_MAX, CType);
+ }
+ cts->top = id+1;
+ cts->tab[id].info = info;
+ cts->tab[id].size = size;
+ cts->tab[id].sib = 0;
+ cts->tab[id].next = cts->hash[h];
+ setgcrefnull(cts->tab[id].name);
+ cts->hash[h] = (CTypeID1)id;
+ return id;
+}
+
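+/* Usage sketch: interning a pointer type, e.g. int *:
+**
+**   CTypeID id = lj_ctype_intern(cts, CTINFO(CT_PTR, CTALIGN_PTR|CTID_INT32),
+**                                CTSIZE_PTR);
+**
+** Identical (info, size) pairs always return the same CTypeID.
+*/
+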
+/* Add type element to hash table. */
+static void ctype_addtype(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashtype(ct->info, ct->size);
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Add named element to hash table. */
+void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id)
+{
+ uint32_t h = ct_hashname(gcref(ct->name));
+ ct->next = cts->hash[h];
+ cts->hash[h] = (CTypeID1)id;
+}
+
+/* Get a C type by name, matching the type mask. */
+CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name, uint32_t tmask)
+{
+ CTypeID id = cts->hash[ct_hashname(name)];
+ while (id) {
+ CType *ct = ctype_get(cts, id);
+ if (gcref(ct->name) == obj2gco(name) &&
+ ((tmask >> ctype_type(ct->info)) & 1)) {
+ *ctp = ct;
+ return id;
+ }
+ id = ct->next;
+ }
+ *ctp = &cts->tab[0]; /* Simplify caller logic. ctype_get() would assert. */
+ return 0;
+}
+
+/* Get a struct/union/enum/function field by name. */
+CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name, CTSize *ofs,
+ CTInfo *qual)
+{
+ while (ct->sib) {
+ ct = ctype_get(cts, ct->sib);
+ if (gcref(ct->name) == obj2gco(name)) {
+ *ofs = ct->size;
+ return ct;
+ }
+ if (ctype_isxattrib(ct->info, CTA_SUBTYPE)) {
+ CType *fct, *cct = ctype_child(cts, ct);
+ CTInfo q = 0;
+ while (ctype_isattrib(cct->info)) {
+ if (ctype_attrib(cct->info) == CTA_QUAL) q |= cct->size;
+ cct = ctype_child(cts, cct);
+ }
+ fct = lj_ctype_getfieldq(cts, cct, name, ofs, qual);
+ if (fct) {
+ if (qual) *qual |= q;
+ *ofs += ct->size;
+ return fct;
+ }
+ }
+ }
+ return NULL; /* Not found. */
+}
+
+/* -- C type information -------------------------------------------------- */
+
+/* Follow references and get raw type for a C type ID. */
+CType *lj_ctype_rawref(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info))
+ ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get size for a C type ID. Does NOT support VLA/VLS. */
+CTSize lj_ctype_size(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_raw(cts, id);
+ return ctype_hassize(ct->info) ? ct->size : CTSIZE_INVALID;
+}
+
+/* Get size for a variable-length C type. Does NOT support other C types. */
+CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem)
+{
+ uint64_t xsz = 0;
+ if (ctype_isstruct(ct->info)) {
+ CTypeID arrid = 0, fid = ct->sib;
+ xsz = ct->size; /* Add the struct size. */
+ while (fid) {
+ CType *ctf = ctype_get(cts, fid);
+ if (ctype_type(ctf->info) == CT_FIELD)
+ arrid = ctype_cid(ctf->info); /* Remember last field of VLS. */
+ fid = ctf->sib;
+ }
+ ct = ctype_raw(cts, arrid);
+ }
+ lj_assertCTS(ctype_isvlarray(ct->info), "VLA expected");
+ ct = ctype_rawchild(cts, ct); /* Get array element. */
+ lj_assertCTS(ctype_hassize(ct->info), "bad VLA without size");
+ /* Calculate actual size of VLA and check for overflow. */
+ xsz += (uint64_t)ct->size * nelem;
+ return xsz < 0x80000000u ? (CTSize)xsz : CTSIZE_INVALID;
+}
+
+/* Get type, qualifiers, size and alignment for a C type ID. */
+CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp)
+{
+ CTInfo qual = 0;
+ CType *ct = ctype_get(cts, id);
+ for (;;) {
+ CTInfo info = ct->info;
+ if (ctype_isenum(info)) {
+ /* Follow child. Need to look at its attributes, too. */
+ } else if (ctype_isattrib(info)) {
+ if (ctype_isxattrib(info, CTA_QUAL))
+ qual |= ct->size;
+ else if (ctype_isxattrib(info, CTA_ALIGN) && !(qual & CTFP_ALIGNED))
+ qual |= CTFP_ALIGNED + CTALIGN(ct->size);
+ } else {
+ if (!(qual & CTFP_ALIGNED)) qual |= (info & CTF_ALIGN);
+ qual |= (info & ~(CTF_ALIGN|CTMASK_CID));
+ lj_assertCTS(ctype_hassize(info) || ctype_isfunc(info),
+ "ctype without size");
+ *szp = ctype_isfunc(info) ? CTSIZE_INVALID : ct->size;
+ break;
+ }
+ ct = ctype_get(cts, ctype_cid(info));
+ }
+ return qual;
+}
+
+/* Ditto, but follow a reference. */
+CTInfo lj_ctype_info_raw(CTState *cts, CTypeID id, CTSize *szp)
+{
+ CType *ct = ctype_get(cts, id);
+ if (ctype_isref(ct->info)) id = ctype_cid(ct->info);
+ return lj_ctype_info(cts, id, szp);
+}
+
+/* Get ctype metamethod. */
+cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm)
+{
+ CType *ct = ctype_get(cts, id);
+ cTValue *tv;
+ while (ctype_isattrib(ct->info) || ctype_isref(ct->info)) {
+ id = ctype_cid(ct->info);
+ ct = ctype_get(cts, id);
+ }
+ if (ctype_isptr(ct->info) &&
+ ctype_isfunc(ctype_get(cts, ctype_cid(ct->info))->info))
+ tv = lj_tab_getstr(cts->miscmap, &cts->g->strempty);
+ else
+ tv = lj_tab_getinth(cts->miscmap, -(int32_t)id);
+ if (tv && tvistab(tv) &&
+ (tv = lj_tab_getstr(tabV(tv), mmname_str(cts->g, mm))) && !tvisnil(tv))
+ return tv;
+ return NULL;
+}
+
+/* -- C type representation ----------------------------------------------- */
+
+/* Fixed max. length of a C type representation. */
+#define CTREPR_MAX 512
+
+typedef struct CTRepr {
+ char *pb, *pe;
+ CTState *cts;
+ lua_State *L;
+ int needsp;
+ int ok;
+ char buf[CTREPR_MAX];
+} CTRepr;
+
+/* Prepend string. */
+static void ctype_prepstr(CTRepr *ctr, const char *str, MSize len)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + len+1 > p) { ctr->ok = 0; return; }
+ if (ctr->needsp) *--p = ' ';
+ ctr->needsp = 1;
+ p -= len;
+ while (len-- > 0) p[len] = str[len];
+ ctr->pb = p;
+}
+
+#define ctype_preplit(ctr, str) ctype_prepstr((ctr), "" str, sizeof(str)-1)
+
+/* Prepend char. */
+static void ctype_prepc(CTRepr *ctr, int c)
+{
+ if (ctr->buf >= ctr->pb) { ctr->ok = 0; return; }
+ *--ctr->pb = c;
+}
+
+/* Prepend number. */
+static void ctype_prepnum(CTRepr *ctr, uint32_t n)
+{
+ char *p = ctr->pb;
+ if (ctr->buf + 10+1 > p) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ ctr->pb = p;
+ ctr->needsp = 0;
+}
+
+/* Append char. */
+static void ctype_appc(CTRepr *ctr, int c)
+{
+ if (ctr->pe >= ctr->buf + CTREPR_MAX) { ctr->ok = 0; return; }
+ *ctr->pe++ = c;
+}
+
+/* Append number. */
+static void ctype_appnum(CTRepr *ctr, uint32_t n)
+{
+ char buf[10];
+ char *p = buf+sizeof(buf);
+ char *q = ctr->pe;
+ if (q > ctr->buf + CTREPR_MAX - 10) { ctr->ok = 0; return; }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ do { *q++ = *p++; } while (p < buf+sizeof(buf));
+ ctr->pe = q;
+}
+
+/* Prepend qualifiers. */
+static void ctype_prepqual(CTRepr *ctr, CTInfo info)
+{
+ if ((info & CTF_VOLATILE)) ctype_preplit(ctr, "volatile");
+ if ((info & CTF_CONST)) ctype_preplit(ctr, "const");
+}
+
+/* Prepend named type. */
+static void ctype_preptype(CTRepr *ctr, CType *ct, CTInfo qual, const char *t)
+{
+ if (gcref(ct->name)) {
+ GCstr *str = gco2str(gcref(ct->name));
+ ctype_prepstr(ctr, strdata(str), str->len);
+ } else {
+ if (ctr->needsp) ctype_prepc(ctr, ' ');
+ ctype_prepnum(ctr, ctype_typeid(ctr->cts, ct));
+ ctr->needsp = 1;
+ }
+ ctype_prepstr(ctr, t, (MSize)strlen(t));
+ ctype_prepqual(ctr, qual);
+}
+
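+/* Build the representation of a C type, following the chain of type IDs. */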
+static void ctype_repr(CTRepr *ctr, CTypeID id)
+{
+ CType *ct = ctype_get(ctr->cts, id);
+ CTInfo qual = 0;
+ int ptrto = 0;
+ for (;;) {
+ CTInfo info = ct->info;
+ CTSize size = ct->size;
+ switch (ctype_type(info)) {
+ case CT_NUM:
+ if ((info & CTF_BOOL)) {
+ ctype_preplit(ctr, "bool");
+ } else if ((info & CTF_FP)) {
+ if (size == sizeof(double)) ctype_preplit(ctr, "double");
+ else if (size == sizeof(float)) ctype_preplit(ctr, "float");
+ else ctype_preplit(ctr, "long double");
+ } else if (size == 1) {
+ if (!((info ^ CTF_UCHAR) & CTF_UNSIGNED)) ctype_preplit(ctr, "char");
+ else if (CTF_UCHAR) ctype_preplit(ctr, "signed char");
+ else ctype_preplit(ctr, "unsigned char");
+ } else if (size < 8) {
+ if (size == 4) ctype_preplit(ctr, "int");
+ else ctype_preplit(ctr, "short");
+ if ((info & CTF_UNSIGNED)) ctype_preplit(ctr, "unsigned");
+ } else {
+ ctype_preplit(ctr, "_t");
+ ctype_prepnum(ctr, size*8);
+ ctype_preplit(ctr, "int");
+ if ((info & CTF_UNSIGNED)) ctype_prepc(ctr, 'u');
+ }
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_VOID:
+ ctype_preplit(ctr, "void");
+ ctype_prepqual(ctr, (qual|info));
+ return;
+ case CT_STRUCT:
+ ctype_preptype(ctr, ct, qual, (info & CTF_UNION) ? "union" : "struct");
+ return;
+ case CT_ENUM:
+ if (id == CTID_CTYPEID) {
+ ctype_preplit(ctr, "ctype");
+ return;
+ }
+ ctype_preptype(ctr, ct, qual, "enum");
+ return;
+ case CT_ATTRIB:
+ if (ctype_attrib(info) == CTA_QUAL) qual |= size;
+ break;
+ case CT_PTR:
+ if ((info & CTF_REF)) {
+ ctype_prepc(ctr, '&');
+ } else {
+ ctype_prepqual(ctr, (qual|info));
+ if (LJ_64 && size == 4) ctype_preplit(ctr, "__ptr32");
+ ctype_prepc(ctr, '*');
+ }
+ qual = 0;
+ ptrto = 1;
+ ctr->needsp = 1;
+ break;
+ case CT_ARRAY:
+ if (ctype_isrefarray(info)) {
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '[');
+ if (size != CTSIZE_INVALID) {
+ CTSize csize = ctype_child(ctr->cts, ct)->size;
+ ctype_appnum(ctr, csize ? size/csize : 0);
+ } else if ((info & CTF_VLA)) {
+ ctype_appc(ctr, '?');
+ }
+ ctype_appc(ctr, ']');
+ } else if ((info & CTF_COMPLEX)) {
+ if (size == 2*sizeof(float)) ctype_preplit(ctr, "float");
+ ctype_preplit(ctr, "complex");
+ return;
+ } else {
+ ctype_preplit(ctr, ")))");
+ ctype_prepnum(ctr, size);
+ ctype_preplit(ctr, "__attribute__((vector_size(");
+ }
+ break;
+ case CT_FUNC:
+ ctr->needsp = 1;
+ if (ptrto) { ptrto = 0; ctype_prepc(ctr, '('); ctype_appc(ctr, ')'); }
+ ctype_appc(ctr, '(');
+ ctype_appc(ctr, ')');
+ break;
+ default:
+ lj_assertG_(ctr->cts->g, 0, "bad ctype %08x", info);
+ break;
+ }
+ ct = ctype_get(ctr->cts, ctype_cid(info));
+ }
+}
+
+/* Return a printable representation of a C type. */
+GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name)
+{
+ global_State *g = G(L);
+ CTRepr ctr;
+ ctr.pb = ctr.pe = &ctr.buf[CTREPR_MAX/2];
+ ctr.cts = ctype_ctsG(g);
+ ctr.L = L;
+ ctr.ok = 1;
+ ctr.needsp = 0;
+ if (name) ctype_prepstr(&ctr, strdata(name), name->len);
+ ctype_repr(&ctr, id);
+ if (LJ_UNLIKELY(!ctr.ok)) return lj_str_newlit(L, "?");
+ return lj_str_new(L, ctr.pb, ctr.pe - ctr.pb);
+}
+
+/* Convert int64_t/uint64_t to string with 'LL' or 'ULL' suffix. */
+GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned)
+{
+ char buf[1+20+3];
+ char *p = buf+sizeof(buf);
+ int sign = 0;
+ *--p = 'L'; *--p = 'L';
+ if (isunsigned) {
+ *--p = 'U';
+ } else if ((int64_t)n < 0) {
+ n = (uint64_t)-(int64_t)n;
+ sign = 1;
+ }
+ do { *--p = (char)('0' + n % 10); } while (n /= 10);
+ if (sign) *--p = '-';
+ return lj_str_new(L, p, (size_t)(buf+sizeof(buf)-p));
+}
+
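+/* E.g. 42 formats as "42LL", 42 unsigned as "42ULL" and -1 as "-1LL". */
+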
+/* Convert complex to string with 'i' or 'I' suffix. */
+GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size)
+{
+ SBuf *sb = lj_buf_tmp_(L);
+ TValue re, im;
+ if (size == 2*sizeof(double)) {
+ re.n = *(double *)sp; im.n = ((double *)sp)[1];
+ } else {
+ re.n = (double)*(float *)sp; im.n = (double)((float *)sp)[1];
+ }
+ lj_strfmt_putfnum(sb, STRFMT_G14, re.n);
+ if (!(im.u32.hi & 0x80000000u) || im.n != im.n) lj_buf_putchar(sb, '+');
+ lj_strfmt_putfnum(sb, STRFMT_G14, im.n);
+ lj_buf_putchar(sb, sb->w[-1] >= 'a' ? 'I' : 'i');
+ return lj_buf_str(L, sb);
+}
+
+/* -- C type state -------------------------------------------------------- */
+
+/* Initialize C type table and state. */
+CTState *lj_ctype_init(lua_State *L)
+{
+ CTState *cts = lj_mem_newt(L, sizeof(CTState), CTState);
+ CType *ct = lj_mem_newvec(L, CTTYPETAB_MIN, CType);
+ const char *name = lj_ctype_typenames;
+ CTypeID id;
+ memset(cts, 0, sizeof(CTState));
+ cts->tab = ct;
+ cts->sizetab = CTTYPETAB_MIN;
+ cts->top = CTTYPEINFO_NUM;
+ cts->L = NULL;
+ cts->g = G(L);
+ for (id = 0; id < CTTYPEINFO_NUM; id++, ct++) {
+ CTInfo info = lj_ctype_typeinfo[id];
+ ct->size = (CTSize)((int32_t)(info << 16) >> 26);
+ ct->info = info & 0xffff03ffu;
+ ct->sib = 0;
+ if (ctype_type(info) == CT_KW || ctype_istypedef(info)) {
+ size_t len = strlen(name);
+ GCstr *str = lj_str_new(L, name, len);
+ ctype_setname(ct, str);
+ name += len+1;
+ lj_ctype_addname(cts, ct, id);
+ } else {
+ setgcrefnull(ct->name);
+ ct->next = 0;
+ if (!ctype_isenum(info)) ctype_addtype(cts, ct, id);
+ }
+ }
+ setmref(G(L)->ctype_state, cts);
+ return cts;
+}
+
+/* Free C type table and state. */
+void lj_ctype_freestate(global_State *g)
+{
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ lj_ccallback_mcode_free(cts);
+ lj_mem_freevec(g, cts->tab, cts->sizetab, CType);
+ lj_mem_freevec(g, cts->cb.cbid, cts->cb.sizeid, CTypeID1);
+ lj_mem_freet(g, cts);
+ }
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ctype.h b/libs/luajit-cmake/luajit/src/lj_ctype.h
new file mode 100644
index 0000000..3dbcdbf
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ctype.h
@@ -0,0 +1,481 @@
+/*
+** C type management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_CTYPE_H
+#define _LJ_CTYPE_H
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+
+#if LJ_HASFFI
+
+/* -- C type definitions -------------------------------------------------- */
+
+/* C type numbers. Highest 4 bits of C type info. ORDER CT. */
+enum {
+ /* Externally visible types. */
+ CT_NUM, /* Integer or floating-point numbers. */
+ CT_STRUCT, /* Struct or union. */
+ CT_PTR, /* Pointer or reference. */
+ CT_ARRAY, /* Array or complex type. */
+ CT_MAYCONVERT = CT_ARRAY,
+ CT_VOID, /* Void type. */
+ CT_ENUM, /* Enumeration. */
+ CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */
+ CT_FUNC, /* Function. */
+ CT_TYPEDEF, /* Typedef. */
+ CT_ATTRIB, /* Miscellaneous attributes. */
+ /* Internal element types. */
+ CT_FIELD, /* Struct/union field or function parameter. */
+ CT_BITFIELD, /* Struct/union bitfield. */
+ CT_CONSTVAL, /* Constant value. */
+ CT_EXTERN, /* External reference. */
+ CT_KW /* Keyword. */
+};
+
+LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR);
+LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT);
+
+/*
+** ---------- info ------------
+** |type flags... A cid | size | sib | next | name |
+** +----------------------------+--------+-------+-------+-------+--
+** |NUM BFcvUL.. A | size | | type | |
+** |STRUCT ..cvU..V A | size | field | name? | name? |
+** |PTR ..cvR... A cid | size | | type | |
+** |ARRAY VCcv...V A cid | size | | type | |
+** |VOID ..cv.... A | size | | type | |
+** |ENUM A cid | size | const | name? | name? |
+** |FUNC ....VS.. cc cid | nargs | field | name? | name? |
+** |TYPEDEF cid | | | name | name |
+** |ATTRIB attrnum cid | attr | sib? | type? | |
+** |FIELD cid | offset | field | | name? |
+** |BITFIELD B.cvU csz bsz pos | offset | field | | name? |
+** |CONSTVAL c cid | value | const | name | name |
+** |EXTERN cid | | sib? | name | name |
+** |KW tok | size | | name | name |
+** +----------------------------+--------+-------+-------+-------+--
+** ^^ ^^--- bits used for C type conversion dispatch
+*/
+
+/* C type info flags. TFFArrrr */
+#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */
+#define CTF_FP 0x04000000u /* Floating-point: NUM. */
+#define CTF_CONST 0x02000000u /* Const qualifier. */
+#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */
+#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */
+#define CTF_LONG 0x00400000u /* Long: NUM. */
+#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */
+#define CTF_REF 0x00800000u /* Reference: PTR. */
+#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */
+#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */
+#define CTF_UNION 0x00800000u /* Union: STRUCT. */
+#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */
+#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */
+
+#define CTF_QUAL (CTF_CONST|CTF_VOLATILE)
+#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN)
+#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0)
+
+/* Flags used in parser. .F.Ammvf cp->attr */
+#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */
+#define CTFP_PACKED 0x00000002u /* cp->attr */
+/* ...C...f cp->fattr */
+#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */
+
+/* C type info bitfields. */
+#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */
+#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */
+#define CTSHIFT_NUM 28
+#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */
+#define CTSHIFT_ALIGN 16
+#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */
+#define CTSHIFT_ATTRIB 16
+#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */
+#define CTSHIFT_CCONV 16
+#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */
+#define CTSHIFT_REGPARM 18
+/* Bitfields only used in parser. */
+#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */
+#define CTSHIFT_VSIZEP 4
+#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */
+#define CTSHIFT_MSIZEP 8
+
+/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */
+#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bits. */
+#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */
+#define CTMASK_BITPOS 127
+#define CTMASK_BITBSZ 127
+#define CTMASK_BITCSZ 127
+#define CTSHIFT_BITPOS 0
+#define CTSHIFT_BITBSZ 8
+#define CTSHIFT_BITCSZ 16
+
+#define CTF_INSERT(info, field, val) \
+ info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \
+ (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field)
+
+/* Calling conventions. ORDER CC */
+enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL };
+
+/* Attribute numbers. */
+enum {
+ CTA_NONE, /* Ignored attribute. Must be zero. */
+ CTA_QUAL, /* Unmerged qualifiers. */
+ CTA_ALIGN, /* Alignment override. */
+ CTA_SUBTYPE, /* Transparent sub-type. */
+ CTA_REDIR, /* Redirected symbol name. */
+ CTA_BAD, /* To catch bad IDs. */
+ CTA__MAX
+};
+
+/* Special sizes. */
+#define CTSIZE_INVALID 0xffffffffu
+
+typedef uint32_t CTInfo; /* Type info. */
+typedef uint32_t CTSize; /* Type size. */
+typedef uint32_t CTypeID; /* Type ID. */
+typedef uint16_t CTypeID1; /* Minimum-sized type ID. */
+
+/* C type table element. */
+typedef struct CType {
+ CTInfo info; /* Type info. */
+ CTSize size; /* Type size or other info. */
+ CTypeID1 sib; /* Sibling element. */
+ CTypeID1 next; /* Next element in hash chain. */
+ GCRef name; /* Element name (GCstr). */
+} CType;
+
+#define CTHASH_SIZE 128 /* Number of hash anchors. */
+#define CTHASH_MASK (CTHASH_SIZE-1)
+
+/* Simplify target-specific configuration. Checked in lj_ccall.h. */
+#define CCALL_MAX_GPR 8
+#define CCALL_MAX_FPR 8
+
+typedef LJ_ALIGN(8) union FPRCBArg { double d; float f[2]; } FPRCBArg;
+
+/* C callback state. Defined here, to avoid dragging in lj_ccall.h. */
+
+typedef LJ_ALIGN(8) struct CCallback {
+ FPRCBArg fpr[CCALL_MAX_FPR]; /* Arguments/results in FPRs. */
+ intptr_t gpr[CCALL_MAX_GPR]; /* Arguments/results in GPRs. */
+ intptr_t *stack; /* Pointer to arguments on stack. */
+ void *mcode; /* Machine code for callback func. pointers. */
+ CTypeID1 *cbid; /* Callback type table. */
+ MSize sizeid; /* Size of callback type table. */
+ MSize topid; /* Highest unused callback type table slot. */
+ MSize slot; /* Current callback slot. */
+} CCallback;
+
+/* C type state. */
+typedef struct CTState {
+ CType *tab; /* C type table. */
+ CTypeID top; /* Current top of C type table. */
+ MSize sizetab; /* Size of C type table. */
+ lua_State *L; /* Lua state (needed for errors and allocations). */
+ global_State *g; /* Global state. */
+ GCtab *finalizer; /* Map of cdata to finalizer. */
+ GCtab *miscmap; /* Map of -CTypeID to metatable and cb slot to func. */
+ CCallback cb; /* Temporary callback state. */
+ CTypeID1 hash[CTHASH_SIZE]; /* Hash anchors for C type table. */
+} CTState;
+
+#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags))
+#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN)
+#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB)
+
+#define ctype_type(info) ((info) >> CTSHIFT_NUM)
+#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID))
+#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN)
+#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB)
+#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS)
+#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ)
+#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ)
+#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP)
+#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP)
+#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV)
+
+/* Simple type checks. */
+#define ctype_isnum(info) (ctype_type((info)) == CT_NUM)
+#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID)
+#define ctype_isptr(info) (ctype_type((info)) == CT_PTR)
+#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY)
+#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT)
+#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC)
+#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM)
+#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF)
+#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB)
+#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD)
+#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD)
+#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL)
+#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN)
+#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE)
+
+/* Combined type and flag checks. */
+#define ctype_isinteger(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isinteger_or_bool(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0))
+#define ctype_isbool(info) \
+ (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL))
+#define ctype_isfp(info) \
+ (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP))
+
+#define ctype_ispointer(info) \
+ ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */
+#define ctype_isref(info) \
+ (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF))
+
+#define ctype_isrefarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0))
+#define ctype_isvector(info) \
+ (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR))
+#define ctype_iscomplex(info) \
+ (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX))
+
+#define ctype_isvltype(info) \
+ (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \
+ CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */
+#define ctype_isvlarray(info) \
+ (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA))
+
+#define ctype_isxattrib(info, at) \
+ (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \
+ CTINFO(CT_ATTRIB, CTATTRIB(at)))
+
+/* Target-dependent sizes and alignments. */
+#if LJ_64
+#define CTSIZE_PTR 8
+#define CTALIGN_PTR CTALIGN(3)
+#else
+#define CTSIZE_PTR 4
+#define CTALIGN_PTR CTALIGN(2)
+#endif
+
+#define CTINFO_REF(ref) \
+ CTINFO(CT_PTR, (CTF_CONST|CTF_REF|CTALIGN_PTR) + (ref))
+
+#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertCTS(c, ...) (lj_assertG_(cts->g, (c), __VA_ARGS__))
+#else
+#define lj_assertCTS(c, ...) ((void)cts)
+#endif
+
+/* -- Predefined types ---------------------------------------------------- */
+
+/* Target-dependent types. */
+#if LJ_TARGET_PPC
+#define CTTYDEFP(_) \
+ _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2))
+#else
+#define CTTYDEFP(_)
+#endif
+
+/* Common types. */
+#define CTTYDEF(_) \
+ _(NONE, 0, CT_ATTRIB, CTATTRIB(CTA_BAD)) \
+ _(VOID, -1, CT_VOID, CTALIGN(0)) \
+ _(CVOID, -1, CT_VOID, CTF_CONST|CTALIGN(0)) \
+ _(BOOL, 1, CT_NUM, CTF_BOOL|CTF_UNSIGNED|CTALIGN(0)) \
+ _(CCHAR, 1, CT_NUM, CTF_CONST|CTF_UCHAR|CTALIGN(0)) \
+ _(INT8, 1, CT_NUM, CTALIGN(0)) \
+ _(UINT8, 1, CT_NUM, CTF_UNSIGNED|CTALIGN(0)) \
+ _(INT16, 2, CT_NUM, CTALIGN(1)) \
+ _(UINT16, 2, CT_NUM, CTF_UNSIGNED|CTALIGN(1)) \
+ _(INT32, 4, CT_NUM, CTALIGN(2)) \
+ _(UINT32, 4, CT_NUM, CTF_UNSIGNED|CTALIGN(2)) \
+ _(INT64, 8, CT_NUM, CTF_LONG|CTALIGN(3)) \
+ _(UINT64, 8, CT_NUM, CTF_UNSIGNED|CTF_LONG|CTALIGN(3)) \
+ _(FLOAT, 4, CT_NUM, CTF_FP|CTALIGN(2)) \
+ _(DOUBLE, 8, CT_NUM, CTF_FP|CTALIGN(3)) \
+ _(COMPLEX_FLOAT, 8, CT_ARRAY, CTF_COMPLEX|CTALIGN(2)|CTID_FLOAT) \
+ _(COMPLEX_DOUBLE, 16, CT_ARRAY, CTF_COMPLEX|CTALIGN(3)|CTID_DOUBLE) \
+ _(P_VOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_VOID) \
+ _(P_CVOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CVOID) \
+ _(P_CCHAR, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CCHAR) \
+ _(P_UINT8, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_UINT8) \
+ _(A_CCHAR, -1, CT_ARRAY, CTF_CONST|CTALIGN(0)|CTID_CCHAR) \
+ _(CTYPEID, 4, CT_ENUM, CTALIGN(2)|CTID_INT32) \
+ CTTYDEFP(_) \
+ /* End of type list. */
+
+/* Public predefined type IDs. */
+enum {
+#define CTTYIDDEF(id, sz, ct, info) CTID_##id,
+CTTYDEF(CTTYIDDEF)
+#undef CTTYIDDEF
+ /* Predefined typedefs and keywords follow. */
+ CTID_MAX = 65536
+};
+
+/* Target-dependent type IDs. */
+#if LJ_64
+#define CTID_INT_PSZ CTID_INT64
+#define CTID_UINT_PSZ CTID_UINT64
+#else
+#define CTID_INT_PSZ CTID_INT32
+#define CTID_UINT_PSZ CTID_UINT32
+#endif
+
+#if LJ_ABI_WIN
+#define CTID_WCHAR CTID_UINT16
+#elif LJ_TARGET_PPC
+#define CTID_WCHAR CTID_LINT32
+#else
+#define CTID_WCHAR CTID_INT32
+#endif
+
+/* -- C tokens and keywords ----------------------------------------------- */
+
+/* C lexer keywords. */
+#define CTOKDEF(_) \
+ _(IDENT, "<identifier>") _(STRING, "<string>") \
+ _(INTEGER, "<integer>") _(EOF, "<eof>") \
+ _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \
+ _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->")
+
+/* Simple declaration specifiers. */
+#define CDSDEF(_) \
+ _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \
+ _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \
+ _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \
+ _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER)
+
+/* C keywords. */
+#define CKWDEF(_) \
+ CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \
+ _(DECLSPEC) _(CCDECL) _(PTRSZ) \
+ _(STRUCT) _(UNION) _(ENUM) \
+ _(SIZEOF) _(ALIGNOF)
+
+/* C token numbers. */
+enum {
+ CTOK_OFS = 255,
+#define CTOKNUM(name, sym) CTOK_##name,
+#define CKWNUM(name) CTOK_##name,
+CTOKDEF(CTOKNUM)
+CKWDEF(CKWNUM)
+#undef CTOKNUM
+#undef CKWNUM
+ CTOK_FIRSTDECL = CTOK_VOID,
+ CTOK_FIRSTSCL = CTOK_TYPEDEF,
+ CTOK_LASTDECLFLAG = CTOK_REGISTER,
+ CTOK_LASTDECL = CTOK_ENUM
+};
+
+/* Declaration specifier flags. */
+enum {
+#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)),
+CDSDEF(CDSFLAG)
+#undef CDSFLAG
+ CDF__END
+};
+
+#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER)
+
+/* -- C type management --------------------------------------------------- */
+
+#define ctype_ctsG(g) (mref((g)->ctype_state, CTState))
+
+/* Get C type state. */
+static LJ_AINLINE CTState *ctype_cts(lua_State *L)
+{
+ CTState *cts = ctype_ctsG(G(L));
+ cts->L = L; /* Save L for errors and allocations. */
+ return cts;
+}
+
+/* Load FFI library on-demand. */
+#define ctype_loadffi(L) \
+ do { \
+ if (!ctype_ctsG(G(L))) { \
+ ptrdiff_t oldtop = (char *)L->top - mref(L->stack, char); \
+ luaopen_ffi(L); \
+ L->top = (TValue *)(mref(L->stack, char) + oldtop); \
+ } \
+ } while (0)
+
+/* Save and restore state of C type table. */
+#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts)
+#define LJ_CTYPE_RESTORE(cts) \
+ ((cts)->top = savects_.top, \
+ memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash)))
+
+/* Check C type ID for validity when assertions are enabled. */
+static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
+{
+ UNUSED(cts);
+ lj_assertCTS(id > 0 && id < cts->top, "bad CTID %d", id);
+ return id;
+}
+
+/* Get C type for C type ID. */
+static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
+{
+ return &cts->tab[ctype_check(cts, id)];
+}
+
+/* Get C type ID for a C type. */
+#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab))
+
+/* Get child C type. */
+static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
+{
+ lj_assertCTS(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
+ ctype_isbitfield(ct->info)),
+ "ctype %08x has no children", ct->info);
+ return ctype_get(cts, ctype_cid(ct->info));
+}
+
+/* Get raw type for a C type ID. */
+static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id)
+{
+ CType *ct = ctype_get(cts, id);
+ while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct);
+ return ct;
+}
+
+/* Get raw type of the child of a C type. */
+static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct)
+{
+ do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info));
+ return ct;
+}
+
+/* Set the name of a C type table element. */
+static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s)
+{
+ /* NOBARRIER: mark string as fixed -- the C type table is never collected. */
+ fixstring(s);
+ setgcref(ct->name, obj2gco(s));
+}
+
+LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp);
+LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size);
+LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id);
+LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name,
+ uint32_t tmask);
+LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name,
+ CTSize *ofs, CTInfo *qual);
+#define lj_ctype_getfield(cts, ct, name, ofs) \
+ lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL)
+LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id);
+LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem);
+LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp);
+LJ_FUNC CTInfo lj_ctype_info_raw(CTState *cts, CTypeID id, CTSize *szp);
+LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm);
+LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name);
+LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned);
+LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size);
+LJ_FUNC CTState *lj_ctype_init(lua_State *L);
+LJ_FUNC void lj_ctype_freestate(global_State *g);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_debug.c b/libs/luajit-cmake/luajit/src/lj_debug.c
new file mode 100644
index 0000000..112f535
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_debug.c
@@ -0,0 +1,705 @@
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_debug_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_buf.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_strfmt.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+/* -- Frames -------------------------------------------------------------- */
+
+/* Get frame corresponding to a level. */
+cTValue *lj_debug_frame(lua_State *L, int level, int *size)
+{
+ cTValue *frame, *nextframe, *bot = tvref(L->stack)+LJ_FR2;
+ /* Traverse frames backwards. */
+ for (nextframe = frame = L->base-1; frame > bot; ) {
+ if (frame_gc(frame) == obj2gco(L))
+ level++; /* Skip dummy frames. See lj_err_optype_call(). */
+ if (level-- == 0) {
+ *size = (int)(nextframe - frame);
+ return frame; /* Level found. */
+ }
+ nextframe = frame;
+ if (frame_islua(frame)) {
+ frame = frame_prevl(frame);
+ } else {
+ if (frame_isvarg(frame))
+ level++; /* Skip vararg pseudo-frame. */
+ frame = frame_prevd(frame);
+ }
+ }
+ *size = level;
+ return NULL; /* Level not found. */
+}
+
+/* Invalid bytecode position. */
+#define NO_BCPOS (~(BCPos)0)
+
+/* Return bytecode position for function/frame or NO_BCPOS. */
+static BCPos debug_framepc(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ const BCIns *ins;
+ GCproto *pt;
+ BCPos pos;
+ lj_assertL(fn->c.gct == ~LJ_TFUNC || fn->c.gct == ~LJ_TTHREAD,
+ "function or frame expected");
+ if (!isluafunc(fn)) { /* Cannot derive a PC for non-Lua functions. */
+ return NO_BCPOS;
+ } else if (nextframe == NULL) { /* Lua function on top. */
+ void *cf = cframe_raw(L->cframe);
+ if (cf == NULL || (char *)cframe_pc(cf) == (char *)cframe_L(cf))
+ return NO_BCPOS;
+ ins = cframe_pc(cf); /* Only happens during error/hook handling. */
+ } else {
+ if (frame_islua(nextframe)) {
+ ins = frame_pc(nextframe);
+ } else if (frame_iscont(nextframe)) {
+ ins = frame_contpc(nextframe);
+ } else {
+ /* Lua function below errfunc/gc/hook: find cframe to get the PC. */
+ void *cf = cframe_raw(L->cframe);
+ TValue *f = L->base-1;
+ for (;;) {
+ if (cf == NULL)
+ return NO_BCPOS;
+ while (cframe_nres(cf) < 0) {
+ if (f >= restorestack(L, -cframe_nres(cf)))
+ break;
+ cf = cframe_raw(cframe_prev(cf));
+ if (cf == NULL)
+ return NO_BCPOS;
+ }
+ if (f < nextframe)
+ break;
+ if (frame_islua(f)) {
+ f = frame_prevl(f);
+ } else {
+ if (frame_isc(f) || (frame_iscont(f) && frame_iscont_fficb(f)))
+ cf = cframe_raw(cframe_prev(cf));
+ f = frame_prevd(f);
+ }
+ }
+ ins = cframe_pc(cf);
+ if (!ins) return NO_BCPOS;
+ }
+ }
+ pt = funcproto(fn);
+ pos = proto_bcpos(pt, ins) - 1;
+#if LJ_HASJIT
+ if (pos > pt->sizebc) { /* Undo the effects of lj_trace_exit for JLOOP. */
+ GCtrace *T = (GCtrace *)((char *)(ins-1) - offsetof(GCtrace, startins));
+ lj_assertL(bc_isret(bc_op(ins[-1])), "return bytecode expected");
+ pos = proto_bcpos(pt, mref(T->startpc, const BCIns));
+ }
+#endif
+ return pos;
+}
+
+/* -- Line numbers -------------------------------------------------------- */
+
+/* Get line number for a bytecode position. */
+BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc)
+{
+ const void *lineinfo = proto_lineinfo(pt);
+ if (pc <= pt->sizebc && lineinfo) {
+ BCLine first = pt->firstline;
+ if (pc == pt->sizebc) return first + pt->numline;
+ if (pc-- == 0) return first;
+ if (pt->numline < 256)
+ return first + (BCLine)((const uint8_t *)lineinfo)[pc];
+ else if (pt->numline < 65536)
+ return first + (BCLine)((const uint16_t *)lineinfo)[pc];
+ else
+ return first + (BCLine)((const uint32_t *)lineinfo)[pc];
+ }
+ return 0;
+}
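+
+/* Worked example: for a prototype with firstline = 100 and numline = 20,
+** every delta fits in one byte, so lineinfo holds uint8_t entries and
+**
+**   lj_debug_line(pt, pc)  ==  100 + ((const uint8_t *)lineinfo)[pc-1]
+**
+** for 0 < pc < pt->sizebc. Prototypes spanning 256 or more (resp. 65536
+** or more) lines transparently switch to uint16_t (resp. uint32_t).
+*/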
+
+/* Get line number for function/frame. */
+static BCLine debug_frameline(lua_State *L, GCfunc *fn, cTValue *nextframe)
+{
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ lj_assertL(pc <= pt->sizebc, "PC out of range");
+ return lj_debug_line(pt, pc);
+ }
+ return -1;
+}
+
+/* -- Variable names ------------------------------------------------------ */
+
+/* Get name of a local variable from slot number and PC. */
+static const char *debug_varname(const GCproto *pt, BCPos pc, BCReg slot)
+{
+ const char *p = (const char *)proto_varinfo(pt);
+ if (p) {
+ BCPos lastpc = 0;
+ for (;;) {
+ const char *name = p;
+ uint32_t vn = *(const uint8_t *)p;
+ BCPos startpc, endpc;
+ if (vn < VARNAME__MAX) {
+ if (vn == VARNAME_END) break; /* End of varinfo. */
+ } else {
+ do { p++; } while (*(const uint8_t *)p); /* Skip over variable name. */
+ }
+ p++;
+ lastpc = startpc = lastpc + lj_buf_ruleb128(&p);
+ if (startpc > pc) break;
+ endpc = startpc + lj_buf_ruleb128(&p);
+ if (pc < endpc && slot-- == 0) {
+ if (vn < VARNAME__MAX) {
+#define VARNAMESTR(name, str) str "\0"
+ name = VARNAMEDEF(VARNAMESTR);
+#undef VARNAMESTR
+ if (--vn) while (*name++ || --vn) ;
+ }
+ return name;
+ }
+ }
+ }
+ return NULL;
+}
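+
+/* Format note: proto_varinfo() is a byte stream of records, roughly
+**
+**   { name, uleb128 startpc-delta, uleb128 scope-length }
+**
+** where name is either a single byte below VARNAME__MAX (an ordinal into
+** VARNAMEDEF, with VARNAME_END terminating the stream) or an inline
+** NUL-terminated string. The decoder above walks records in startpc order
+** and stops once startpc exceeds the queried pc.
+*/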
+
+/* Get name of local variable from 1-based slot number and function/frame. */
+static TValue *debug_localname(lua_State *L, const lua_Debug *ar,
+ const char **name, BCReg slot1)
+{
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ TValue *frame = tvref(L->stack) + offset;
+ TValue *nextframe = size ? frame + size : NULL;
+ GCfunc *fn = frame_func(frame);
+ BCPos pc = debug_framepc(L, fn, nextframe);
+ if (!nextframe) nextframe = L->top+LJ_FR2;
+ if ((int)slot1 < 0) { /* Negative slot number is for varargs. */
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ if ((pt->flags & PROTO_VARARG)) {
+ slot1 = pt->numparams + (BCReg)(-(int)slot1);
+ if (frame_isvarg(frame)) { /* Vararg frame has been set up? (pc!=0) */
+ nextframe = frame;
+ frame = frame_prevd(frame);
+ }
+ if (frame + slot1+LJ_FR2 < nextframe) {
+ *name = "(*vararg)";
+ return frame+slot1;
+ }
+ }
+ }
+ return NULL;
+ }
+ if (pc != NO_BCPOS &&
+ (*name = debug_varname(funcproto(fn), pc, slot1-1)) != NULL)
+ ;
+ else if (slot1 > 0 && frame + slot1+LJ_FR2 < nextframe)
+ *name = "(*temporary)";
+ return frame+slot1;
+}
+
+/* Get name of upvalue. */
+const char *lj_debug_uvname(GCproto *pt, uint32_t idx)
+{
+ const uint8_t *p = proto_uvinfo(pt);
+ lj_assertX(idx < pt->sizeuv, "bad upvalue index");
+ if (!p) return "";
+ if (idx) while (*p++ || --idx) ;
+ return (const char *)p;
+}
+
+/* Get name and value of upvalue. */
+const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp, GCobj **op)
+{
+ if (tvisfunc(o)) {
+ GCfunc *fn = funcV(o);
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ if (idx < pt->sizeuv) {
+ GCobj *uvo = gcref(fn->l.uvptr[idx]);
+ *tvp = uvval(&uvo->uv);
+ *op = uvo;
+ return lj_debug_uvname(pt, idx);
+ }
+ } else {
+ if (idx < fn->c.nupvalues) {
+ *tvp = &fn->c.upvalue[idx];
+ *op = obj2gco(fn);
+ return "";
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Deduce name of an object from slot number and PC. */
+const char *lj_debug_slotname(GCproto *pt, const BCIns *ip, BCReg slot,
+ const char **name)
+{
+ const char *lname;
+restart:
+ lname = debug_varname(pt, proto_bcpos(pt, ip), slot);
+ if (lname != NULL) { *name = lname; return "local"; }
+ while (--ip > proto_bc(pt)) {
+ BCIns ins = *ip;
+ BCOp op = bc_op(ins);
+ BCReg ra = bc_a(ins);
+ if (bcmode_a(op) == BCMbase) {
+ if (slot >= ra && (op != BC_KNIL || slot <= bc_d(ins)))
+ return NULL;
+ } else if (bcmode_a(op) == BCMdst && ra == slot) {
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ if (ra == slot) { slot = bc_d(ins); goto restart; }
+ break;
+ case BC_GGET:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_d(ins))));
+ return "global";
+ case BC_TGETS:
+ *name = strdata(gco2str(proto_kgc(pt, ~(ptrdiff_t)bc_c(ins))));
+ if (ip > proto_bc(pt)) {
+ BCIns insp = ip[-1];
+ if (bc_op(insp) == BC_MOV && bc_a(insp) == ra+1+LJ_FR2 &&
+ bc_d(insp) == bc_b(ins))
+ return "method";
+ }
+ return "field";
+ case BC_UGET:
+ *name = lj_debug_uvname(pt, bc_d(ins));
+ return "upvalue";
+ default:
+ return NULL;
+ }
+ }
+ }
+ return NULL;
+}
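+
+/* Schematic example: a method call t:foo(x) compiles to something like
+**
+**   MOV    base+1+LJ_FR2, t    ; copy the implicit self argument
+**   TGETS  base, t, "foo"      ; base = t.foo
+**   CALL   base, ...
+**
+** so walking back from the CALL, the BC_TGETS case above sets *name to
+** "foo" and returns "method" due to the preceding MOV; a plain t.foo(x)
+** lacks that MOV and yields "field", and f(x) with a global f yields
+** "global".
+*/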
+
+/* Deduce function name from caller of a frame. */
+const char *lj_debug_funcname(lua_State *L, cTValue *frame, const char **name)
+{
+ cTValue *pframe;
+ GCfunc *fn;
+ BCPos pc;
+ if (frame <= tvref(L->stack)+LJ_FR2)
+ return NULL;
+ if (frame_isvarg(frame))
+ frame = frame_prevd(frame);
+ pframe = frame_prev(frame);
+ fn = frame_func(pframe);
+ pc = debug_framepc(L, fn, frame);
+ if (pc != NO_BCPOS) {
+ GCproto *pt = funcproto(fn);
+ const BCIns *ip = &proto_bc(pt)[check_exp(pc < pt->sizebc, pc)];
+ MMS mm = bcmode_mm(bc_op(*ip));
+ if (mm == MM_call) {
+ BCReg slot = bc_a(*ip);
+ if (bc_op(*ip) == BC_ITERC) slot -= 3;
+ return lj_debug_slotname(pt, ip, slot, name);
+ } else if (mm != MM__MAX) {
+ *name = strdata(mmname_str(G(L), mm));
+ return "metamethod";
+ }
+ }
+ return NULL;
+}
+
+/* -- Source code locations ----------------------------------------------- */
+
+/* Generate shortened source name. */
+void lj_debug_shortname(char *out, GCstr *str, BCLine line)
+{
+ const char *src = strdata(str);
+ if (*src == '=') {
+ strncpy(out, src+1, LUA_IDSIZE); /* Remove first char. */
+ out[LUA_IDSIZE-1] = '\0'; /* Ensures null termination. */
+ } else if (*src == '@') { /* Output "source", or "...source". */
+ size_t len = str->len-1;
+ src++; /* Skip the `@' */
+ if (len >= LUA_IDSIZE) {
+ src += len-(LUA_IDSIZE-4); /* Get last part of file name. */
+ *out++ = '.'; *out++ = '.'; *out++ = '.';
+ }
+ strcpy(out, src);
+ } else { /* Output [string "string"] or [builtin:name]. */
+ size_t len; /* Length, up to first control char. */
+ for (len = 0; len < LUA_IDSIZE-12; len++)
+ if (((const unsigned char *)src)[len] < ' ') break;
+ strcpy(out, line == ~(BCLine)0 ? "[builtin:" : "[string \""); out += 9;
+ if (src[len] != '\0') { /* Must truncate? */
+ if (len > LUA_IDSIZE-15) len = LUA_IDSIZE-15;
+ strncpy(out, src, len); out += len;
+ strcpy(out, "..."); out += 3;
+ } else {
+ strcpy(out, src); out += len;
+ }
+ strcpy(out, line == ~(BCLine)0 ? "]" : "\"]");
+ }
+}
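+
+/* Examples for the cases above (with the default LUA_IDSIZE of 60):
+**   "=stdin"              -> stdin               (leading '=' stripped)
+**   "@script.lua"         -> script.lua          (short names kept whole)
+**   "@<path of >= 60 chars>" -> ...<last 56 chars of the path>
+**   "return 1"            -> [string "return 1"] (inline chunk)
+** A builtin (line == ~(BCLine)0) is shown as [builtin:<name>] instead.
+*/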
+
+/* Add current location of a frame to error message. */
+void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe)
+{
+ if (frame) {
+ GCfunc *fn = frame_func(frame);
+ if (isluafunc(fn)) {
+ BCLine line = debug_frameline(L, fn, nextframe);
+ if (line >= 0) {
+ GCproto *pt = funcproto(fn);
+ char buf[LUA_IDSIZE];
+ lj_debug_shortname(buf, proto_chunkname(pt), pt->firstline);
+ lj_strfmt_pushf(L, "%s:%d: %s", buf, line, msg);
+ return;
+ }
+ }
+ }
+ lj_strfmt_pushf(L, "%s", msg);
+}
+
+/* Push location string for a bytecode position to Lua stack. */
+void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc)
+{
+ GCstr *name = proto_chunkname(pt);
+ const char *s = strdata(name);
+ MSize i, len = name->len;
+ BCLine line = lj_debug_line(pt, pc);
+ if (pt->firstline == ~(BCLine)0) {
+ lj_strfmt_pushf(L, "builtin:%s", s);
+ } else if (*s == '@') {
+ s++; len--;
+ for (i = len; i > 0; i--)
+ if (s[i] == '/' || s[i] == '\\') {
+ s += i+1;
+ break;
+ }
+ lj_strfmt_pushf(L, "%s:%d", s, line);
+ } else if (len > 40) {
+ lj_strfmt_pushf(L, "%p:%d", pt, line);
+ } else if (*s == '=') {
+ lj_strfmt_pushf(L, "%s:%d", s+1, line);
+ } else {
+ lj_strfmt_pushf(L, "\"%s\":%d", s, line);
+ }
+}
+
+/* -- Public debug API ---------------------------------------------------- */
+
+/* lua_getupvalue() and lua_setupvalue() are in lj_api.c. */
+
+LUA_API const char *lua_getlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name = NULL;
+ if (ar) {
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name) {
+ copyTV(L, L->top, o);
+ incr_top(L);
+ }
+ } else if (tvisfunc(L->top-1) && isluafunc(funcV(L->top-1))) {
+ name = debug_varname(funcproto(funcV(L->top-1)), 0, (BCReg)n-1);
+ }
+ return name;
+}
+
+LUA_API const char *lua_setlocal(lua_State *L, const lua_Debug *ar, int n)
+{
+ const char *name = NULL;
+ TValue *o = debug_localname(L, ar, &name, (BCReg)n);
+ if (name)
+ copyTV(L, o, L->top-1);
+ L->top--;
+ return name;
+}
+
+int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar, int ext)
+{
+ int opt_f = 0, opt_L = 0;
+ TValue *frame = NULL;
+ TValue *nextframe = NULL;
+ GCfunc *fn;
+ if (*what == '>') {
+ TValue *func = L->top - 1;
+ if (!tvisfunc(func)) return 0;
+ fn = funcV(func);
+ L->top--;
+ what++;
+ } else {
+ uint32_t offset = (uint32_t)ar->i_ci & 0xffff;
+ uint32_t size = (uint32_t)ar->i_ci >> 16;
+ lj_assertL(offset != 0, "bad frame offset");
+ frame = tvref(L->stack) + offset;
+ if (size) nextframe = frame + size;
+ lj_assertL(frame <= tvref(L->maxstack) &&
+ (!nextframe || nextframe <= tvref(L->maxstack)),
+ "broken frame chain");
+ fn = frame_func(frame);
+ lj_assertL(fn->c.gct == ~LJ_TFUNC, "bad frame function");
+ }
+ for (; *what; what++) {
+ if (*what == 'S') {
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ BCLine firstline = pt->firstline;
+ GCstr *name = proto_chunkname(pt);
+ ar->source = strdata(name);
+ lj_debug_shortname(ar->short_src, name, pt->firstline);
+ ar->linedefined = (int)firstline;
+ ar->lastlinedefined = (int)(firstline + pt->numline);
+ ar->what = (firstline || !pt->numline) ? "Lua" : "main";
+ } else {
+ ar->source = "=[C]";
+ ar->short_src[0] = '[';
+ ar->short_src[1] = 'C';
+ ar->short_src[2] = ']';
+ ar->short_src[3] = '\0';
+ ar->linedefined = -1;
+ ar->lastlinedefined = -1;
+ ar->what = "C";
+ }
+ } else if (*what == 'l') {
+ ar->currentline = frame ? debug_frameline(L, fn, nextframe) : -1;
+ } else if (*what == 'u') {
+ ar->nups = fn->c.nupvalues;
+ if (ext) {
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ ar->nparams = pt->numparams;
+ ar->isvararg = !!(pt->flags & PROTO_VARARG);
+ } else {
+ ar->nparams = 0;
+ ar->isvararg = 1;
+ }
+ }
+ } else if (*what == 'n') {
+ ar->namewhat = frame ? lj_debug_funcname(L, frame, &ar->name) : NULL;
+ if (ar->namewhat == NULL) {
+ ar->namewhat = "";
+ ar->name = NULL;
+ }
+ } else if (*what == 'f') {
+ opt_f = 1;
+ } else if (*what == 'L') {
+ opt_L = 1;
+ } else {
+ return 0; /* Bad option. */
+ }
+ }
+ if (opt_f) {
+ setfuncV(L, L->top, fn);
+ incr_top(L);
+ }
+ if (opt_L) {
+ if (isluafunc(fn)) {
+ GCtab *t = lj_tab_new(L, 0, 0);
+ GCproto *pt = funcproto(fn);
+ const void *lineinfo = proto_lineinfo(pt);
+ if (lineinfo) {
+ BCLine first = pt->firstline;
+ int sz = pt->numline < 256 ? 1 : pt->numline < 65536 ? 2 : 4;
+ MSize i, szl = pt->sizebc-1;
+ for (i = 0; i < szl; i++) {
+ BCLine line = first +
+ (sz == 1 ? (BCLine)((const uint8_t *)lineinfo)[i] :
+ sz == 2 ? (BCLine)((const uint16_t *)lineinfo)[i] :
+ (BCLine)((const uint32_t *)lineinfo)[i]);
+ setboolV(lj_tab_setint(L, t, line), 1);
+ }
+ }
+ settabV(L, L->top, t);
+ } else {
+ setnilV(L->top);
+ }
+ incr_top(L);
+ }
+ return 1; /* Ok. */
+}
+
+LUA_API int lua_getinfo(lua_State *L, const char *what, lua_Debug *ar)
+{
+ return lj_debug_getinfo(L, what, (lj_Debug *)ar, 0);
+}
+
+LUA_API int lua_getstack(lua_State *L, int level, lua_Debug *ar)
+{
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ if (frame) {
+ ar->i_ci = (size << 16) + (int)(frame - tvref(L->stack));
+ return 1;
+ } else {
+ ar->i_ci = level - size;
+ return 0;
+ }
+}
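+
+/* The i_ci encoding used above and by debug_localname()/lj_debug_getinfo():
+** the low 16 bits hold the frame's slot offset from the stack base and the
+** high 16 bits hold the frame size, with size == 0 denoting the top frame
+** (whose end is simply L->top). Decoding is therefore:
+**
+**   offset = (uint32_t)ar->i_ci & 0xffff;
+**   size   = (uint32_t)ar->i_ci >> 16;
+**   frame  = tvref(L->stack) + offset;
+*/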
+
+#if LJ_HASPROFILE
+/* Put the chunkname into a buffer. */
+static int debug_putchunkname(SBuf *sb, GCproto *pt, int pathstrip)
+{
+ GCstr *name = proto_chunkname(pt);
+ const char *p = strdata(name);
+ if (pt->firstline == ~(BCLine)0) {
+ lj_buf_putmem(sb, "[builtin:", 9);
+ lj_buf_putstr(sb, name);
+ lj_buf_putb(sb, ']');
+ return 0;
+ }
+ if (*p == '=' || *p == '@') {
+ MSize len = name->len-1;
+ p++;
+ if (pathstrip) {
+ int i;
+ for (i = len-1; i >= 0; i--)
+ if (p[i] == '/' || p[i] == '\\') {
+ len -= i+1;
+ p = p+i+1;
+ break;
+ }
+ }
+ lj_buf_putmem(sb, p, len);
+ } else {
+ lj_buf_putmem(sb, "[string]", 8);
+ }
+ return 1;
+}
+
+/* Put a compact stack dump into a buffer. */
+void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt, int depth)
+{
+ int level = 0, dir = 1, pathstrip = 1;
+ MSize lastlen = 0;
+ if (depth < 0) { level = ~depth; depth = dir = -1; } /* Reverse frames. */
+ while (level != depth) { /* Loop through all frames. */
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ if (frame) {
+ cTValue *nextframe = size ? frame+size : NULL;
+ GCfunc *fn = frame_func(frame);
+ const uint8_t *p = (const uint8_t *)fmt;
+ int c;
+ while ((c = *p++)) {
+ switch (c) {
+ case 'p': /* Preserve full path. */
+ pathstrip = 0;
+ break;
+ case 'F': case 'f': { /* Dump function name. */
+ const char *name;
+ const char *what = lj_debug_funcname(L, frame, &name);
+ if (what) {
+ if (c == 'F' && isluafunc(fn)) { /* Dump module:name for 'F'. */
+ GCproto *pt = funcproto(fn);
+ if (pt->firstline != ~(BCLine)0) { /* Not a bytecode builtin. */
+ debug_putchunkname(sb, pt, pathstrip);
+ lj_buf_putb(sb, ':');
+ }
+ }
+ lj_buf_putmem(sb, name, (MSize)strlen(name));
+ break;
+ } /* else: can't derive a name, dump module:line. */
+ }
+ /* fallthrough */
+ case 'l': /* Dump module:line. */
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ if (debug_putchunkname(sb, pt, pathstrip)) {
+ /* Regular Lua function. */
+ BCLine line = c == 'l' ? debug_frameline(L, fn, nextframe) :
+ pt->firstline;
+ lj_buf_putb(sb, ':');
+ lj_strfmt_putint(sb, line >= 0 ? line : pt->firstline);
+ }
+ } else if (isffunc(fn)) { /* Dump numbered builtins. */
+ lj_buf_putmem(sb, "[builtin#", 9);
+ lj_strfmt_putint(sb, fn->c.ffid);
+ lj_buf_putb(sb, ']');
+ } else { /* Dump C function address. */
+ lj_buf_putb(sb, '@');
+ lj_strfmt_putptr(sb, fn->c.f);
+ }
+ break;
+ case 'Z': /* Zap trailing separator. */
+ lastlen = sbuflen(sb);
+ break;
+ default:
+ lj_buf_putb(sb, c);
+ break;
+ }
+ }
+ } else if (dir == 1) {
+ break;
+ } else {
+ level -= size; /* Reverse frame order: quickly skip missing level. */
+ }
+ level += dir;
+ }
+ if (lastlen)
+ sb->w = sb->b + lastlen; /* Zap trailing separator. */
+}
+#endif
+
+/* Number of frames for the leading and trailing part of a traceback. */
+#define TRACEBACK_LEVELS1 12
+#define TRACEBACK_LEVELS2 10
+
+LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
+ int level)
+{
+ int top = (int)(L->top - L->base);
+ int lim = TRACEBACK_LEVELS1;
+ lua_Debug ar;
+ if (msg) lua_pushfstring(L, "%s\n", msg);
+ lua_pushliteral(L, "stack traceback:");
+ while (lua_getstack(L1, level++, &ar)) {
+ GCfunc *fn;
+ if (level > lim) {
+ if (!lua_getstack(L1, level + TRACEBACK_LEVELS2, &ar)) {
+ level--;
+ } else {
+ lua_pushliteral(L, "\n\t...");
+ lua_getstack(L1, -10, &ar);
+ level = ar.i_ci - TRACEBACK_LEVELS2;
+ }
+ lim = 2147483647;
+ continue;
+ }
+ lua_getinfo(L1, "Snlf", &ar);
+ fn = funcV(L1->top-1); L1->top--;
+ if (isffunc(fn) && !*ar.namewhat)
+ lua_pushfstring(L, "\n\t[builtin#%d]:", fn->c.ffid);
+ else
+ lua_pushfstring(L, "\n\t%s:", ar.short_src);
+ if (ar.currentline > 0)
+ lua_pushfstring(L, "%d:", ar.currentline);
+ if (*ar.namewhat) {
+ lua_pushfstring(L, " in function " LUA_QS, ar.name);
+ } else {
+ if (*ar.what == 'm') {
+ lua_pushliteral(L, " in main chunk");
+ } else if (*ar.what == 'C') {
+ lua_pushfstring(L, " at %p", fn->c.f);
+ } else {
+ lua_pushfstring(L, " in function <%s:%d>",
+ ar.short_src, ar.linedefined);
+ }
+ }
+ if ((int)(L->top - L->base) - top >= 15)
+ lua_concat(L, (int)(L->top - L->base) - top);
+ }
+ lua_concat(L, (int)(L->top - L->base) - top);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_debug.h b/libs/luajit-cmake/luajit/src/lj_debug.h
new file mode 100644
index 0000000..28127ae
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_debug.h
@@ -0,0 +1,66 @@
+/*
+** Debugging and introspection.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEBUG_H
+#define _LJ_DEBUG_H
+
+#include "lj_obj.h"
+
+typedef struct lj_Debug {
+ /* Common fields. Must be in the same order as in lua.h. */
+ int event;
+ const char *name;
+ const char *namewhat;
+ const char *what;
+ const char *source;
+ int currentline;
+ int nups;
+ int linedefined;
+ int lastlinedefined;
+ char short_src[LUA_IDSIZE];
+ int i_ci;
+ /* Extended fields. Only valid if lj_debug_getinfo() is called with ext = 1.*/
+ int nparams;
+ int isvararg;
+} lj_Debug;
+
+LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size);
+LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc);
+LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx);
+LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp,
+ GCobj **op);
+LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
+ BCReg slot, const char **name);
+LJ_FUNC const char *lj_debug_funcname(lua_State *L, cTValue *frame,
+ const char **name);
+LJ_FUNC void lj_debug_shortname(char *out, GCstr *str, BCLine line);
+LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
+ cTValue *frame, cTValue *nextframe);
+LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
+LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar,
+ int ext);
+#if LJ_HASPROFILE
+LJ_FUNC void lj_debug_dumpstack(lua_State *L, SBuf *sb, const char *fmt,
+ int depth);
+#endif
+
+/* Fixed internal variable names. */
+#define VARNAMEDEF(_) \
+ _(FOR_IDX, "(for index)") \
+ _(FOR_STOP, "(for limit)") \
+ _(FOR_STEP, "(for step)") \
+ _(FOR_GEN, "(for generator)") \
+ _(FOR_STATE, "(for state)") \
+ _(FOR_CTL, "(for control)")
+
+enum {
+ VARNAME_END,
+#define VARNAMEENUM(name, str) VARNAME_##name,
+ VARNAMEDEF(VARNAMEENUM)
+#undef VARNAMEENUM
+ VARNAME__MAX
+};
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_def.h b/libs/luajit-cmake/luajit/src/lj_def.h
new file mode 100644
index 0000000..b61297a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_def.h
@@ -0,0 +1,381 @@
+/*
+** LuaJIT common internal definitions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DEF_H
+#define _LJ_DEF_H
+
+#include "lua.h"
+
+#if defined(_MSC_VER) && (_MSC_VER < 1700)
+/* Old MSVC is stuck in the last century and doesn't have C99's stdint.h. */
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#ifdef _WIN64
+typedef __int64 intptr_t;
+typedef unsigned __int64 uintptr_t;
+#else
+typedef __int32 intptr_t;
+typedef unsigned __int32 uintptr_t;
+#endif
+#elif defined(__symbian__)
+/* Cough. */
+typedef signed char int8_t;
+typedef short int int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+typedef unsigned char uint8_t;
+typedef unsigned short int uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef int intptr_t;
+typedef unsigned int uintptr_t;
+#else
+#include <stdint.h>
+#endif
+
+/* Needed everywhere. */
+#include <string.h>
+#include <stdlib.h>
+
+/* Various VM limits. */
+#define LJ_MAX_MEM32 0x7fffff00 /* Max. 32 bit memory allocation. */
+#define LJ_MAX_MEM64 ((uint64_t)1<<47) /* Max. 64 bit memory allocation. */
+/* Max. total memory allocation. */
+#define LJ_MAX_MEM (LJ_GC64 ? LJ_MAX_MEM64 : LJ_MAX_MEM32)
+#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
+#define LJ_MAX_STR LJ_MAX_MEM32 /* Max. string length. */
+#define LJ_MAX_BUF LJ_MAX_MEM32 /* Max. buffer length. */
+#define LJ_MAX_UDATA LJ_MAX_MEM32 /* Max. userdata length. */
+
+#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
+#define LJ_MAX_HBITS 26 /* Max. hash bits. */
+#define LJ_MAX_ABITS 28 /* Max. bits of array key. */
+#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
+#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
+
+#define LJ_MAX_LINE LJ_MAX_MEM32 /* Max. source code line number. */
+#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
+#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
+#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
+#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */
+#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
+
+#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
+#define LJ_STACK_EXTRA (5+2*LJ_FR2) /* Extra stack space (metamethods). */
+
+#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
+
+/* Minimum table/buffer sizes. */
+#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */
+#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */
+#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */
+#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
+#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
+#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
+
+/* JIT compiler limits. */
+#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
+#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */
+#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */
+
+/* Various macros. */
+#ifndef UNUSED
+#define UNUSED(x) ((void)(x)) /* to avoid warnings */
+#endif
+
+#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
+#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
+#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
+#define i64ptr(p) ((int64_t)(intptr_t)(void *)(p))
+#define u64ptr(p) ((uint64_t)(intptr_t)(void *)(p))
+#define igcptr(p) (LJ_GC64 ? i64ptr(p) : i32ptr(p))
+
+#define checki8(x) ((x) == (int32_t)(int8_t)(x))
+#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
+#define checki16(x) ((x) == (int32_t)(int16_t)(x))
+#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
+#define checki32(x) ((x) == (int32_t)(x))
+#define checku32(x) ((x) == (uint32_t)(x))
+#define checkptr31(x) (((uint64_t)(uintptr_t)(x) >> 31) == 0)
+#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
+#define checkptr47(x) (((uint64_t)(uintptr_t)(x) >> 47) == 0)
+#define checkptrGC(x) (LJ_GC64 ? checkptr47((x)) : LJ_64 ? checkptr31((x)) :1)
+
+/* Every half-decent C compiler transforms this into a rotate instruction. */
+#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1))))
+#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n)))
+
+/* A really naive Bloom filter. But sufficient for our needs. */
+typedef uintptr_t BloomFilter;
+#define BLOOM_MASK (8*sizeof(BloomFilter) - 1)
+#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK))
+#define bloomset(b, x) ((b) |= bloombit((x)))
+#define bloomtest(b, x) ((b) & bloombit((x)))
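+
+/* Usage sketch:
+**
+**   BloomFilter bf = 0;
+**   bloomset(bf, key1);           sets bit (key1 & BLOOM_MASK)
+**   if (bloomtest(bf, key2)) ...  possibly seen (false positives ok)
+**   else ...                      definitely not seen
+**
+** Each key maps to a single bit of the word, so a clear bit is a certain
+** miss while a set bit only means "maybe".
+*/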
+
+#if defined(__GNUC__) || defined(__clang__) || defined(__psp2__)
+
+#define LJ_NORET __attribute__((noreturn))
+#define LJ_ALIGN(n) __attribute__((aligned(n)))
+#define LJ_INLINE inline
+#define LJ_AINLINE inline __attribute__((always_inline))
+#define LJ_NOINLINE __attribute__((noinline))
+
+#if defined(__ELF__) || defined(__MACH__) || defined(__psp2__)
+#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__))
+#define LJ_NOAPI extern __attribute__((visibility("hidden")))
+#endif
+#endif
+
+/* Note: it's only beneficial to use fastcall on x86 and then only for up to
+** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only
+** indirect calls and related tail-called C functions are marked as fastcall.
+*/
+#if defined(__i386__)
+#define LJ_FASTCALL __attribute__((fastcall))
+#endif
+
+#define LJ_LIKELY(x) __builtin_expect(!!(x), 1)
+#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+#define lj_ffs(x) ((uint32_t)__builtin_ctz(x))
+/* Don't ask ... */
+#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__))
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r;
+}
+#else
+#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31))
+#endif
+
+#if defined(__arm__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+#if defined(__psp2__)
+ return __builtin_rev(x);
+#else
+ uint32_t r;
+#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\
+ __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
+ __asm__("rev %0, %1" : "=r" (r) : "r" (x));
+ return r;
+#else
+#ifdef __thumb__
+ r = x ^ lj_ror(x, 16);
+#else
+ __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x));
+#endif
+ return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8);
+#endif
+#endif
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __clang__
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ return (uint32_t)__builtin_bswap32((int32_t)x);
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return (uint64_t)__builtin_bswap64((int64_t)x);
+}
+#elif defined(__i386__) || defined(__x86_64__)
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+
+#if defined(__i386__)
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
+}
+#else
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
+}
+#endif
+#else
+static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
+{
+ return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24);
+}
+
+static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
+{
+ return (uint64_t)lj_bswap((uint32_t)(x >> 32)) |
+ ((uint64_t)lj_bswap((uint32_t)x) << 32);
+}
+#endif
+
+typedef union __attribute__((packed)) Unaligned16 {
+ uint16_t u;
+ uint8_t b[2];
+} Unaligned16;
+
+typedef union __attribute__((packed)) Unaligned32 {
+ uint32_t u;
+ uint8_t b[4];
+} Unaligned32;
+
+/* Unaligned load of uint16_t. */
+static LJ_AINLINE uint16_t lj_getu16(const void *p)
+{
+ return ((const Unaligned16 *)p)->u;
+}
+
+/* Unaligned load of uint32_t. */
+static LJ_AINLINE uint32_t lj_getu32(const void *p)
+{
+ return ((const Unaligned32 *)p)->u;
+}
+
+#elif defined(_MSC_VER)
+
+#define LJ_NORET __declspec(noreturn)
+#define LJ_ALIGN(n) __declspec(align(n))
+#define LJ_INLINE __inline
+#define LJ_AINLINE __forceinline
+#define LJ_NOINLINE __declspec(noinline)
+#if defined(_M_IX86)
+#define LJ_FASTCALL __fastcall
+#endif
+
+#ifdef _M_PPC
+unsigned int _CountLeadingZeros(long);
+#pragma intrinsic(_CountLeadingZeros)
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ return _CountLeadingZeros(x) ^ 31;
+}
+#else
+unsigned char _BitScanForward(unsigned long *, unsigned long);
+unsigned char _BitScanReverse(unsigned long *, unsigned long);
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+static LJ_AINLINE uint32_t lj_ffs(uint32_t x)
+{
+ unsigned long r; _BitScanForward(&r, x); return (uint32_t)r;
+}
+
+static LJ_AINLINE uint32_t lj_fls(uint32_t x)
+{
+ unsigned long r; _BitScanReverse(&r, x); return (uint32_t)r;
+}
+#endif
+
+unsigned long _byteswap_ulong(unsigned long);
+uint64_t _byteswap_uint64(uint64_t);
+#define lj_bswap(x) (_byteswap_ulong((x)))
+#define lj_bswap64(x) (_byteswap_uint64((x)))
+
+#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED)
+/*
+** Replacement for unaligned loads on Xbox 360. Disabled by default since it's
+** usually more costly than the occasional stall when crossing a cache-line.
+*/
+static LJ_AINLINE uint16_t lj_getu16(const void *v)
+{
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint16_t)((p[0]<<8) | p[1]);
+}
+static LJ_AINLINE uint32_t lj_getu32(const void *v)
+{
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]);
+}
+#else
+/* Unaligned loads are generally ok on x86/x64. */
+#define lj_getu16(p) (*(uint16_t *)(p))
+#define lj_getu32(p) (*(uint32_t *)(p))
+#endif
+
+#else
+#error "missing defines for your compiler"
+#endif
+
+/* Optional defines. */
+#ifndef LJ_FASTCALL
+#define LJ_FASTCALL
+#endif
+#ifndef LJ_NORET
+#define LJ_NORET
+#endif
+#ifndef LJ_NOAPI
+#define LJ_NOAPI extern
+#endif
+#ifndef LJ_LIKELY
+#define LJ_LIKELY(x) (x)
+#define LJ_UNLIKELY(x) (x)
+#endif
+
+/* Attributes for internal functions. */
+#define LJ_DATA LJ_NOAPI
+#define LJ_DATADEF
+#define LJ_ASMF LJ_NOAPI
+#define LJ_FUNCA LJ_NOAPI
+#if defined(ljamalg_c)
+#define LJ_FUNC static
+#else
+#define LJ_FUNC LJ_NOAPI
+#endif
+#define LJ_FUNC_NORET LJ_FUNC LJ_NORET
+#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
+#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
+
+/* Internal assertions. */
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+#define lj_assert_check(g, c, ...) \
+ ((c) ? (void)0 : \
+ (lj_assert_fail((g), __FILE__, __LINE__, __func__, __VA_ARGS__), 0))
+#define lj_checkapi(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__)
+#else
+#define lj_checkapi(c, ...) ((void)L)
+#endif
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertG_(g, c, ...) lj_assert_check((g), (c), __VA_ARGS__)
+#define lj_assertG(c, ...) lj_assert_check(g, (c), __VA_ARGS__)
+#define lj_assertL(c, ...) lj_assert_check(G(L), (c), __VA_ARGS__)
+#define lj_assertX(c, ...) lj_assert_check(NULL, (c), __VA_ARGS__)
+#define check_exp(c, e) (lj_assertX((c), #c), (e))
+#else
+#define lj_assertG_(g, c, ...) ((void)0)
+#define lj_assertG(c, ...) ((void)g)
+#define lj_assertL(c, ...) ((void)L)
+#define lj_assertX(c, ...) ((void)0)
+#define check_exp(c, e) (e)
+#endif
+
+/* Static assertions. */
+#define LJ_ASSERT_NAME2(name, line) name ## line
+#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line)
+#ifdef __COUNTER__
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#else
+#define LJ_STATIC_ASSERT(cond) \
+ extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
+#endif
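+
+/* Usage sketch: the macro expands to a declaration whose array bound is
+** invalid when the condition is false, e.g.
+**
+**   LJ_STATIC_ASSERT(sizeof(uint32_t) == 4);  succeeds (array size 1)
+**   LJ_STATIC_ASSERT(sizeof(int) == 99);      fails: negative array size
+*/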
+
+/* PRNG state. Need this here, details in lj_prng.h. */
+typedef struct PRNGState {
+ uint64_t u[4];
+} PRNGState;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_dispatch.c b/libs/luajit-cmake/luajit/src/lj_dispatch.c
new file mode 100644
index 0000000..ded382a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_dispatch.c
@@ -0,0 +1,559 @@
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_dispatch_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_func.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_debug.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_strfmt.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+#if LJ_HASFFI
+#include "lj_ccallback.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#if LJ_HASPROFILE
+#include "lj_profile.h"
+#endif
+#include "lj_vm.h"
+#include "luajit.h"
+
+/* Bump GG_NUM_ASMFF in lj_dispatch.h as needed. Ugly. */
+LJ_STATIC_ASSERT(GG_NUM_ASMFF == FF_NUM_ASMFUNC);
+
+/* -- Dispatch table management ------------------------------------------- */
+
+#if LJ_TARGET_MIPS
+#include <math.h>
+LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L,
+ lua_State *co);
+#if !LJ_HASJIT
+#define lj_dispatch_stitch lj_dispatch_ins
+#endif
+#if !LJ_HASPROFILE
+#define lj_dispatch_profile lj_dispatch_ins
+#endif
+
+#define GOTFUNC(name) (ASMFunction)name,
+static const ASMFunction dispatch_got[] = {
+ GOTDEF(GOTFUNC)
+};
+#undef GOTFUNC
+#endif
+
+/* Initialize instruction dispatch table and hot counters. */
+void lj_dispatch_init(GG_State *GG)
+{
+ uint32_t i;
+ ASMFunction *disp = GG->dispatch;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[GG_LEN_DDISP+i] = disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ /* The JIT engine is off by default. luaopen_jit() turns it on. */
+ disp[BC_FORL] = disp[BC_IFORL];
+ disp[BC_ITERL] = disp[BC_IITERL];
+ /* Workaround for stable v2.1 bytecode. TODO: Replace with BC_IITERN. */
+ disp[BC_ITERN] = &lj_vm_IITERN;
+ disp[BC_LOOP] = disp[BC_ILOOP];
+ disp[BC_FUNCF] = disp[BC_IFUNCF];
+ disp[BC_FUNCV] = disp[BC_IFUNCV];
+ GG->g.bc_cfunc_ext = GG->g.bc_cfunc_int = BCINS_AD(BC_FUNCC, LUA_MINSTACK, 0);
+ for (i = 0; i < GG_NUM_ASMFF; i++)
+ GG->bcff[i] = BCINS_AD(BC__MAX+i, 0, 0);
+#if LJ_TARGET_MIPS
+ memcpy(GG->got, dispatch_got, LJ_GOT__MAX*sizeof(ASMFunction *));
+#endif
+}
+
+#if LJ_HASJIT
+/* Initialize hotcount table. */
+void lj_dispatch_init_hotcount(global_State *g)
+{
+ int32_t hotloop = G2J(g)->param[JIT_P_hotloop];
+ HotCount start = (HotCount)(hotloop*HOTCOUNT_LOOP - 1);
+ HotCount *hotcount = G2GG(g)->hotcount;
+ uint32_t i;
+ for (i = 0; i < HOTCOUNT_SIZE; i++)
+ hotcount[i] = start;
+}
+#endif
+
+/* Internal dispatch mode bits. */
+#define DISPMODE_CALL 0x01 /* Override call dispatch. */
+#define DISPMODE_RET 0x02 /* Override return dispatch. */
+#define DISPMODE_INS 0x04 /* Override instruction dispatch. */
+#define DISPMODE_JIT 0x10 /* JIT compiler on. */
+#define DISPMODE_REC 0x20 /* Recording active. */
+#define DISPMODE_PROF 0x40 /* Profiling active. */
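+
+/* Example: a line hook with the JIT enabled yields
+** mode = DISPMODE_JIT|DISPMODE_INS, so lj_dispatch_update() below points
+** every dynamic instruction entry at lj_vm_inshook, while the static copy
+** of the table keeps the hot-counting FORL/ITERL/LOOP variants.
+*/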
+
+/* Update dispatch table depending on various flags. */
+void lj_dispatch_update(global_State *g)
+{
+ uint8_t oldmode = g->dispatchmode;
+ uint8_t mode = 0;
+#if LJ_HASJIT
+ mode |= (G2J(g)->flags & JIT_F_ON) ? DISPMODE_JIT : 0;
+ mode |= G2J(g)->state != LJ_TRACE_IDLE ?
+ (DISPMODE_REC|DISPMODE_INS|DISPMODE_CALL) : 0;
+#endif
+#if LJ_HASPROFILE
+ mode |= (g->hookmask & HOOK_PROFILE) ? (DISPMODE_PROF|DISPMODE_INS) : 0;
+#endif
+ mode |= (g->hookmask & (LUA_MASKLINE|LUA_MASKCOUNT)) ? DISPMODE_INS : 0;
+ mode |= (g->hookmask & LUA_MASKCALL) ? DISPMODE_CALL : 0;
+ mode |= (g->hookmask & LUA_MASKRET) ? DISPMODE_RET : 0;
+ if (oldmode != mode) { /* Mode changed? */
+ ASMFunction *disp = G2GG(g)->dispatch;
+ ASMFunction f_forl, f_iterl, f_itern, f_loop, f_funcf, f_funcv;
+ g->dispatchmode = mode;
+
+ /* Hotcount if JIT is on, but not while recording. */
+ if ((mode & (DISPMODE_JIT|DISPMODE_REC)) == DISPMODE_JIT) {
+ f_forl = makeasmfunc(lj_bc_ofs[BC_FORL]);
+ f_iterl = makeasmfunc(lj_bc_ofs[BC_ITERL]);
+ f_itern = makeasmfunc(lj_bc_ofs[BC_ITERN]);
+ f_loop = makeasmfunc(lj_bc_ofs[BC_LOOP]);
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_FUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_FUNCV]);
+ } else { /* Otherwise use the non-hotcounting instructions. */
+ f_forl = disp[GG_LEN_DDISP+BC_IFORL];
+ f_iterl = disp[GG_LEN_DDISP+BC_IITERL];
+ f_itern = &lj_vm_IITERN;
+ f_loop = disp[GG_LEN_DDISP+BC_ILOOP];
+ f_funcf = makeasmfunc(lj_bc_ofs[BC_IFUNCF]);
+ f_funcv = makeasmfunc(lj_bc_ofs[BC_IFUNCV]);
+ }
+ /* Init static counting instruction dispatch first (may be copied below). */
+ disp[GG_LEN_DDISP+BC_FORL] = f_forl;
+ disp[GG_LEN_DDISP+BC_ITERL] = f_iterl;
+ disp[GG_LEN_DDISP+BC_ITERN] = f_itern;
+ disp[GG_LEN_DDISP+BC_LOOP] = f_loop;
+
+ /* Set dynamic instruction dispatch. */
+ if ((oldmode ^ mode) & (DISPMODE_PROF|DISPMODE_REC|DISPMODE_INS)) {
+ /* Need to update the whole table. */
+ if (!(mode & DISPMODE_INS)) { /* No ins dispatch? */
+ /* Copy static dispatch table to dynamic dispatch table. */
+ memcpy(&disp[0], &disp[GG_LEN_DDISP], GG_LEN_SDISP*sizeof(ASMFunction));
+ /* Overwrite with dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ }
+ } else {
+ /* The recording dispatch also checks for hooks. */
+ ASMFunction f = (mode & DISPMODE_PROF) ? lj_vm_profhook :
+ (mode & DISPMODE_REC) ? lj_vm_record : lj_vm_inshook;
+ uint32_t i;
+ for (i = 0; i < GG_LEN_SDISP; i++)
+ disp[i] = f;
+ }
+ } else if (!(mode & DISPMODE_INS)) {
+ /* Otherwise set dynamic counting ins. */
+ disp[BC_FORL] = f_forl;
+ disp[BC_ITERL] = f_iterl;
+ disp[BC_ITERN] = f_itern;
+ disp[BC_LOOP] = f_loop;
+ /* Set dynamic return dispatch. */
+ if ((mode & DISPMODE_RET)) {
+ disp[BC_RETM] = lj_vm_rethook;
+ disp[BC_RET] = lj_vm_rethook;
+ disp[BC_RET0] = lj_vm_rethook;
+ disp[BC_RET1] = lj_vm_rethook;
+ } else {
+ disp[BC_RETM] = disp[GG_LEN_DDISP+BC_RETM];
+ disp[BC_RET] = disp[GG_LEN_DDISP+BC_RET];
+ disp[BC_RET0] = disp[GG_LEN_DDISP+BC_RET0];
+ disp[BC_RET1] = disp[GG_LEN_DDISP+BC_RET1];
+ }
+ }
+
+ /* Set dynamic call dispatch. */
+ if ((oldmode ^ mode) & DISPMODE_CALL) { /* Update the whole table? */
+ uint32_t i;
+ if ((mode & DISPMODE_CALL) == 0) { /* No call hooks? */
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = makeasmfunc(lj_bc_ofs[i]);
+ } else {
+ for (i = GG_LEN_SDISP; i < GG_LEN_DDISP; i++)
+ disp[i] = lj_vm_callhook;
+ }
+ }
+ if (!(mode & DISPMODE_CALL)) { /* Overwrite dynamic counting ins. */
+ disp[BC_FUNCF] = f_funcf;
+ disp[BC_FUNCV] = f_funcv;
+ }
+
+#if LJ_HASJIT
+ /* Reset hotcounts for JIT off to on transition. */
+ if ((mode & DISPMODE_JIT) && !(oldmode & DISPMODE_JIT))
+ lj_dispatch_init_hotcount(g);
+#endif
+ }
+}
+
+/* -- JIT mode setting ---------------------------------------------------- */
+
+#if LJ_HASJIT
+/* Set JIT mode for a single prototype. */
+static void setptmode(global_State *g, GCproto *pt, int mode)
+{
+ if ((mode & LUAJIT_MODE_ON)) { /* (Re-)enable JIT compilation. */
+ pt->flags &= ~PROTO_NOJIT;
+ lj_trace_reenableproto(pt); /* Unpatch all ILOOP etc. bytecodes. */
+ } else { /* Flush and/or disable JIT compilation. */
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ pt->flags |= PROTO_NOJIT;
+ lj_trace_flushproto(g, pt); /* Flush all traces of prototype. */
+ }
+}
+
+/* Recursively set the JIT mode for all children of a prototype. */
+static void setptmode_all(global_State *g, GCproto *pt, int mode)
+{
+ ptrdiff_t i;
+ if (!(pt->flags & PROTO_CHILD)) return;
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) {
+ GCobj *o = proto_kgc(pt, i);
+ if (o->gch.gct == ~LJ_TPROTO) {
+ setptmode(g, gco2pt(o), mode);
+ setptmode_all(g, gco2pt(o), mode);
+ }
+ }
+}
+#endif
+
+/* Public API function: control the JIT engine. */
+int luaJIT_setmode(lua_State *L, int idx, int mode)
+{
+ global_State *g = G(L);
+ int mm = mode & LUAJIT_MODE_MASK;
+ lj_trace_abort(g); /* Abort recording on any state change. */
+ /* Avoid pulling the rug from under our own feet. */
+ if ((g->hookmask & HOOK_GC))
+ lj_err_caller(L, LJ_ERR_NOGCMM);
+ switch (mm) {
+#if LJ_HASJIT
+ case LUAJIT_MODE_ENGINE:
+ if ((mode & LUAJIT_MODE_FLUSH)) {
+ lj_trace_flushall(L);
+ } else {
+ if (!(mode & LUAJIT_MODE_ON))
+ G2J(g)->flags &= ~(uint32_t)JIT_F_ON;
+ else
+ G2J(g)->flags |= (uint32_t)JIT_F_ON;
+ lj_dispatch_update(g);
+ }
+ break;
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC: {
+ cTValue *tv = idx == 0 ? frame_prev(L->base-1)-LJ_FR2 :
+ idx > 0 ? L->base + (idx-1) : L->top + idx;
+ GCproto *pt;
+ if ((idx == 0 || tvisfunc(tv)) && isluafunc(&gcval(tv)->fn))
+ pt = funcproto(&gcval(tv)->fn); /* Cannot use funcV() for frame slot. */
+ else if (tvisproto(tv))
+ pt = protoV(tv);
+ else
+ return 0; /* Failed. */
+ if (mm != LUAJIT_MODE_ALLSUBFUNC)
+ setptmode(g, pt, mode);
+ if (mm != LUAJIT_MODE_FUNC)
+ setptmode_all(g, pt, mode);
+ break;
+ }
+ case LUAJIT_MODE_TRACE:
+ if (!(mode & LUAJIT_MODE_FLUSH))
+ return 0; /* Failed. */
+ lj_trace_flush(G2J(g), idx);
+ break;
+#else
+ case LUAJIT_MODE_ENGINE:
+ case LUAJIT_MODE_FUNC:
+ case LUAJIT_MODE_ALLFUNC:
+ case LUAJIT_MODE_ALLSUBFUNC:
+ UNUSED(idx);
+ if ((mode & LUAJIT_MODE_ON))
+ return 0; /* Failed. */
+ break;
+#endif
+ case LUAJIT_MODE_WRAPCFUNC:
+ if ((mode & LUAJIT_MODE_ON)) {
+ if (idx != 0) {
+ cTValue *tv = idx > 0 ? L->base + (idx-1) : L->top + idx;
+ if (tvislightud(tv))
+ g->wrapf = (lua_CFunction)lightudV(g, tv);
+ else
+ return 0; /* Failed. */
+ } else {
+ return 0; /* Failed. */
+ }
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCCW, 0, 0);
+ } else {
+ g->bc_cfunc_ext = BCINS_AD(BC_FUNCC, 0, 0);
+ }
+ break;
+ default:
+ return 0; /* Failed. */
+ }
+ return 1; /* OK. */
+}
+
+/* Enforce (dynamic) linker error for version mismatches. See luajit.c. */
+LUA_API void LUAJIT_VERSION_SYM(void)
+{
+}
+
+/* -- Hooks --------------------------------------------------------------- */
+
+/* This function can be called asynchronously (e.g. during a signal). */
+LUA_API int lua_sethook(lua_State *L, lua_Hook func, int mask, int count)
+{
+ global_State *g = G(L);
+ mask &= HOOK_EVENTMASK;
+ if (func == NULL || mask == 0) { mask = 0; func = NULL; } /* Consistency. */
+ g->hookf = func;
+ g->hookcount = g->hookcstart = (int32_t)count;
+ g->hookmask = (uint8_t)((g->hookmask & ~HOOK_EVENTMASK) | mask);
+ lj_trace_abort(g); /* Abort recording on any hook change. */
+ lj_dispatch_update(g);
+ return 1;
+}
+
+LUA_API lua_Hook lua_gethook(lua_State *L)
+{
+ return G(L)->hookf;
+}
+
+LUA_API int lua_gethookmask(lua_State *L)
+{
+ return G(L)->hookmask & HOOK_EVENTMASK;
+}
+
+LUA_API int lua_gethookcount(lua_State *L)
+{
+ return (int)G(L)->hookcstart;
+}
+
+/* Call a hook. */
+static void callhook(lua_State *L, int event, BCLine line)
+{
+ global_State *g = G(L);
+ lua_Hook hookf = g->hookf;
+ if (hookf && !hook_active(g)) {
+ lua_Debug ar;
+ lj_trace_abort(g); /* Abort recording on any hook call. */
+ ar.event = event;
+ ar.currentline = line;
+ /* Top frame, nextframe = NULL. */
+ ar.i_ci = (int)((L->base-1) - tvref(L->stack));
+ lj_state_checkstack(L, 1+LUA_MINSTACK);
+#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF
+ lj_profile_hook_enter(g);
+#else
+ hook_enter(g);
+#endif
+ hookf(L, &ar);
+ lj_assertG(hook_active(g), "active hook flag removed");
+ setgcref(g->cur_L, obj2gco(L));
+#if LJ_HASPROFILE && !LJ_PROFILE_SIGPROF
+ lj_profile_hook_leave(g);
+#else
+ hook_leave(g);
+#endif
+ }
+}
+
+/* -- Dispatch callbacks -------------------------------------------------- */
+
+/* Calculate number of used stack slots in the current frame. */
+static BCReg cur_topslot(GCproto *pt, const BCIns *pc, uint32_t nres)
+{
+ BCIns ins = pc[-1];
+ if (bc_op(ins) == BC_UCLO)
+ ins = pc[bc_j(ins)];
+ switch (bc_op(ins)) {
+ case BC_CALLM: case BC_CALLMT: return bc_a(ins) + bc_c(ins) + nres-1+1+LJ_FR2;
+ case BC_RETM: return bc_a(ins) + bc_d(ins) + nres-1;
+ case BC_TSETM: return bc_a(ins) + nres-1;
+ default: return pt->framesize;
+ }
+}
+
+/* Instruction dispatch. Used by instr/line/return hooks or when recording. */
+void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ GCproto *pt = funcproto(fn);
+ void *cf = cframe_raw(L->cframe);
+ const BCIns *oldpc = cframe_pc(cf);
+ global_State *g = G(L);
+ BCReg slots;
+ setcframe_pc(cf, pc);
+ slots = cur_topslot(pt, pc, cframe_multres_n(cf));
+ L->top = L->base + slots; /* Fix top. */
+#if LJ_HASJIT
+ {
+ jit_State *J = G2J(g);
+ if (J->state != LJ_TRACE_IDLE) {
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ J->L = L;
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ lj_assertG(L->top - L->base == delta,
+ "unbalanced stack after tracing of instruction");
+ }
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCOUNT) && g->hookcount == 0) {
+ g->hookcount = g->hookcstart;
+ callhook(L, LUA_HOOKCOUNT, -1);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ if ((g->hookmask & LUA_MASKLINE)) {
+ BCPos npc = proto_bcpos(pt, pc) - 1;
+ BCPos opc = proto_bcpos(pt, oldpc) - 1;
+ BCLine line = lj_debug_line(pt, npc);
+ if (pc <= oldpc || opc >= pt->sizebc || line != lj_debug_line(pt, opc)) {
+ callhook(L, LUA_HOOKLINE, line);
+ L->top = L->base + slots; /* Fix top again. */
+ }
+ }
+ if ((g->hookmask & LUA_MASKRET) && bc_isret(bc_op(pc[-1])))
+ callhook(L, LUA_HOOKRET, -1);
+ ERRNO_RESTORE
+}
+
+/* Initialize call. Ensure stack space and return # of missing parameters. */
+static int call_init(lua_State *L, GCfunc *fn)
+{
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ int numparams = pt->numparams;
+ int gotparams = (int)(L->top - L->base);
+ int need = pt->framesize;
+ if ((pt->flags & PROTO_VARARG)) need += 1+gotparams;
+ lj_state_checkstack(L, (MSize)need);
+ numparams -= gotparams;
+ return numparams >= 0 ? numparams : 0;
+ } else {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ return 0;
+ }
+}
+
+/* Call dispatch. Used by call hooks, hot calls or when recording. */
+ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ BCOp op;
+ global_State *g = G(L);
+#if LJ_HASJIT
+ jit_State *J = G2J(g);
+#endif
+ int missing = call_init(L, fn);
+#if LJ_HASJIT
+ J->L = L;
+ if ((uintptr_t)pc & 1) { /* Marker for hot call. */
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ pc = (const BCIns *)((uintptr_t)pc & ~(uintptr_t)1);
+ lj_trace_hot(J, pc);
+ lj_assertG(L->top - L->base == delta,
+ "unbalanced stack after hot call");
+ goto out;
+ } else if (J->state != LJ_TRACE_IDLE &&
+ !(g->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+#ifdef LUA_USE_ASSERT
+ ptrdiff_t delta = L->top - L->base;
+#endif
+ /* Record the FUNC* bytecodes, too. */
+ lj_trace_ins(J, pc-1); /* The interpreter bytecode PC is offset by 1. */
+ lj_assertG(L->top - L->base == delta,
+ "unbalanced stack after hot instruction");
+ }
+#endif
+ if ((g->hookmask & LUA_MASKCALL)) {
+ int i;
+ for (i = 0; i < missing; i++) /* Add missing parameters. */
+ setnilV(L->top++);
+ callhook(L, LUA_HOOKCALL, -1);
+ /* Preserve modifications of missing parameters by lua_setlocal(). */
+ while (missing-- > 0 && tvisnil(L->top - 1))
+ L->top--;
+ }
+#if LJ_HASJIT
+out:
+#endif
+ op = bc_op(pc[-1]); /* Get FUNC* op. */
+#if LJ_HASJIT
+ /* Use the non-hotcounting variants if JIT is off or while recording. */
+ if ((!(J->flags & JIT_F_ON) || J->state != LJ_TRACE_IDLE) &&
+ (op == BC_FUNCF || op == BC_FUNCV))
+ op = (BCOp)((int)op+(int)BC_IFUNCF-(int)BC_FUNCF);
+#endif
+ ERRNO_RESTORE
+ return makeasmfunc(lj_bc_ofs[op]); /* Return static dispatch target. */
+}
+
+#if LJ_HASJIT
+/* Stitch a new trace. */
+void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc)
+{
+ ERRNO_SAVE
+ lua_State *L = J->L;
+ void *cf = cframe_raw(L->cframe);
+ const BCIns *oldpc = cframe_pc(cf);
+ setcframe_pc(cf, pc);
+ /* Before dispatch, have to bias PC by 1. */
+ L->top = L->base + cur_topslot(curr_proto(L), pc+1, cframe_multres_n(cf));
+ lj_trace_stitch(J, pc-1); /* Point to the CALL instruction. */
+ setcframe_pc(cf, oldpc);
+ ERRNO_RESTORE
+}
+#endif
+
+#if LJ_HASPROFILE
+/* Profile dispatch. */
+void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc)
+{
+ ERRNO_SAVE
+ GCfunc *fn = curr_func(L);
+ GCproto *pt = funcproto(fn);
+ void *cf = cframe_raw(L->cframe);
+ const BCIns *oldpc = cframe_pc(cf);
+ global_State *g;
+ setcframe_pc(cf, pc);
+ L->top = L->base + cur_topslot(pt, pc, cframe_multres_n(cf));
+ lj_profile_interpreter(L);
+ setcframe_pc(cf, oldpc);
+ g = G(L);
+ setgcref(g->cur_L, obj2gco(L));
+ setvmstate(g, INTERP);
+ ERRNO_RESTORE
+}
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lj_dispatch.h b/libs/luajit-cmake/luajit/src/lj_dispatch.h
new file mode 100644
index 0000000..52762ee
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_dispatch.h
@@ -0,0 +1,164 @@
+/*
+** Instruction dispatch handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_DISPATCH_H
+#define _LJ_DISPATCH_H
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#endif
+
+#if LJ_TARGET_MIPS
+/* Need our own global offset table for the dreaded MIPS calling conventions. */
+
+#ifndef _LJ_VM_H
+LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b);
+#endif
+
+#if LJ_SOFTFP
+#ifndef _LJ_IRCALL_H
+extern double __adddf3(double a, double b);
+extern double __subdf3(double a, double b);
+extern double __muldf3(double a, double b);
+extern double __divdf3(double a, double b);
+#endif
+#define SFGOTDEF(_) _(sqrt) _(__adddf3) _(__subdf3) _(__muldf3) _(__divdf3)
+#else
+#define SFGOTDEF(_)
+#endif
+#if LJ_HASJIT
+#define JITGOTDEF(_) _(lj_err_trace) _(lj_trace_exit) _(lj_trace_hot)
+#else
+#define JITGOTDEF(_)
+#endif
+#if LJ_HASFFI
+#define FFIGOTDEF(_) \
+ _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave)
+#else
+#define FFIGOTDEF(_)
+#endif
+#define GOTDEF(_) \
+ _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
+ _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
+ _(pow) _(fmod) _(ldexp) _(lj_vm_modi) \
+ _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \
+ _(lj_dispatch_profile) _(lj_err_throw) \
+ _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
+ _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
+ _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
+ _(lj_meta_for) _(lj_meta_istype) _(lj_meta_len) _(lj_meta_tget) \
+ _(lj_meta_tset) _(lj_state_growstack) _(lj_strfmt_number) \
+ _(lj_str_new) _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) \
+ _(lj_tab_new) _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
+ _(lj_tab_setinth) _(lj_buf_putstr_reverse) _(lj_buf_putstr_lower) \
+ _(lj_buf_putstr_upper) _(lj_buf_tostr) \
+ JITGOTDEF(_) FFIGOTDEF(_) SFGOTDEF(_)
+
+enum {
+#define GOTENUM(name) LJ_GOT_##name,
+GOTDEF(GOTENUM)
+#undef GOTENUM
+ LJ_GOT__MAX
+};
+#endif
+
+/* Type of hot counter. Must match the code in the assembler VM. */
+/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */
+typedef uint16_t HotCount;
+
+/* Number of hot counter hash table entries (must be a power of two). */
+#define HOTCOUNT_SIZE 64
+#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount))
+
+/* Hotcount decrements. */
+#define HOTCOUNT_LOOP 2
+#define HOTCOUNT_CALL 1
+
+/* This solves a circular dependency problem -- bump as needed. Sigh. */
+#define GG_NUM_ASMFF 57
+
+#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
+#define GG_LEN_SDISP BC_FUNCF
+#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP)
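+
+/* Layout of GG_State.dispatch[] (two consecutive tables):
+**
+**   [0 .. GG_LEN_DDISP-1]            dynamic table, patched at runtime
+**                                    for hooks, recording and profiling
+**   [GG_LEN_DDISP .. GG_LEN_DISP-1]  static copy of the first
+**                                    GG_LEN_SDISP entries
+**
+** lj_dispatch_update() restores dynamic entries from the static copy once
+** hooks or recording are turned off again.
+*/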
+
+/* Global state, main thread and extra fields are allocated together. */
+typedef struct GG_State {
+ lua_State L; /* Main thread. */
+ global_State g; /* Global state. */
+#if LJ_TARGET_ARM && !LJ_TARGET_NX
+ /* Make g reachable via K12 encoded DISPATCH-relative addressing. */
+ uint8_t align1[(16-sizeof(global_State))&15];
+#endif
+#if LJ_TARGET_MIPS
+ ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */
+#endif
+#if LJ_HASJIT
+ jit_State J; /* JIT state. */
+ HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */
+#if LJ_TARGET_ARM && !LJ_TARGET_NX
+ /* Ditto for J. */
+ uint8_t align2[(16-sizeof(jit_State)-sizeof(HotCount)*HOTCOUNT_SIZE)&15];
+#endif
+#endif
+ ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */
+ BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */
+} GG_State;
+
+#define GG_OFS(field) ((int)offsetof(GG_State, field))
+#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g)))
+#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J)))
+#define L2GG(L) (G2GG(G(L)))
+#define J2G(J) (&J2GG(J)->g)
+#define G2J(gl) (&G2GG(gl)->J)
+#define L2J(L) (&L2GG(L)->J)
+#define GG_G2J (GG_OFS(J) - GG_OFS(g))
+#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
+#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
+#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
+#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch))
+#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction))
+
+#define hotcount_get(gg, pc) \
+ (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)]
+#define hotcount_set(gg, pc, val) \
+ (hotcount_get((gg), (pc)) = (HotCount)(val))
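+
+/* Sketch of what the assembler VM does with these (in C terms):
+**
+**   HotCount c = hotcount_get(gg, pc);        slot = (pc >> 2) & 63
+**   hotcount_set(gg, pc, c - HOTCOUNT_LOOP);  decrement, trigger on wrap
+**
+** Distinct loops may hash to the same slot; a collision merely perturbs
+** when a trace gets started, never correctness.
+*/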
+
+/* Dispatch table management. */
+LJ_FUNC void lj_dispatch_init(GG_State *GG);
+#if LJ_HASJIT
+LJ_FUNC void lj_dispatch_init_hotcount(global_State *g);
+#endif
+LJ_FUNC void lj_dispatch_update(global_State *g);
+
+/* Instruction dispatch callback for hooks or when recording. */
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
+LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
+#if LJ_HASJIT
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc);
+#endif
+#if LJ_HASPROFILE
+LJ_FUNCA void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc);
+#endif
+
+#if LJ_HASFFI && !defined(_BUILDVM_H)
+/* Save/restore errno and GetLastError() around hooks, exits and recording. */
+#include <errno.h>
+#if LJ_TARGET_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
+#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
+#else
+#define ERRNO_SAVE int olderr = errno;
+#define ERRNO_RESTORE errno = olderr;
+#endif
+#else
+#define ERRNO_SAVE
+#define ERRNO_RESTORE
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_emit_arm.h b/libs/luajit-cmake/luajit/src/lj_emit_arm.h
new file mode 100644
index 0000000..cfb174f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_emit_arm.h
@@ -0,0 +1,361 @@
+/*
+** ARM instruction emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Constant encoding --------------------------------------------------- */
+
+static uint8_t emit_invai[16] = {
+ /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
+ /* EOR */ 0,
+ /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
+ /* RSB */ 0,
+ /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
+ /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
+ /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
+ /* RSC */ 0,
+ /* TST */ 0,
+ /* TEQ */ 0,
+ /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
+ /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
+ /* ORR */ 0,
+ /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
+ /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
+ /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
+};
+
+/* Encode constant in K12 format for data processing instructions. */
+static uint32_t emit_isk12(ARMIns ai, int32_t n)
+{
+ uint32_t invai, i, m = (uint32_t)n;
+ /* K12: unsigned 8 bit value, rotated in steps of two bits. */
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|m|i;
+ /* Otherwise try negation/complement with the inverse instruction. */
+ invai = emit_invai[((ai >> 21) & 15)];
+ if (!invai) return 0; /* Failed. No inverse instruction. */
+ m = ~(uint32_t)n;
+  if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
+      invai == ((ARMI_CMP^ARMI_CMN) >> 21)) m++;
+ for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
+ if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
+ return 0; /* Failed. */
+}
+
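+/* Worked example (illustrative only): the loop above tries all 16 even
+** rotations. 0xff000000 is found as imm8=0xff with rotation field 4,
+** i.e. 0xff rotated right by 2*4 = 8 bits. 0x101 has no K12 encoding,
+** since its two set bits never fit into a single 8 bit window.
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+static uint32_t ror32(uint32_t x, unsigned n)
+{
+  n &= 31;
+  return n ? ((x >> n) | (x << (32-n))) : x;
+}
+
+int main(void)
+{
+  assert(ror32(0xff, 2*4) == 0xff000000u);  /* imm8=0xff, rot=4. */
+  return 0;
+}
+#endif
+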
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm);
+}
+
+static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn)
+{
+ *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn);
+}
+
+static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm);
+}
+
+static void emit_d(ASMState *as, ARMIns ai, Reg rd)
+{
+ *--as->mcp = ai | ARMF_D(rd);
+}
+
+static void emit_n(ASMState *as, ARMIns ai, Reg rn)
+{
+ *--as->mcp = ai | ARMF_N(rn);
+}
+
+static void emit_m(ASMState *as, ARMIns ai, Reg rm)
+{
+ *--as->mcp = ai | ARMF_M(rm);
+}
+
+static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lj_assertA(ofs >= -255 && ofs <= 255,
+ "load/store offset %d out of range", ofs);
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
+ ((ofs & 0xf0) << 4) | (ofs & 0x0f);
+}
+
+static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lj_assertA(ofs >= -4095 && ofs <= 4095,
+ "load/store offset %d out of range", ofs);
+ /* Combine LDR/STR pairs to LDRD/STRD. */
+ if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
+ (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
+ (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) &&
+ as->mcp != as->mcloop) {
+ as->mcp++;
+ emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4);
+ return;
+ }
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs;
+}
+
+#if !LJ_SOFTFP
+static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
+{
+ lj_assertA(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0,
+ "load/store offset %d out of range", ofs);
+ if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
+ *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2);
+}
+#endif
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer spills of BASE/L. */
+#define emit_canremat(ref) ((ref) < ASMREF_L)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lj_assertA(r != d, "dest reg not free");
+ if (emit_canremat(ref)) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ uint32_t k = emit_isk12(ARMI_ADD, delta);
+ if (k) {
+ if (k == ARMI_K12)
+ emit_dm(as, ARMI_MOV, d, r);
+ else
+ emit_dn(as, ARMI_ADD^k, d, r);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Try to find a two step delta relative to another constant. */
+static int emit_kdelta2(ASMState *as, Reg rd, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lj_assertA(r != rd, "dest reg %d not free", rd);
+ if (emit_canremat(ref)) {
+ int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i;
+ if (other) {
+ int32_t delta = i - other;
+ uint32_t sh, inv = 0, k2, k;
+ if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; }
+ sh = lj_ffs(delta) & ~1;
+ k2 = emit_isk12(0, delta & (255 << sh));
+ k = emit_isk12(0, delta & ~(255 << sh));
+ if (k) {
+ emit_dn(as, ARMI_ADD^k2^inv, rd, rd);
+ emit_dn(as, ARMI_ADD^k^inv, rd, r);
+ return 1;
+ }
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg rd, int32_t i)
+{
+ uint32_t k = emit_isk12(ARMI_MOV, i);
+ lj_assertA(rset_test(as->freeset, rd) || rd == RID_TMP,
+ "dest reg %d not free", rd);
+ if (k) {
+ /* Standard K12 constant. */
+ emit_d(as, ARMI_MOV^k, rd);
+ } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
+ /* 16 bit loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd);
+ } else if (emit_kdelta1(as, rd, i)) {
+ /* One step delta relative to another constant. */
+ } else if ((as->flags & JIT_F_ARMV6T2)) {
+ /* 32 bit hiword/loword constant for ARMv6T2. */
+ emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), rd);
+ emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), rd);
+ } else if (emit_kdelta2(as, rd, i)) {
+ /* Two step delta relative to another constant. */
+ } else {
+ /* Otherwise construct the constant with up to 4 instructions. */
+ /* NYI: use mvn+bic, use pc-relative loads. */
+ for (;;) {
+ uint32_t sh = lj_ffs(i) & ~1;
+ int32_t m = i & (255 << sh);
+ i &= ~(255 << sh);
+ if (i == 0) {
+ emit_d(as, ARMI_MOV ^ emit_isk12(0, m), rd);
+ break;
+ }
+ emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), rd, rd);
+ }
+ }
+}
+
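+/* Illustrative sketch of the fallback loop above: each step strips one
+** 8 bit chunk at an even rotation. For 0x12345678 this yields the four
+** chunks 0x278, 0x5400, 0x2340000 and 0x10000000, and since machine
+** code is emitted backwards, the executed sequence is one MOV followed
+** by three ORRs. The helper ffs32() stands in for lj_ffs() here.
+*/
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+static unsigned ffs32(uint32_t x)
+{
+  unsigned n = 0;
+  while (!(x & 1)) { x >>= 1; n++; }
+  return n;
+}
+
+int main(void)
+{
+  uint32_t i = 0x12345678;
+  while (i) {
+    uint32_t sh = ffs32(i) & ~1u;
+    uint32_t m = i & (255u << sh);
+    printf("chunk 0x%08x\n", m);
+    i &= ~(255u << sh);
+  }
+  return 0;
+}
+#endif
+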
+#define emit_loada(as, rd, addr) emit_loadi(as, (rd), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
+{
+ int32_t i = i32ptr(p);
+ emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)),
+ (i & 4095));
+}
+
+#if !LJ_SOFTFP
+/* Load a number constant into an FPR. */
+static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
+{
+ cTValue *tv = ir_knum(ir);
+ int32_t i;
+ if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) {
+ uint32_t hi = tv->u32.hi;
+ uint32_t b = ((hi >> 22) & 0x1ff);
+ if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) {
+ *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) |
+ ((tv->u32.hi >> 12) & 0x00080000) |
+ ((tv->u32.hi >> 4) & 0x00070000) |
+ ((tv->u32.hi >> 16) & 0x0000000f);
+ return;
+ }
+ }
+ i = i32ptr(tv);
+ emit_vlso(as, ARMI_VLDR_D, r,
+ ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020));
+}
+#endif
+
+/* Get/set global_State fields. */
+#define emit_getgl(as, r, field) \
+ emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field)
+#define emit_setgl(as, r, field) \
+ emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field)
+
+/* Trace number is determined from pc of exit instruction. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = (target - p) - 1;
+ lj_assertA(((delta + 0x00800000) >> 24) == 0, "branch target out of range");
+ *--p = ai | ((uint32_t)delta & 0x00ffffffu);
+ as->mcp = p;
+}
+
+#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target))
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = ((char *)target - (char *)p) - 8;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ if ((delta & 1))
+ *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 23);
+ else
+ *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu);
+ } else { /* Target out of range: need indirect call. But don't use R0-R3. */
+ Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1));
+ *p = ARMI_BLXr | ARMF_M(r);
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+#if LJ_SOFTFP
+ lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
+#else
+ if (dst >= RID_MAX_GPR) {
+ emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S,
+ (dst & 15), (src & 15));
+ return;
+ }
+#endif
+ if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
+ MCode ins = *as->mcp, swp = (src^dst);
+ if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) {
+ if (!((ins ^ (dst << 16)) & 0x000f0000))
+ *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */
+ if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000))
+ *as->mcp = ins ^ (swp << 12); /* Swap D in store. */
+ }
+ }
+ emit_dm(as, ARMI_MOV, dst, src);
+}
+
+/* Generic load of register with base and (small) offset address. */
+static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+#if LJ_SOFTFP
+ lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
+#else
+ if (r >= RID_MAX_GPR)
+ emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, base, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_LDR, r, base, ofs);
+}
+
+/* Generic store of register with base and (small) offset address. */
+static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+#if LJ_SOFTFP
+ lj_assertA(!irt_isnum(ir->t), "unexpected FP op"); UNUSED(ir);
+#else
+ if (r >= RID_MAX_GPR)
+ emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, base, ofs);
+ else
+#endif
+ emit_lso(as, ARMI_STR, r, base, ofs);
+}
+
+/* Emit an arithmetic/logic operation with a constant operand. */
+static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src,
+ int32_t i, RegSet allow)
+{
+ uint32_t k = emit_isk12(ai, i);
+ if (k)
+ emit_dn(as, ai^k, dest, src);
+ else
+ emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs)
+ emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r));
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
diff --git a/libs/luajit-cmake/luajit/src/lj_emit_arm64.h b/libs/luajit-cmake/luajit/src/lj_emit_arm64.h
new file mode 100644
index 0000000..c4b4c14
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_emit_arm64.h
@@ -0,0 +1,424 @@
+/*
+** ARM64 instruction emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
+** Sponsored by Cisco Systems, Inc.
+*/
+
+/* -- Constant encoding --------------------------------------------------- */
+
+static uint64_t get_k64val(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KINT64) {
+ return ir_kint64(ir)->u64;
+ } else if (ir->o == IR_KGC) {
+ return (uint64_t)ir_kgc(ir);
+ } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ return (uint64_t)ir_kptr(ir);
+ } else {
+ lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL,
+ "bad 64 bit const IR op %d", ir->o);
+ return ir->i; /* Sign-extended. */
+ }
+}
+
+/* Encode constant in K12 format for data processing instructions. */
+static uint32_t emit_isk12(int64_t n)
+{
+ uint64_t k = (n < 0) ? -n : n;
+ uint32_t m = (n < 0) ? 0x40000000 : 0;
+ if (k < 0x1000) {
+ return A64I_K12|m|A64F_U12(k);
+ } else if ((k & 0xfff000) == k) {
+ return A64I_K12|m|0x400000|A64F_U12(k>>12);
+ }
+ return 0;
+}
+
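+/* Illustrative sketch: the magnitudes emit_isk12() accepts are a plain
+** 12 bit value, or a 12 bit value shifted left by 12. A negative n is
+** handled by flipping to the complementary ADD/SUB opcode (the
+** 0x40000000 bit above).
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+static int k12_ok(uint64_t k)  /* k is the magnitude |n|. */
+{
+  return k < 0x1000 || (k & 0xfff000ull) == k;
+}
+
+int main(void)
+{
+  assert(k12_ok(4095));      /* 0xfff: plain U12 form. */
+  assert(k12_ok(0x7ff000));  /* Shifted form. */
+  assert(!k12_ok(0x1001));   /* Needs two instructions or a register. */
+  return 0;
+}
+#endif
+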
+#define emit_clz64(n) __builtin_clzll(n)
+#define emit_ctz64(n) __builtin_ctzll(n)
+
+/* Encode constant in K13 format for logical data processing instructions. */
+static uint32_t emit_isk13(uint64_t n, int is64)
+{
+ int inv = 0, w = 128, lz, tz;
+ if (n & 1) { n = ~n; w = 64; inv = 1; } /* Avoid wrap-around of ones. */
+ if (!n) return 0; /* Neither all-zero nor all-ones are allowed. */
+ do { /* Find the repeat width. */
+ if (is64 && (uint32_t)(n^(n>>32))) break;
+ n = (uint32_t)n;
+ if (!n) return 0; /* Ditto when passing n=0xffffffff and is64=0. */
+ w = 32; if ((n^(n>>16)) & 0xffff) break;
+ n = n & 0xffff; w = 16; if ((n^(n>>8)) & 0xff) break;
+ n = n & 0xff; w = 8; if ((n^(n>>4)) & 0xf) break;
+ n = n & 0xf; w = 4; if ((n^(n>>2)) & 0x3) break;
+ n = n & 0x3; w = 2;
+ } while (0);
+ lz = emit_clz64(n);
+ tz = emit_ctz64(n);
+ if ((int64_t)(n << lz) >> (lz+tz) != -1ll) return 0; /* Non-contiguous? */
+ if (inv)
+ return A64I_K13 | (((lz-w) & 127) << 16) | (((lz+tz-w-1) & 63) << 10);
+ else
+ return A64I_K13 | ((w-tz) << 16) | (((63-lz-tz-w-w) & 63) << 10);
+}
+
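+/* Illustrative sketch of the property emit_isk13() tests: the constant
+** must be a repetition (element width 2, 4, 8, 16, 32 or 64) of a
+** single contiguous run of ones, possibly rotated. The brute-force
+** checker below is a slow but obviously-correct reference for the
+** 64 bit case.
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+static uint64_t ror64(uint64_t x, unsigned n)
+{
+  n &= 63;
+  return n ? ((x >> n) | (x << (64-n))) : x;
+}
+
+static int is_logical_imm64(uint64_t v)
+{
+  unsigned w, len, rot, i;
+  if (v == 0 || ~v == 0) return 0;  /* All-zeros/all-ones are excluded. */
+  for (w = 2; w <= 64; w <<= 1)
+    for (len = 1; len < w; len++) {
+      uint64_t run = (1ull << len) - 1, pat = 0;
+      for (i = 0; i < 64; i += w) pat |= run << i;  /* Repeat the run. */
+      for (rot = 0; rot < w; rot++)
+        if (ror64(pat, rot) == v) return 1;
+    }
+  return 0;
+}
+
+int main(void)
+{
+  assert(is_logical_imm64(0x5555555555555555ull));  /* Width 2. */
+  assert(is_logical_imm64(0x0f0f0f0f0f0f0f0full));  /* Width 8. */
+  assert(!is_logical_imm64(0x12345678ull));         /* Not encodable. */
+  return 0;
+}
+#endif
+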
+static uint32_t emit_isfpk64(uint64_t n)
+{
+ uint64_t etop9 = ((n >> 54) & 0x1ff);
+ if ((n << 16) == 0 && (etop9 == 0x100 || etop9 == 0x0ff)) {
+ return (uint32_t)(((n >> 48) & 0x7f) | ((n >> 56) & 0x80));
+ }
+ return ~0u;
+}
+
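+/* Illustrative sketch: emit_isfpk64() accepts exactly the doubles that
+** fit the A64 FMOV immediate, i.e. +/-(1 + m/16) * 2^e with a 4 bit
+** mantissa m and e in [-3,4]. In bit terms: the low 48 mantissa bits
+** must be zero and the top 9 exponent bits must be 0x0ff or 0x100.
+*/
+#if 0
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+
+static int fpk64_ok(double d)
+{
+  uint64_t n;
+  memcpy(&n, &d, 8);
+  return (n << 16) == 0 &&
+         (((n >> 54) & 0x1ff) == 0x100 || ((n >> 54) & 0x1ff) == 0x0ff);
+}
+
+int main(void)
+{
+  assert(fpk64_ok(1.0) && fpk64_ok(-2.5) && fpk64_ok(0.25));
+  assert(!fpk64_ok(0.1));     /* Mantissa needs more than 4 bits. */
+  assert(!fpk64_ok(1024.0));  /* Exponent out of range. */
+  return 0;
+}
+#endif
+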
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dnma(ASMState *as, A64Ins ai, Reg rd, Reg rn, Reg rm, Reg ra)
+{
+ *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_M(rm) | A64F_A(ra);
+}
+
+static void emit_dnm(ASMState *as, A64Ins ai, Reg rd, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_M(rm);
+}
+
+static void emit_dm(ASMState *as, A64Ins ai, Reg rd, Reg rm)
+{
+ *--as->mcp = ai | A64F_D(rd) | A64F_M(rm);
+}
+
+static void emit_dn(ASMState *as, A64Ins ai, Reg rd, Reg rn)
+{
+ *--as->mcp = ai | A64F_D(rd) | A64F_N(rn);
+}
+
+static void emit_nm(ASMState *as, A64Ins ai, Reg rn, Reg rm)
+{
+ *--as->mcp = ai | A64F_N(rn) | A64F_M(rm);
+}
+
+static void emit_d(ASMState *as, A64Ins ai, Reg rd)
+{
+ *--as->mcp = ai | A64F_D(rd);
+}
+
+static void emit_n(ASMState *as, A64Ins ai, Reg rn)
+{
+ *--as->mcp = ai | A64F_N(rn);
+}
+
+static int emit_checkofs(A64Ins ai, int64_t ofs)
+{
+ int scale = (ai >> 30) & 3;
+ if (ofs < 0 || (ofs & ((1<<scale)-1))) {
+ return (ofs >= -256 && ofs <= 255) ? -1 : 0;
+ } else {
+ return (ofs < (4096<<scale)) ? 1 : 0;
+ }
+}
+
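+/* Illustrative sketch: A64 has two load/store offset forms, a scaled
+** unsigned 12 bit offset (return value 1) and an unscaled signed 9 bit
+** offset (return value -1). The mirror below shows the classification
+** for an 8 byte access (scale 3).
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+static int checkofs_sketch(int64_t ofs, int scale)
+{
+  if (ofs < 0 || (ofs & ((1 << scale) - 1)))
+    return (ofs >= -256 && ofs <= 255) ? -1 : 0;  /* LDUR/STUR form. */
+  else
+    return (ofs < (4096 << scale)) ? 1 : 0;       /* LDR/STR form. */
+}
+
+int main(void)
+{
+  assert(checkofs_sketch(32760, 3) == 1);   /* 4095*8: scaled U12. */
+  assert(checkofs_sketch(-8, 3) == -1);     /* Negative: signed 9 bit. */
+  assert(checkofs_sketch(12, 3) == -1);     /* Misaligned: signed 9 bit. */
+  assert(checkofs_sketch(32768, 3) == 0);   /* Out of range. */
+  return 0;
+}
+#endif
+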
+static void emit_lso(ASMState *as, A64Ins ai, Reg rd, Reg rn, int64_t ofs)
+{
+ int ot = emit_checkofs(ai, ofs), sc = (ai >> 30) & 3;
+ lj_assertA(ot, "load/store offset %d out of range", ofs);
+ /* Combine LDR/STR pairs to LDP/STP. */
+ if ((sc == 2 || sc == 3) &&
+ (!(ai & 0x400000) || rd != rn) &&
+ as->mcp != as->mcloop) {
+ uint32_t prev = *as->mcp & ~A64F_D(31);
+ int ofsm = ofs - (1<<sc), ofsp = ofs + (1<<sc);
+ A64Ins aip;
+ if (prev == (ai | A64F_N(rn) | A64F_U12(ofsm>>sc)) ||
+ prev == ((ai^A64I_LS_U) | A64F_N(rn) | A64F_S9(ofsm&0x1ff))) {
+ aip = (A64F_A(rd) | A64F_D(*as->mcp & 31));
+ } else if (prev == (ai | A64F_N(rn) | A64F_U12(ofsp>>sc)) ||
+ prev == ((ai^A64I_LS_U) | A64F_N(rn) | A64F_S9(ofsp&0x1ff))) {
+ aip = (A64F_D(rd) | A64F_A(*as->mcp & 31));
+ ofsm = ofs;
+ } else {
+ goto nopair;
+ }
+ if (ofsm >= (int)((unsigned int)-64<<sc) && ofsm <= (63<<sc)) {
+ *as->mcp = aip | A64F_N(rn) | ((ofsm >> sc) << 15) |
+ (ai ^ ((ai == A64I_LDRx || ai == A64I_STRx) ? 0x50000000 : 0x90000000));
+ return;
+ }
+ }
+nopair:
+ if (ot == 1)
+ *--as->mcp = ai | A64F_D(rd) | A64F_N(rn) | A64F_U12(ofs >> sc);
+ else
+ *--as->mcp = (ai^A64I_LS_U) | A64F_D(rd) | A64F_N(rn) | A64F_S9(ofs & 0x1ff);
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= ASMREF_L)
+
+/* Try to find an N-step delta relative to other consts with N < lim. */
+static int emit_kdelta(ASMState *as, Reg rd, uint64_t k, int lim)
+{
+ RegSet work = (~as->freeset & RSET_GPR) | RID2RSET(RID_GL);
+ if (lim <= 1) return 0; /* Can't beat that. */
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lj_assertA(r != rd, "dest reg %d not free", rd);
+ if (ref < REF_TRUE) {
+ uint64_t kx = ra_iskref(ref) ? (uint64_t)ra_krefk(as, ref) :
+ get_k64val(as, ref);
+ int64_t delta = (int64_t)(k - kx);
+ if (delta == 0) {
+ emit_dm(as, A64I_MOVx, rd, r);
+ return 1;
+ } else {
+ uint32_t k12 = emit_isk12(delta < 0 ? -delta : delta);
+ if (k12) {
+ emit_dn(as, (delta < 0 ? A64I_SUBx : A64I_ADDx)^k12, rd, r);
+ return 1;
+ }
+ /* Do other ops or multi-step deltas pay off? Probably not.
+ ** E.g. XOR rarely helps with pointer consts.
+ */
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+static void emit_loadk(ASMState *as, Reg rd, uint64_t u64, int is64)
+{
+ int i, zeros = 0, ones = 0, neg;
+ if (!is64) u64 = (int64_t)(int32_t)u64; /* Sign-extend. */
+ /* Count homogeneous 16 bit fragments. */
+ for (i = 0; i < 4; i++) {
+ uint64_t frag = (u64 >> i*16) & 0xffff;
+ zeros += (frag == 0);
+ ones += (frag == 0xffff);
+ }
+ neg = ones > zeros; /* Use MOVN if it pays off. */
+ if ((neg ? ones : zeros) < 3) { /* Need 2+ ins. Try shorter K13 encoding. */
+ uint32_t k13 = emit_isk13(u64, is64);
+ if (k13) {
+ emit_dn(as, (is64|A64I_ORRw)^k13, rd, RID_ZERO);
+ return;
+ }
+ }
+ if (!emit_kdelta(as, rd, u64, 4 - (neg ? ones : zeros))) {
+ int shift = 0, lshift = 0;
+ uint64_t n64 = neg ? ~u64 : u64;
+ if (n64 != 0) {
+ /* Find first/last fragment to be filled. */
+ shift = (63-emit_clz64(n64)) & ~15;
+ lshift = emit_ctz64(n64) & ~15;
+ }
+ /* MOVK requires the original value (u64). */
+ while (shift > lshift) {
+ uint32_t u16 = (u64 >> shift) & 0xffff;
+ /* Skip fragments that are correctly filled by MOVN/MOVZ. */
+ if (u16 != (neg ? 0xffff : 0))
+ emit_d(as, is64 | A64I_MOVKw | A64F_U16(u16) | A64F_LSL16(shift), rd);
+ shift -= 16;
+ }
+ /* But MOVN needs an inverted value (n64). */
+ emit_d(as, (neg ? A64I_MOVNx : A64I_MOVZx) |
+ A64F_U16((n64 >> lshift) & 0xffff) | A64F_LSL16(lshift), rd);
+ }
+}
+
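+/* Worked example (illustrative only) for the MOVN/MOVK path above:
+** loading 0xffffffff00001234 costs two instructions, executed as
+**   movn rd, #0xedcb           ; rd = ~0xedcb = 0xffffffffffff1234
+**   movk rd, #0x0000, lsl #16  ; rd = 0xffffffff00001234
+** (the emitter writes them in reverse, since it emits backwards).
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+int main(void)
+{
+  uint64_t x = ~0xedcbull;                           /* MOVN. */
+  x = (x & ~(0xffffull << 16)) | (0x0000ull << 16);  /* MOVK. */
+  assert(x == 0xffffffff00001234ull);
+  return 0;
+}
+#endif
+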
+/* Load a 32 bit constant into a GPR. */
+#define emit_loadi(as, rd, i) emit_loadk(as, rd, i, 0)
+
+/* Load a 64 bit constant into a GPR. */
+#define emit_loadu64(as, rd, i) emit_loadk(as, rd, i, A64I_X)
+
+#define emit_loada(as, r, addr) emit_loadu64(as, (r), (uintptr_t)(addr))
+
+#define glofs(as, k) \
+ ((intptr_t)((uintptr_t)(k) - (uintptr_t)&J2GG(as->J)->g))
+#define mcpofs(as, k) \
+ ((intptr_t)((uintptr_t)(k) - (uintptr_t)(as->mcp - 1)))
+#define checkmcpofs(as, k) \
+ (A64F_S_OK(mcpofs(as, k)>>2, 19))
+
+static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, A64Ins ai, Reg r, void *p)
+{
+ /* First, check if ip + offset is in range. */
+ if ((ai & 0x00400000) && checkmcpofs(as, p)) {
+ emit_d(as, A64I_LDRLx | A64F_S19(mcpofs(as, p)>>2), r);
+ } else {
+ Reg base = RID_GL; /* Next, try GL + offset. */
+ int64_t ofs = glofs(as, p);
+ if (!emit_checkofs(ai, ofs)) { /* Else split up into base reg + offset. */
+ int64_t i64 = i64ptr(p);
+ base = ra_allock(as, (i64 & ~0x7fffull), rset_exclude(RSET_GPR, r));
+ ofs = i64 & 0x7fffull;
+ }
+ emit_lso(as, ai, r, base, ofs);
+ }
+}
+
+/* Load 64 bit IR constant into register. */
+static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
+{
+ const uint64_t *k = &ir_k64(ir)->u64;
+ int64_t ofs;
+ if (r >= RID_MAX_GPR) {
+ uint32_t fpk = emit_isfpk64(*k);
+ if (fpk != ~0u) {
+ emit_d(as, A64I_FMOV_DI | A64F_FP8(fpk), (r & 31));
+ return;
+ }
+ }
+ ofs = glofs(as, k);
+ if (emit_checkofs(A64I_LDRx, ofs)) {
+ emit_lso(as, r >= RID_MAX_GPR ? A64I_LDRd : A64I_LDRx,
+ (r & 31), RID_GL, ofs);
+ } else {
+ if (r >= RID_MAX_GPR) {
+ emit_dn(as, A64I_FMOV_D_R, (r & 31), RID_TMP);
+ r = RID_TMP;
+ }
+ if (checkmcpofs(as, k))
+ emit_d(as, A64I_LDRLx | A64F_S19(mcpofs(as, k)>>2), r);
+ else
+ emit_loadu64(as, r, *k);
+ }
+}
+
+/* Get/set global_State fields. */
+#define emit_getgl(as, r, field) \
+ emit_lsptr(as, A64I_LDRx, (r), (void *)&J2G(as->J)->field)
+#define emit_setgl(as, r, field) \
+ emit_lsptr(as, A64I_STRx, (r), (void *)&J2G(as->J)->field)
+
+/* Trace number is determined from pc of exit instruction. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_cond_branch(ASMState *as, A64CC cond, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(A64F_S_OK(delta, 19), "branch target out of range");
+ *p = A64I_BCC | A64F_S19(delta) | cond;
+}
+
+static void emit_branch(ASMState *as, A64Ins ai, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(A64F_S_OK(delta, 26), "branch target out of range");
+ *p = ai | A64F_S26(delta);
+}
+
+static void emit_tnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(bit < 63, "bit number out of range");
+ lj_assertA(A64F_S_OK(delta, 14), "branch target out of range");
+ if (bit > 31) ai |= A64I_X;
+ *p = ai | A64F_BIT(bit & 31) | A64F_S14(delta) | r;
+}
+
+static void emit_cnb(ASMState *as, A64Ins ai, Reg r, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(A64F_S_OK(delta, 19), "branch target out of range");
+ *p = ai | A64F_S19(delta) | r;
+}
+
+#define emit_jmp(as, target) emit_branch(as, A64I_B, (target))
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if (A64F_S_OK(delta>>2, 26)) {
+ *p = A64I_BL | A64F_S26(delta>>2);
+ } else { /* Target out of range: need indirect call. But don't use R0-R7. */
+ Reg r = ra_allock(as, i64ptr(target),
+ RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
+ *p = A64I_BLR | A64F_N(r);
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ if (dst >= RID_MAX_GPR) {
+ emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D : A64I_FMOV_S,
+ (dst & 31), (src & 31));
+ return;
+ }
+ if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
+ MCode ins = *as->mcp, swp = (src^dst);
+ if ((ins & 0xbf800000) == 0xb9000000) {
+ if (!((ins ^ (dst << 5)) & 0x000003e0))
+ *as->mcp = ins ^ (swp << 5); /* Swap N in load/store. */
+ if (!(ins & 0x00400000) && !((ins ^ dst) & 0x0000001f))
+ *as->mcp = ins ^ swp; /* Swap D in store. */
+ }
+ }
+ emit_dm(as, A64I_MOVx, dst, src);
+}
+
+/* Generic load of register with base and (small) offset address. */
+static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r >= RID_MAX_GPR)
+ emit_lso(as, irt_isnum(ir->t) ? A64I_LDRd : A64I_LDRs, (r & 31), base, ofs);
+ else
+ emit_lso(as, irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw, r, base, ofs);
+}
+
+/* Generic store of register with base and (small) offset address. */
+static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r >= RID_MAX_GPR)
+ emit_lso(as, irt_isnum(ir->t) ? A64I_STRd : A64I_STRs, (r & 31), base, ofs);
+ else
+ emit_lso(as, irt_is64(ir->t) ? A64I_STRx : A64I_STRw, r, base, ofs);
+}
+
+/* Emit an arithmetic operation with a constant operand. */
+static void emit_opk(ASMState *as, A64Ins ai, Reg dest, Reg src,
+ int32_t i, RegSet allow)
+{
+ uint32_t k = emit_isk12(i);
+ if (k)
+ emit_dn(as, ai^k, dest, src);
+ else
+ emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs)
+ emit_opk(as, ofs < 0 ? A64I_SUBx : A64I_ADDx, r, r,
+ ofs < 0 ? -ofs : ofs, rset_exclude(RSET_GPR, r));
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
diff --git a/libs/luajit-cmake/luajit/src/lj_emit_mips.h b/libs/luajit-cmake/luajit/src/lj_emit_mips.h
new file mode 100644
index 0000000..0cea547
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_emit_mips.h
@@ -0,0 +1,310 @@
+/*
+** MIPS instruction emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#if LJ_64
+static intptr_t get_k64val(ASMState *as, IRRef ref)
+{
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_KINT64) {
+ return (intptr_t)ir_kint64(ir)->u64;
+ } else if (ir->o == IR_KGC) {
+ return (intptr_t)ir_kgc(ir);
+ } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
+ return (intptr_t)ir_kptr(ir);
+ } else if (LJ_SOFTFP && ir->o == IR_KNUM) {
+ return (intptr_t)ir_knum(ir)->u64;
+ } else {
+ lj_assertA(ir->o == IR_KINT || ir->o == IR_KNULL,
+ "bad 64 bit const IR op %d", ir->o);
+ return ir->i; /* Sign-extended. */
+ }
+}
+#endif
+
+#if LJ_64
+#define get_kval(as, ref) get_k64val(as, ref)
+#else
+#define get_kval(as, ref) (IR((ref))->i)
+#endif
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt);
+}
+
+static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a)
+{
+ *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a);
+}
+
+#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0)
+#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt))
+
+static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i)
+{
+ *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff);
+}
+
+#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i))
+#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i))
+
+static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
+{
+ *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31);
+}
+
+#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0)
+
+static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
+{
+ if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
+ emit_dta(as, MIPSI_ROTR, dest, src, shift);
+ } else {
+ emit_dst(as, MIPSI_OR, dest, dest, tmp);
+ emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31);
+ emit_dta(as, MIPSI_SRL, tmp, src, shift);
+ }
+}
+
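+/* Illustrative sketch of the pre-R2 fallback above: a rotate-right by a
+** constant is synthesized from SRL, SLL and OR (shown in executed order;
+** the emitter writes them backwards).
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+int main(void)
+{
+  uint32_t src = 0x80000001u, shift = 4;
+  uint32_t tmp = src >> shift;                 /* SRL tmp, src, shift */
+  uint32_t dest = src << ((32 - shift) & 31);  /* SLL dest, src, (-shift)&31 */
+  dest |= tmp;                                 /* OR dest, dest, tmp */
+  assert(dest == 0x18000000u);  /* ror(0x80000001, 4) */
+  return 0;
+}
+#endif
+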
+#if LJ_64 || LJ_HASBUFFER
+static void emit_tsml(ASMState *as, MIPSIns mi, Reg rt, Reg rs, uint32_t msb,
+ uint32_t lsb)
+{
+ *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | MIPSF_M(msb) | MIPSF_L(lsb);
+}
+#endif
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg rd, intptr_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lj_assertA(r != rd, "dest reg %d not free", rd);
+ if (ref < ASMREF_L) {
+ intptr_t delta = (intptr_t)((uintptr_t)i -
+ (uintptr_t)(ra_iskref(ref) ? ra_krefk(as, ref) : get_kval(as, ref)));
+ if (checki16(delta)) {
+ emit_tsi(as, MIPSI_AADDIU, rd, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, MIPSI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ intptr_t jgl = (intptr_t)(void *)J2G(as->J);
+ if ((uintptr_t)(i-jgl) < 65536) {
+ emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ } else if ((i >> 16) == 0) {
+ emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i);
+ return;
+ }
+ emit_tsi(as, MIPSI_ORI, r, r, i);
+ }
+ emit_ti(as, MIPSI_LUI, r, (i >> 16));
+ }
+}
+
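+/* Worked example (illustrative only) for the generic case above:
+** loading 0x12345678 executes as
+**   lui r, 0x1234     ; r = 0x12340000
+**   ori r, r, 0x5678  ; r = 0x12345678
+** (emitted in reverse order, since the emitter works backwards).
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+int main(void)
+{
+  uint32_t r = 0x1234u << 16;  /* LUI. */
+  r |= 0x5678u;                /* ORI. */
+  assert(r == 0x12345678u);
+  return 0;
+}
+#endif
+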
+#if LJ_64
+/* Load a 64 bit constant into a GPR. */
+static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
+{
+ if (checki32((int64_t)u64)) {
+ emit_loadi(as, r, (int32_t)u64);
+ } else {
+ uint64_t delta = u64 - (uint64_t)(void *)J2G(as->J);
+ if (delta < 65536) {
+ emit_tsi(as, MIPSI_DADDIU, r, RID_JGL, (int32_t)(delta-32768));
+ } else if (emit_kdelta1(as, r, (intptr_t)u64)) {
+ return;
+ } else {
+ /* TODO MIPSR6: Use DAHI & DATI. Caveat: sign-extension. */
+ if ((u64 & 0xffff)) {
+ emit_tsi(as, MIPSI_ORI, r, r, u64 & 0xffff);
+ }
+ if (((u64 >> 16) & 0xffff)) {
+ emit_dta(as, MIPSI_DSLL, r, r, 16);
+ emit_tsi(as, MIPSI_ORI, r, r, (u64 >> 16) & 0xffff);
+ emit_dta(as, MIPSI_DSLL, r, r, 16);
+ } else {
+ emit_dta(as, MIPSI_DSLL32, r, r, 0);
+ }
+ emit_loadi(as, r, (int32_t)(u64 >> 32));
+ }
+ /* TODO: There are probably more optimization opportunities. */
+ }
+}
+
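+/* Worked example (illustrative only) for the generic 64 bit case above:
+** 0x123456789abcdef0 executes as a 32 bit load of the high half plus
+** two DSLL/ORI pairs.
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+int main(void)
+{
+  uint64_t r = 0x12345678;  /* emit_loadi() of the high 32 bits. */
+  r <<= 16; r |= 0x9abc;    /* DSLL r, r, 16; ORI r, r, 0x9abc */
+  r <<= 16; r |= 0xdef0;    /* DSLL r, r, 16; ORI r, r, 0xdef0 */
+  assert(r == 0x123456789abcdef0ull);
+  return 0;
+}
+#endif
+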
+#define emit_loada(as, r, addr) emit_loadu64(as, (r), u64ptr((addr)))
+#else
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+#endif
+
+static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
+static void ra_allockreg(ASMState *as, intptr_t k, Reg r);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
+{
+ intptr_t jgl = (intptr_t)(J2G(as->J));
+ intptr_t i = (intptr_t)(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tsi(as, mi, r, base, i);
+}
+
+#if LJ_64
+static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
+{
+ const uint64_t *k = &ir_k64(ir)->u64;
+ Reg r64 = r;
+ if (rset_test(RSET_FPR, r)) {
+ r64 = RID_TMP;
+ emit_tg(as, MIPSI_DMTC1, r64, r);
+ }
+ if ((uint32_t)((intptr_t)k-(intptr_t)J2G(as->J)) < 65536)
+ emit_lsptr(as, MIPSI_LD, r64, (void *)k, 0);
+ else
+ emit_loadu64(as, r64, *k);
+}
+#else
+#define emit_loadk64(as, r, ir) \
+ emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)&ir_knum((ir))->u64, RSET_GPR)
+#endif
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
+{
+ emit_tsi(as, mi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_AL, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, MIPSI_AS, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range");
+ *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
+ as->mcp = p;
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ *--as->mcp = MIPSI_NOP;
+ emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
+}
+
+static void emit_call(ASMState *as, void *target, int needcfa)
+{
+ MCode *p = as->mcp;
+#if LJ_TARGET_MIPSR6
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if ((((delta>>2) + 0x02000000) >> 26) == 0) { /* Try compact call first. */
+ *--p = MIPSI_BALC | (((uintptr_t)delta >>2) & 0x03ffffffu);
+ as->mcp = p;
+ return;
+ }
+#endif
+ *--p = MIPSI_NOP; /* Delay slot. */
+ if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0) {
+#if !LJ_TARGET_MIPSR6
+ *--p = (((uintptr_t)target & 1) ? MIPSI_JALX : MIPSI_JAL) |
+ (((uintptr_t)target >>2) & 0x03ffffffu);
+#else
+ *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
+#endif
+ } else { /* Target out of range: need indirect call. */
+ *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
+ needcfa = 1;
+ }
+ as->mcp = p;
+ if (needcfa) ra_allockreg(as, (intptr_t)target, RID_CFUNCADDR);
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_move(as, dst, src) \
+ emit_ds(as, MIPSI_MOVE, (dst), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ if (dst < RID_MAX_GPR)
+ emit_move(as, dst, src);
+ else
+ emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
+}
+
+/* Generic load of register with base and (small) offset address. */
+static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, irt_is64(ir->t) ? MIPSI_LD : MIPSI_LW, r, base, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
+ (r & 31), base, ofs);
+}
+
+/* Generic store of register with base and (small) offset address. */
+static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tsi(as, irt_is64(ir->t) ? MIPSI_SD : MIPSI_SW, r, base, ofs);
+ else
+ emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
+ (r&31), base, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ lj_assertA(checki16(ofs), "offset %d out of range", ofs);
+ emit_tsi(as, MIPSI_AADDIU, r, r, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
+
diff --git a/libs/luajit-cmake/luajit/src/lj_emit_ppc.h b/libs/luajit-cmake/luajit/src/lj_emit_ppc.h
new file mode 100644
index 0000000..86760e7
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_emit_ppc.h
@@ -0,0 +1,238 @@
+/*
+** PPC instruction emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb);
+}
+
+#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb))
+#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0)
+#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb))
+
+static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i)
+{
+ *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff);
+}
+
+#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i))
+#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i))
+#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i))
+
+#define emit_fab(as, pi, rf, ra, rb) \
+ emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31)
+#define emit_fac(as, pi, rf, ra, rc) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0)
+#define emit_facb(as, pi, rf, ra, rc, rb) \
+ emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31)
+#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i))
+
+static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
+ int32_t n, int32_t b, int32_t e)
+{
+ *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) |
+ PPCF_MB(b) | PPCF_ME(e);
+}
+
+static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+  lj_assertA(n >= 0 && n < 32, "shift out of range");
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
+}
+
+static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
+{
+  lj_assertA(n >= 0 && n < 32, "shift out of range");
+ emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
+/* Try to find a one step delta relative to another constant. */
+static int emit_kdelta1(ASMState *as, Reg rd, int32_t i)
+{
+ RegSet work = ~as->freeset & RSET_GPR;
+ while (work) {
+ Reg r = rset_picktop(work);
+ IRRef ref = regcost_ref(as->cost[r]);
+ lj_assertA(r != rd, "dest reg %d not free", rd);
+ if (ref < ASMREF_L) {
+ int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
+ if (checki16(delta)) {
+ emit_tai(as, PPCI_ADDI, rd, r, delta);
+ return 1;
+ }
+ }
+ rset_clear(work, r);
+ }
+ return 0; /* Failed. */
+}
+
+/* Load a 32 bit constant into a GPR. */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ if (checki16(i)) {
+ emit_ti(as, PPCI_LI, r, i);
+ } else {
+ if ((i & 0xffff)) {
+ int32_t jgl = i32ptr(J2G(as->J));
+ if ((uint32_t)(i-jgl) < 65536) {
+ emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768);
+ return;
+ } else if (emit_kdelta1(as, r, i)) {
+ return;
+ }
+ emit_asi(as, PPCI_ORI, r, r, i);
+ }
+ emit_ti(as, PPCI_LIS, r, (i >> 16));
+ }
+}
+
+#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
+
+static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow);
+
+/* Get/set from constant pointer. */
+static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
+{
+ int32_t jgl = i32ptr(J2G(as->J));
+ int32_t i = i32ptr(p);
+ Reg base;
+ if ((uint32_t)(i-jgl) < 65536) {
+ i = i-jgl-32768;
+ base = RID_JGL;
+ } else {
+ base = ra_allock(as, i-(int16_t)i, allow);
+ }
+ emit_tai(as, pi, r, base, i);
+}
+
+#define emit_loadk64(as, r, ir) \
+ emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)&ir_knum((ir))->u64, RSET_GPR)
+
+/* Get/set global_State fields. */
+static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
+{
+ emit_tai(as, pi, r, RID_JGL, ofs-32768);
+}
+
+#define emit_getgl(as, r, field) \
+ emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field))
+#define emit_setgl(as, r, field) \
+ emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field))
+
+/* Trace number is determined from per-trace exit stubs. */
+#define emit_setvmstate(as, i) UNUSED(i)
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for internal jumps. */
+typedef MCode *MCLabel;
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ lj_assertA(((delta + 0x8000) >> 16) == 0, "branch target out of range");
+ pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
+ *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
+}
+
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ *p = PPCI_B | (delta & 0x03fffffcu);
+}
+
+static void emit_call(ASMState *as, void *target)
+{
+ MCode *p = --as->mcp;
+ ptrdiff_t delta = (char *)target - (char *)p;
+ if ((((delta>>2) + 0x00800000) >> 24) == 0) {
+ *p = PPCI_BL | (delta & 0x03fffffcu);
+ } else { /* Target out of range: need indirect call. Don't use arg reg. */
+ RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
+ Reg r = ra_allock(as, i32ptr(target), allow);
+ *p = PPCI_BCTRL;
+ p[-1] = PPCI_MTCTR | PPCF_T(r);
+ as->mcp = p-1;
+ }
+}
+
+/* -- Emit generic operations --------------------------------------------- */
+
+#define emit_mr(as, dst, src) \
+ emit_asb(as, PPCI_MR, (dst), (src), (src))
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_mr(as, dst, src);
+ else
+ emit_fb(as, PPCI_FMR, dst, src);
+}
+
+/* Generic load of register with base and (small) offset address. */
+static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_LWZ, r, base, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, base, ofs);
+}
+
+/* Generic store of register with base and (small) offset address. */
+static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_tai(as, PPCI_STW, r, base, ofs);
+ else
+ emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, base, ofs);
+}
+
+/* Emit a compare (for equality) with a constant operand. */
+static void emit_cmpi(ASMState *as, Reg r, int32_t k)
+{
+ if (checki16(k)) {
+ emit_ai(as, PPCI_CMPWI, r, k);
+ } else if (checku16(k)) {
+ emit_ai(as, PPCI_CMPLWI, r, k);
+ } else {
+ emit_ai(as, PPCI_CMPLWI, RID_TMP, k);
+ emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16));
+ }
+}
+
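+/* Illustrative sketch of the trick above: XORIS clears the upper half
+** iff it matches the upper half of k, so a full 32 bit equality test
+** reduces to one unsigned 16 bit compare (executed order: XORIS, then
+** CMPLWI). Only valid for equality, not for ordered compares.
+*/
+#if 0
+#include <stdint.h>
+#include <assert.h>
+
+static int cmp_eq_via_xoris(uint32_t r, uint32_t k)
+{
+  uint32_t tmp = r ^ (k & 0xffff0000u);  /* XORIS tmp, r, k>>16 */
+  return tmp == (k & 0xffffu);           /* CMPLWI tmp, k&0xffff */
+}
+
+int main(void)
+{
+  assert(cmp_eq_via_xoris(0x12345678u, 0x12345678u));
+  assert(!cmp_eq_via_xoris(0x12345679u, 0x12345678u));
+  assert(!cmp_eq_via_xoris(0x00005678u, 0x12345678u));
+  return 0;
+}
+#endif
+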
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_ADDI, r, r, ofs);
+ if (!checki16(ofs))
+ emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16);
+ }
+}
+
+static void emit_spsub(ASMState *as, int32_t ofs)
+{
+ if (ofs) {
+ emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs);
+ emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP,
+ CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0));
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_emit_x86.h b/libs/luajit-cmake/luajit/src/lj_emit_x86.h
new file mode 100644
index 0000000..3d3beda
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_emit_x86.h
@@ -0,0 +1,572 @@
+/*
+** x86/x64 instruction emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* -- Emit basic instructions --------------------------------------------- */
+
+#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
+
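+/* Illustrative sketch (not part of the emitter): the ModRM byte packs a
+** 2 bit addressing mode, a 3 bit reg field and a 3 bit r/m field. E.g.
+** mov eax, [rbx+8] uses mode 01 (disp8), reg=0 (eax), rm=3 (rbx),
+** giving ModRM 0x43 followed by the displacement byte 0x08.
+*/
+#if 0
+#include <assert.h>
+
+int main(void)
+{
+  unsigned char modrm = (unsigned char)(0x40 + ((0 & 7) << 3) + (3 & 7));
+  assert(modrm == 0x43);
+  return 0;
+}
+#endif
+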
+#if LJ_64
+#define REXRB(p, rr, rb) \
+ { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
+ if (rex != 0x40) *--(p) = rex; }
+#define FORCE_REX 0x200
+#define REX_64 (FORCE_REX|0x080000)
+#define VEX_64 0x800000
+#else
+#define REXRB(p, rr, rb) ((void)0)
+#define FORCE_REX 0
+#define REX_64 0
+#define VEX_64 0
+#endif
+#if LJ_GC64
+#define REX_GC64 REX_64
+#else
+#define REX_GC64 0
+#endif
+
+#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
+#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
+#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
+
+#define emit_x87op(as, xo) \
+ (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
+
+/* op */
+static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
+ MCode *p, int delta)
+{
+ int n = (int8_t)xo;
+ if (n == -60) { /* VEX-encoded instruction */
+#if LJ_64
+ xo ^= (((rr>>1)&4)+((rx>>2)&2)+((rb>>3)&1))<<13;
+#endif
+ *(uint32_t *)(p+delta-5) = (uint32_t)xo;
+ return p+delta-5;
+ }
+#if defined(__GNUC__) || defined(__clang__)
+ if (__builtin_constant_p(xo) && n == -2)
+ p[delta-2] = (MCode)(xo >> 24);
+ else if (__builtin_constant_p(xo) && n == -3)
+ *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
+ else
+#endif
+ *(uint32_t *)(p+delta-5) = (uint32_t)xo;
+ p += n + delta;
+#if LJ_64
+ {
+ uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
+ if (rex != 0x40) {
+ rex |= (rr >> 16);
+ if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
+ else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
+ *--p = (MCode)rex;
+ }
+ }
+#else
+ UNUSED(rr); UNUSED(rb); UNUSED(rx);
+#endif
+ return p;
+}
+
+/* op + modrm */
+#define emit_opm(xo, mode, rr, rb, p, delta) \
+ (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
+ emit_op((xo), (rr), (rb), 0, (p), (delta)))
+
+/* op + modrm + sib */
+#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
+ (p[-1] = MODRM((scale), (rx), (rb)), \
+ p[-2] = MODRM((mode), (rr), RID_ESP), \
+ emit_op((xo), (rr), (rb), (rx), (p), -1))
+
+/* op r1, r2 */
+static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
+{
+ MCode *p = as->mcp;
+ as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
+}
+
+#if LJ_64 && defined(LUA_USE_ASSERT)
+/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
+static int32_t ptr2addr(const void *p)
+{
+ lj_assertX((uintptr_t)p < (uintptr_t)0x80000000, "pointer outside 2G range");
+ return i32ptr(p);
+}
+#else
+#define ptr2addr(p) (i32ptr((p)))
+#endif
+
+/* op r, [base+ofs] */
+static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ra_hasreg(rb)) {
+ if (LJ_GC64 && rb == RID_RIP) {
+ mode = XM_OFS0;
+ p -= 4;
+ *(int32_t *)p = ofs;
+ } else if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ *--p = (MCode)ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = ofs;
+ mode = XM_OFS32;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ } else {
+ *(int32_t *)(p-4) = ofs;
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ p -= 5;
+ rb = RID_ESP;
+#else
+ p -= 4;
+ rb = RID_EBP;
+#endif
+ mode = XM_OFS0;
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op r, [base+idx*scale+ofs] */
+static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
+ x86Mode scale, int32_t ofs)
+{
+ MCode *p = as->mcp;
+ x86Mode mode;
+ if (ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(ofs)) {
+ mode = XM_OFS8;
+ *--p = (MCode)ofs;
+ } else {
+ mode = XM_OFS32;
+ p -= 4;
+ *(int32_t *)p = ofs;
+ }
+ as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
+}
+
+/* op r, i */
+static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ MCode *p = as->mcp;
+ x86Op xo;
+ if (checki8(i)) {
+ *--p = (MCode)i;
+ xo = XG_TOXOi8(xg);
+ } else {
+ p -= 4;
+ *(int32_t *)p = i;
+ xo = XG_TOXOi(xg);
+ }
+ as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
+}
+
+/* op [base+ofs], i */
+static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
+ int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
+}
+
+#define emit_shifti(as, xg, r, i) \
+ (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
+
+/* op r, rm/mrm */
+static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
+{
+ MCode *p = as->mcp;
+ x86Mode mode = XM_REG;
+ if (rb == RID_MRM) {
+ rb = as->mrm.base;
+ if (rb == RID_NONE) {
+ rb = RID_EBP;
+ mode = XM_OFS0;
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ if (as->mrm.idx != RID_NONE)
+ goto mrmidx;
+#if LJ_64
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ rb = RID_ESP;
+#endif
+ } else if (LJ_GC64 && rb == RID_RIP) {
+ lj_assertA(as->mrm.idx == RID_NONE, "RIP-rel mrm cannot have index");
+ mode = XM_OFS0;
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ } else {
+ if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
+ mode = XM_OFS0;
+ } else if (checki8(as->mrm.ofs)) {
+ *--p = (MCode)as->mrm.ofs;
+ mode = XM_OFS8;
+ } else {
+ p -= 4;
+ *(int32_t *)p = as->mrm.ofs;
+ mode = XM_OFS32;
+ }
+ if (as->mrm.idx != RID_NONE) {
+ mrmidx:
+ as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
+ return;
+ }
+ if ((rb&7) == RID_ESP)
+ *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
+ }
+ }
+ as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
+}
+
+/* op rm/mrm, i */
+static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
+{
+ x86Op xo;
+ if (checki8(i)) {
+ emit_i8(as, i);
+ xo = XG_TOXOi8(xg);
+ } else {
+ emit_i32(as, i);
+ xo = XG_TOXOi(xg);
+ }
+ emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
+}
+
+/* -- Emit loads/stores --------------------------------------------------- */
+
+/* mov [base+ofs], i */
+static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
+{
+ emit_i32(as, i);
+ emit_rmro(as, XO_MOVmi, 0, base, ofs);
+}
+
+/* mov [base+ofs], r */
+#define emit_movtomro(as, r, base, ofs) \
+ emit_rmro(as, XO_MOVto, (r), (base), (ofs))
+
+/* Get/set global_State fields. */
+#define emit_opgl(as, xo, r, field) \
+ emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
+#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r)|REX_GC64, field)
+#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r)|REX_GC64, field)
+
+#define emit_setvmstate(as, i) \
+ (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
+
+/* mov r, i / xor r, r */
+static void emit_loadi(ASMState *as, Reg r, int32_t i)
+{
+ /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP/jcc. */
+ if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
+ (as->curins+1 < as->T->nins &&
+ IR(as->curins+1)->o == IR_HIOP))) &&
+ !((*as->mcp == 0x0f && (as->mcp[1] & 0xf0) == XI_JCCn) ||
+ (*as->mcp & 0xf0) == XI_JCCs)) {
+ emit_rr(as, XO_ARITH(XOg_XOR), r, r);
+ } else {
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = i;
+ p[-5] = (MCode)(XI_MOVri+(r&7));
+ p -= 5;
+ REXRB(p, 0, r);
+ as->mcp = p;
+ }
+}
+
+#if LJ_GC64
+#define dispofs(as, k) \
+ ((intptr_t)((uintptr_t)(k) - (uintptr_t)J2GG(as->J)->dispatch))
+#define mcpofs(as, k) \
+ ((intptr_t)((uintptr_t)(k) - (uintptr_t)as->mcp))
+#define mctopofs(as, k) \
+ ((intptr_t)((uintptr_t)(k) - (uintptr_t)as->mctop))
+/* mov r, addr */
+#define emit_loada(as, r, addr) \
+ emit_loadu64(as, (r), (uintptr_t)(addr))
+#else
+/* mov r, addr */
+#define emit_loada(as, r, addr) \
+ emit_loadi(as, (r), ptr2addr((addr)))
+#endif
+
+#if LJ_64
+/* mov r, imm64 or shorter 32 bit extended load. */
+static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
+{
+ if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */
+ emit_loadi(as, r, (int32_t)u64);
+ } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = (int32_t)u64;
+ as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
+#if LJ_GC64
+ } else if (checki32(dispofs(as, u64))) {
+ emit_rmro(as, XO_LEA, r|REX_64, RID_DISPATCH, (int32_t)dispofs(as, u64));
+ } else if (checki32(mcpofs(as, u64)) && checki32(mctopofs(as, u64))) {
+ /* Since as->realign assumes the code size doesn't change, check
+ ** RIP-relative addressing reachability for both as->mcp and as->mctop.
+ */
+ emit_rmro(as, XO_LEA, r|REX_64, RID_RIP, (int32_t)mcpofs(as, u64));
+#endif
+ } else { /* Full-size 64 bit load. */
+ MCode *p = as->mcp;
+ *(uint64_t *)(p-8) = u64;
+ p[-9] = (MCode)(XI_MOVri+(r&7));
+ p[-10] = 0x48 + ((r>>3)&1);
+ p -= 10;
+ as->mcp = p;
+ }
+}
+#endif
+
+/* op r, [addr] */
+static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
+{
+#if LJ_GC64
+ if (checki32(dispofs(as, addr))) {
+ emit_rmro(as, xo, rr, RID_DISPATCH, (int32_t)dispofs(as, addr));
+ } else if (checki32(mcpofs(as, addr)) && checki32(mctopofs(as, addr))) {
+ emit_rmro(as, xo, rr, RID_RIP, (int32_t)mcpofs(as, addr));
+ } else if (!checki32((intptr_t)addr)) {
+ Reg ra = (rr & 15);
+ if (xo != XO_MOV) {
+ /* We can't allocate a register here. Use and restore DISPATCH. Ugly. */
+ uint64_t dispaddr = (uintptr_t)J2GG(as->J)->dispatch;
+ uint8_t i8 = xo == XO_GROUP3b ? *as->mcp++ : 0;
+ ra = RID_DISPATCH;
+ if (checku32(dispaddr)) {
+ emit_loadi(as, ra, (int32_t)dispaddr);
+ } else { /* Full-size 64 bit load. */
+ MCode *p = as->mcp;
+ *(uint64_t *)(p-8) = dispaddr;
+ p[-9] = (MCode)(XI_MOVri+(ra&7));
+ p[-10] = 0x48 + ((ra>>3)&1);
+ p -= 10;
+ as->mcp = p;
+ }
+ if (xo == XO_GROUP3b) emit_i8(as, i8);
+ }
+ emit_rmro(as, xo, rr, ra, 0);
+ emit_loadu64(as, ra, (uintptr_t)addr);
+ } else
+#endif
+ {
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = ptr2addr(addr);
+#if LJ_64
+ p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
+#else
+ as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
+#endif
+ }
+}
+
+/* Load 64 bit IR constant into register. */
+static void emit_loadk64(ASMState *as, Reg r, IRIns *ir)
+{
+ Reg r64;
+ x86Op xo;
+ const uint64_t *k = &ir_k64(ir)->u64;
+ if (rset_test(RSET_FPR, r)) {
+ r64 = r;
+ xo = XO_MOVSD;
+ } else {
+ r64 = r | REX_64;
+ xo = XO_MOV;
+ }
+ if (*k == 0) {
+ emit_rr(as, rset_test(RSET_FPR, r) ? XO_XORPS : XO_ARITH(XOg_XOR), r, r);
+#if LJ_GC64
+ } else if (checki32((intptr_t)k) || checki32(dispofs(as, k)) ||
+ (checki32(mcpofs(as, k)) && checki32(mctopofs(as, k)))) {
+ emit_rma(as, xo, r64, k);
+ } else {
+ if (ir->i) {
+ lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
+ "bad interned 64 bit constant");
+ } else if (as->curins <= as->stopins && rset_test(RSET_GPR, r)) {
+ emit_loadu64(as, r, *k);
+ return;
+ } else {
+ /* If all else fails, add the FP constant at the MCode area bottom. */
+ while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
+ *(uint64_t *)as->mcbot = *k;
+ ir->i = (int32_t)(as->mctop - as->mcbot);
+ as->mcbot += 8;
+ as->mclim = as->mcbot + MCLIM_REDZONE;
+ lj_mcode_commitbot(as->J, as->mcbot);
+ }
+ emit_rmro(as, xo, r64, RID_RIP, (int32_t)mcpofs(as, as->mctop - ir->i));
+#else
+ } else {
+ emit_rma(as, xo, r64, k);
+#endif
+ }
+}
+
+/* -- Emit control-flow instructions -------------------------------------- */
+
+/* Label for short jumps. */
+typedef MCode *MCLabel;
+
+#if LJ_32 && LJ_HASFFI
+/* jmp short target */
+static void emit_sjmp(ASMState *as, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(delta == (int8_t)delta, "short jump target out of range");
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = XI_JMPs;
+ as->mcp = p - 2;
+}
+#endif
+
+/* jcc short target */
+static void emit_sjcc(ASMState *as, int cc, MCLabel target)
+{
+ MCode *p = as->mcp;
+ ptrdiff_t delta = target - p;
+ lj_assertA(delta == (int8_t)delta, "short jump target out of range");
+ p[-1] = (MCode)(int8_t)delta;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+}
+
+/* jcc short (pending target) */
+static MCLabel emit_sjcc_label(ASMState *as, int cc)
+{
+ MCode *p = as->mcp;
+ p[-1] = 0;
+ p[-2] = (MCode)(XI_JCCs+(cc&15));
+ as->mcp = p - 2;
+ return p;
+}
+
+/* Fixup jcc short target. */
+static void emit_sfixup(ASMState *as, MCLabel source)
+{
+ source[-1] = (MCode)(as->mcp-source);
+}
+
+/* Return label pointing to current PC. */
+#define emit_label(as) ((as)->mcp)
+
+/* Compute relative 32 bit offset for jump and call instructions. */
+static LJ_AINLINE int32_t jmprel(jit_State *J, MCode *p, MCode *target)
+{
+ ptrdiff_t delta = target - p;
+ UNUSED(J);
+ lj_assertJ(delta == (int32_t)delta, "jump target out of range");
+ return (int32_t)delta;
+}
+
+/* jcc target */
+static void emit_jcc(ASMState *as, int cc, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(as->J, p, target);
+ p[-5] = (MCode)(XI_JCCn+(cc&15));
+ p[-6] = 0x0f;
+ as->mcp = p - 6;
+}
+
+/* jmp target */
+static void emit_jmp(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+ *(int32_t *)(p-4) = jmprel(as->J, p, target);
+ p[-5] = XI_JMP;
+ as->mcp = p - 5;
+}
+
+/* call target */
+static void emit_call_(ASMState *as, MCode *target)
+{
+ MCode *p = as->mcp;
+#if LJ_64
+ if (target-p != (int32_t)(target-p)) {
+ /* Assumes RID_RET is never an argument to calls and always clobbered. */
+ emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
+ emit_loadu64(as, RID_RET, (uint64_t)target);
+ return;
+ }
+#endif
+ *(int32_t *)(p-4) = jmprel(as->J, p, target);
+ p[-5] = XI_CALL;
+ as->mcp = p - 5;
+}
+
+#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f))
+
+/* -- Emit generic operations --------------------------------------------- */
+
+/* Use 64 bit operations to handle 64 bit IR types. */
+#if LJ_64
+#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
+#define VEX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? VEX_64 : 0))
+#else
+#define REX_64IR(ir, r) (r)
+#define VEX_64IR(ir, r) (r)
+#endif
+
+/* Generic move between two regs. */
+static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
+{
+ UNUSED(ir);
+ if (dst < RID_MAX_GPR)
+ emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
+ else
+ emit_rr(as, XO_MOVAPS, dst, src);
+}
+
+/* Generic load of register with base and (small) offset address. */
+static void emit_loadofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOV, REX_64IR(ir, r), base, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, r, base, ofs);
+}
+
+/* Generic store of register with base and (small) offset address. */
+static void emit_storeofs(ASMState *as, IRIns *ir, Reg r, Reg base, int32_t ofs)
+{
+ if (r < RID_MAX_GPR)
+ emit_rmro(as, XO_MOVto, REX_64IR(ir, r), base, ofs);
+ else
+ emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, base, ofs);
+}
+
+/* Add offset to pointer. */
+static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
+{
+ if (ofs) {
+ emit_gri(as, XG_ARITHi(XOg_ADD), r|REX_GC64, ofs);
+ }
+}
+
+#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs))
+
+/* Prefer rematerialization of BASE/L from global_State over spills. */
+#define emit_canremat(ref) ((ref) <= REF_BASE)
+
diff --git a/libs/luajit-cmake/luajit/src/lj_err.c b/libs/luajit-cmake/luajit/src/lj_err.c
new file mode 100644
index 0000000..283c3d1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_err.c
@@ -0,0 +1,1098 @@
+/*
+** Error handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_err_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_func.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_ff.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_strfmt.h"
+
+/*
+** LuaJIT can either use internal or external frame unwinding:
+**
+** - Internal frame unwinding (INT) is free-standing and doesn't require
+** any OS or library support.
+**
+** - External frame unwinding (EXT) uses the system-provided unwind handler.
+**
+** Pros and Cons:
+**
+** - EXT requires unwind tables for *all* functions on the C stack between
+** the pcall/catch and the error/throw. C modules used by Lua code can
+** throw errors, so these need to have unwind tables, too. Transitively
+** this applies to all system libraries used by C modules -- at least
+** when they have callbacks which may throw an error.
+**
+** - INT is faster when actually throwing errors, but this happens rarely.
+** Setting up error handlers is zero-cost in any case.
+**
+** - INT needs to save *all* callee-saved registers when entering the
+** interpreter. EXT only needs to save those actually used inside the
+** interpreter. JIT-compiled code may need to save some more.
+**
+** - EXT provides full interoperability with C++ exceptions. You can throw
+** Lua errors or C++ exceptions through a mix of Lua frames and C++ frames.
+** C++ destructors are called as needed. C++ exceptions caught by pcall
+** are converted to the string "C++ exception". Lua errors can be caught
+** with catch (...) in C++.
+**
+** - INT has only limited support for automatically catching C++ exceptions
+** on POSIX systems using DWARF2 stack unwinding. Other systems may use
+** the wrapper function feature. Lua errors thrown through C++ frames
+** cannot be caught by C++ code and C++ destructors are not run.
+**
+** - EXT can handle errors from internal helper functions that are called
+** from JIT-compiled code (except for Windows/x86 and 32 bit ARM).
+** INT has no choice but to call the panic handler if this happens.
+** Note: this is mainly relevant for out-of-memory errors.
+**
+** EXT is the default on all systems where the toolchain produces unwind
+** tables by default (*). This is hard-coded and/or detected in src/Makefile.
+** You can thwart the detection with: TARGET_XCFLAGS=-DLUAJIT_UNWIND_INTERNAL
+**
+** INT is the default on all other systems.
+**
+** EXT can be manually enabled for toolchains that are able to produce
+** conforming unwind tables:
+** "TARGET_XCFLAGS=-funwind-tables -DLUAJIT_UNWIND_EXTERNAL"
+** As explained above, *all* C code used directly or indirectly by LuaJIT
+** must be compiled with -funwind-tables (or -fexceptions). C++ code must
+** *not* be compiled with -fno-exceptions.
+**
+** If you're unsure whether error handling inside the VM works correctly,
+** try running this and check whether it prints "OK":
+**
+** luajit -e "print(select(2, load('OK')):match('OK'))"
+**
+** (*) Originally, toolchains only generated unwind tables for C++ code. For
+** interoperability reasons, this can be manually enabled for plain C code,
+** too (with -funwind-tables). With the introduction of the x64 architecture,
+** the corresponding POSIX and Windows ABIs mandated unwind tables for all
+** code. Over the following years most desktop and server platforms have
+** enabled unwind tables by default on all architectures. OTOH mobile and
+** embedded platforms do not consistently mandate unwind tables.
+*/
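+
+/* Editorial illustration (not part of the upstream sources): an error
+** raised from a C function has to unwind across that C frame before
+** lua_pcall() can catch it. Under EXT this requires unwind tables for the
+** throwing function; under INT it works regardless, but C++ destructors in
+** intervening frames would be skipped. The demo functions below are
+** hypothetical and only sketch the control flow with the public Lua C API.
+*/
+#if 0
+#include <stdio.h>
+#include "lua.h"
+#include "lauxlib.h"
+
+static int demo_throw(lua_State *L)
+{
+  return luaL_error(L, "boom");  /* Unwinds across this C frame. */
+}
+
+static void demo_catch(lua_State *L)
+{
+  lua_pushcfunction(L, demo_throw);
+  if (lua_pcall(L, 0, 0, 0) != 0)  /* Error is caught after unwinding. */
+    printf("caught: %s\n", lua_tostring(L, -1));  /* "...: boom" */
+}
+#endif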
+
+/* -- Error messages ------------------------------------------------------ */
+
+/* Error message strings. */
+LJ_DATADEF const char *lj_err_allmsg =
+#define ERRDEF(name, msg) msg "\0"
+#include "lj_errmsg.h"
+;
+
+/* -- Internal frame unwinding -------------------------------------------- */
+
+/* Unwind Lua stack and move error message to new top. */
+LJ_NOINLINE static void unwindstack(lua_State *L, TValue *top)
+{
+ lj_func_closeuv(L, top);
+ if (top < L->top-1) {
+ copyTV(L, top, L->top-1);
+ L->top = top+1;
+ }
+ lj_state_relimitstack(L);
+}
+
+/* Unwind until stop frame. Optionally cleanup frames. */
+static void *err_unwind(lua_State *L, void *stopcf, int errcode)
+{
+ TValue *frame = L->base-1;
+ void *cf = L->cframe;
+ while (cf) {
+ int32_t nres = cframe_nres(cframe_raw(cf));
+ if (nres < 0) { /* C frame without Lua frame? */
+ TValue *top = restorestack(L, -nres);
+ if (frame < top) { /* Frame reached? */
+ if (errcode) {
+ L->base = frame+1;
+ L->cframe = cframe_prev(cf);
+ unwindstack(L, top);
+ }
+ return cf;
+ }
+ }
+ if (frame <= tvref(L->stack)+LJ_FR2)
+ break;
+ switch (frame_typep(frame)) {
+ case FRAME_LUA: /* Lua frame. */
+ case FRAME_LUAP:
+ frame = frame_prevl(frame);
+ break;
+ case FRAME_C: /* C frame. */
+ unwind_c:
+#if LJ_UNWIND_EXT
+ if (errcode) {
+ L->base = frame_prevd(frame) + 1;
+ L->cframe = cframe_prev(cf);
+ unwindstack(L, frame - LJ_FR2);
+ } else if (cf != stopcf) {
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+ }
+ return NULL; /* Continue unwinding. */
+#else
+ UNUSED(stopcf);
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+#endif
+ case FRAME_CP: /* Protected C frame. */
+ if (cframe_canyield(cf)) { /* Resume? */
+ if (errcode) {
+ hook_leave(G(L)); /* Assumes nobody uses coroutines inside hooks. */
+ L->cframe = NULL;
+ L->status = (uint8_t)errcode;
+ }
+ return cf;
+ }
+ if (errcode) {
+ L->base = frame_prevd(frame) + 1;
+ L->cframe = cframe_prev(cf);
+ unwindstack(L, frame - LJ_FR2);
+ }
+ return cf;
+ case FRAME_CONT: /* Continuation frame. */
+ if (frame_iscont_fficb(frame))
+ goto unwind_c;
+ /* fallthrough */
+ case FRAME_VARG: /* Vararg frame. */
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_PCALL: /* FF pcall() frame. */
+ case FRAME_PCALLH: /* FF pcall() frame inside hook. */
+ if (errcode) {
+ if (errcode == LUA_YIELD) {
+ frame = frame_prevd(frame);
+ break;
+ }
+ if (frame_typep(frame) == FRAME_PCALL)
+ hook_leave(G(L));
+ L->base = frame_prevd(frame) + 1;
+ L->cframe = cf;
+ unwindstack(L, L->base);
+ }
+ return (void *)((intptr_t)cf | CFRAME_UNWIND_FF);
+ }
+ }
+ /* No C frame. */
+ if (errcode) {
+ L->base = tvref(L->stack)+1+LJ_FR2;
+ L->cframe = NULL;
+ unwindstack(L, L->base);
+ if (G(L)->panic)
+ G(L)->panic(L);
+ exit(EXIT_FAILURE);
+ }
+ return L; /* Anything non-NULL will do. */
+}
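+
+/* Editorial note (not in the upstream sources): err_unwind() serves both
+** unwind phases. The external unwinders first call it with errcode == 0
+** (search phase) to check whether a catching frame exists without touching
+** the stack, then again with the real errcode (cleanup phase), which
+** unwinds the Lua stack as a side effect. A catching fast-function pcall
+** frame is signalled by tagging the returned C frame pointer with
+** CFRAME_UNWIND_FF.
+*/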
+
+/* -- External frame unwinding -------------------------------------------- */
+
+#if LJ_ABI_WIN
+
+/*
+** Someone in Redmond owes me several days of my life. A lot of this is
+** undocumented or just plain wrong on MSDN. Some of it can be gathered
+** from 3rd party docs or must be found by trial-and-error. They really
+** don't want you to write your own language-specific exception handler
+** or to interact gracefully with MSVC. :-(
+**
+** Apparently MSVC doesn't call C++ destructors for foreign exceptions
+** unless you compile your C++ code with /EHa. Unfortunately this means
+** catch (...) also catches things like access violations. The use of
+** _set_se_translator doesn't really help, because it requires /EHa, too.
+*/
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#if LJ_TARGET_X86
+typedef void *UndocumentedDispatcherContext; /* Unused on x86. */
+#else
+/* Taken from: http://www.nynaeve.net/?p=99 */
+typedef struct UndocumentedDispatcherContext {
+ ULONG64 ControlPc;
+ ULONG64 ImageBase;
+ PRUNTIME_FUNCTION FunctionEntry;
+ ULONG64 EstablisherFrame;
+ ULONG64 TargetIp;
+ PCONTEXT ContextRecord;
+ void (*LanguageHandler)(void);
+ PVOID HandlerData;
+ PUNWIND_HISTORY_TABLE HistoryTable;
+ ULONG ScopeIndex;
+ ULONG Fill0;
+} UndocumentedDispatcherContext;
+#endif
+
+/* Another wild guess. */
+extern void __DestructExceptionObject(EXCEPTION_RECORD *rec, int nothrow);
+
+#if LJ_TARGET_X64 && defined(MINGW_SDK_INIT)
+/* Workaround for broken MinGW64 declaration. */
+VOID RtlUnwindEx_FIXED(PVOID,PVOID,PVOID,PVOID,PVOID,PVOID) asm("RtlUnwindEx");
+#define RtlUnwindEx RtlUnwindEx_FIXED
+#endif
+
+#define LJ_MSVC_EXCODE ((DWORD)0xe06d7363)
+#define LJ_GCC_EXCODE ((DWORD)0x20474343)
+
+#define LJ_EXCODE ((DWORD)0xe24c4a00)
+#define LJ_EXCODE_MAKE(c) (LJ_EXCODE | (DWORD)(c))
+#define LJ_EXCODE_CHECK(cl) (((cl) ^ LJ_EXCODE) <= 0xff)
+#define LJ_EXCODE_ERRCODE(cl) ((int)((cl) & 0xff))
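+
+/* Editorial example: with LUA_ERRRUN == 2, LJ_EXCODE_MAKE(LUA_ERRRUN)
+** yields 0xe24c4a02. LJ_EXCODE_CHECK() accepts it, because
+** 0xe24c4a02 ^ 0xe24c4a00 == 2 <= 0xff, and LJ_EXCODE_ERRCODE()
+** recovers 2 == LUA_ERRRUN.
+*/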
+
+/* Windows exception handler for interpreter frame. */
+LJ_FUNCA int lj_err_unwind_win(EXCEPTION_RECORD *rec,
+ void *f, CONTEXT *ctx, UndocumentedDispatcherContext *dispatch)
+{
+#if LJ_TARGET_X86
+ void *cf = (char *)f - CFRAME_OFS_SEH;
+#else
+ void *cf = f;
+#endif
+ lua_State *L = cframe_L(cf);
+ int errcode = LJ_EXCODE_CHECK(rec->ExceptionCode) ?
+ LJ_EXCODE_ERRCODE(rec->ExceptionCode) : LUA_ERRRUN;
+ if ((rec->ExceptionFlags & 6)) { /* EH_UNWINDING|EH_EXIT_UNWIND */
+ /* Unwind internal frames. */
+ err_unwind(L, cf, errcode);
+ } else {
+ void *cf2 = err_unwind(L, cf, 0);
+ if (cf2) { /* We catch it, so start unwinding the upper frames. */
+ if (rec->ExceptionCode == LJ_MSVC_EXCODE ||
+ rec->ExceptionCode == LJ_GCC_EXCODE) {
+#if !LJ_TARGET_CYGWIN
+ __DestructExceptionObject(rec, 1);
+#endif
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ } else if (!LJ_EXCODE_CHECK(rec->ExceptionCode)) {
+ /* Don't catch access violations etc. */
+ return 1; /* ExceptionContinueSearch */
+ }
+#if LJ_TARGET_X86
+ UNUSED(ctx);
+ UNUSED(dispatch);
+ /* Call all handlers for all lower C frames (including ourselves) again
+ ** with EH_UNWINDING set. Then call the specified function, passing cf
+ ** and errcode.
+ */
+ lj_vm_rtlunwind(cf, (void *)rec,
+ (cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ?
+ (void *)lj_vm_unwind_ff : (void *)lj_vm_unwind_c, errcode);
+ /* lj_vm_rtlunwind does not return. */
+#else
+ /* Unwind the stack and call all handlers for all lower C frames
+ ** (including ourselves) again with EH_UNWINDING set. Then set
+ ** stack pointer = cf, result = errcode and jump to the specified target.
+ */
+ RtlUnwindEx(cf, (void *)((cframe_unwind_ff(cf2) && errcode != LUA_YIELD) ?
+ lj_vm_unwind_ff_eh :
+ lj_vm_unwind_c_eh),
+ rec, (void *)(uintptr_t)errcode, ctx, dispatch->HistoryTable);
+ /* RtlUnwindEx should never return. */
+#endif
+ }
+ }
+ return 1; /* ExceptionContinueSearch */
+}
+
+#if LJ_UNWIND_JIT
+
+#if LJ_TARGET_X64
+#define CONTEXT_REG_PC Rip
+#elif LJ_TARGET_ARM64
+#define CONTEXT_REG_PC Pc
+#else
+#error "NYI: Windows arch-specific unwinder for JIT-compiled code"
+#endif
+
+/* Windows unwinder for JIT-compiled code. */
+static void err_unwind_win_jit(global_State *g, int errcode)
+{
+ CONTEXT ctx;
+ UNWIND_HISTORY_TABLE hist;
+
+ memset(&hist, 0, sizeof(hist));
+ RtlCaptureContext(&ctx);
+ while (1) {
+ uintptr_t frame, base, addr = ctx.CONTEXT_REG_PC;
+ void *hdata;
+ PRUNTIME_FUNCTION func = RtlLookupFunctionEntry(addr, &base, &hist);
+ if (!func) { /* Found frame without .pdata: must be JIT-compiled code. */
+ ExitNo exitno;
+ uintptr_t stub = lj_trace_unwind(G2J(g), addr - sizeof(MCode), &exitno);
+ if (stub) { /* Jump to side exit to unwind the trace. */
+ ctx.CONTEXT_REG_PC = stub;
+ G2J(g)->exitcode = errcode;
+ RtlRestoreContext(&ctx, NULL); /* Does not return. */
+ }
+ break;
+ }
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, base, addr, func,
+ &ctx, &hdata, &frame, NULL);
+ if (!addr) break;
+ }
+ /* If we end up here, unwinding failed. */
+}
+#endif
+
+/* Raise Windows exception. */
+static void err_raise_ext(global_State *g, int errcode)
+{
+#if LJ_UNWIND_JIT
+ if (tvref(g->jit_base)) {
+ err_unwind_win_jit(g, errcode);
+ return; /* Unwinding failed. */
+ }
+#elif LJ_HASJIT
+ /* Cannot catch on-trace errors for Windows/x86 SEH. Unwind to interpreter. */
+ setmref(g->jit_base, NULL);
+#endif
+ UNUSED(g);
+ RaiseException(LJ_EXCODE_MAKE(errcode), 1 /* EH_NONCONTINUABLE */, 0, NULL);
+}
+
+#elif !LJ_NO_UNWIND && (defined(__GNUC__) || defined(__clang__))
+
+/*
+** We have to use our own definitions instead of the mandatory (!) unwind.h,
+** since various OS, distros and compilers mess up the header installation.
+*/
+
+typedef struct _Unwind_Context _Unwind_Context;
+
+#define _URC_OK 0
+#define _URC_FATAL_PHASE2_ERROR 2
+#define _URC_FATAL_PHASE1_ERROR 3
+#define _URC_HANDLER_FOUND 6
+#define _URC_INSTALL_CONTEXT 7
+#define _URC_CONTINUE_UNWIND 8
+#define _URC_FAILURE 9
+
+#define LJ_UEXCLASS 0x4c55414a49543200ULL /* LUAJIT2\0 */
+#define LJ_UEXCLASS_MAKE(c) (LJ_UEXCLASS | (uint64_t)(c))
+#define LJ_UEXCLASS_CHECK(cl) (((cl) ^ LJ_UEXCLASS) <= 0xff)
+#define LJ_UEXCLASS_ERRCODE(cl) ((int)((cl) & 0xff))
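+
+/* Editorial example: the exception class is the byte sequence "LUAJIT2\0"
+** read as a big-endian uint64_t. With LUA_ERRMEM == 4,
+** LJ_UEXCLASS_MAKE(LUA_ERRMEM) gives 0x4c55414a49543204ULL;
+** LJ_UEXCLASS_CHECK() accepts it and LJ_UEXCLASS_ERRCODE() recovers 4.
+*/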
+
+#if !LJ_TARGET_ARM
+
+typedef struct _Unwind_Exception
+{
+ uint64_t exclass;
+ void (*excleanup)(int, struct _Unwind_Exception *);
+ uintptr_t p1, p2;
+} __attribute__((__aligned__)) _Unwind_Exception;
+#define UNWIND_EXCEPTION_TYPE _Unwind_Exception
+
+extern uintptr_t _Unwind_GetCFA(_Unwind_Context *);
+extern void _Unwind_SetGR(_Unwind_Context *, int, uintptr_t);
+extern uintptr_t _Unwind_GetIP(_Unwind_Context *);
+extern void _Unwind_SetIP(_Unwind_Context *, uintptr_t);
+extern void _Unwind_DeleteException(_Unwind_Exception *);
+extern int _Unwind_RaiseException(_Unwind_Exception *);
+
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+
+/* DWARF2 personality handler referenced from interpreter .eh_frame. */
+LJ_FUNCA int lj_err_unwind_dwarf(int version, int actions,
+ uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx)
+{
+ void *cf;
+ lua_State *L;
+ if (version != 1)
+ return _URC_FATAL_PHASE1_ERROR;
+ cf = (void *)_Unwind_GetCFA(ctx);
+ L = cframe_L(cf);
+ if ((actions & _UA_SEARCH_PHASE)) {
+#if LJ_UNWIND_EXT
+ if (err_unwind(L, cf, 0) == NULL)
+ return _URC_CONTINUE_UNWIND;
+#endif
+ if (!LJ_UEXCLASS_CHECK(uexclass)) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ }
+ return _URC_HANDLER_FOUND;
+ }
+ if ((actions & _UA_CLEANUP_PHASE)) {
+ int errcode;
+ if (LJ_UEXCLASS_CHECK(uexclass)) {
+ errcode = LJ_UEXCLASS_ERRCODE(uexclass);
+ } else {
+ if ((actions & _UA_HANDLER_FRAME))
+ _Unwind_DeleteException(uex);
+ errcode = LUA_ERRRUN;
+ }
+#if LJ_UNWIND_EXT
+ cf = err_unwind(L, cf, errcode);
+ if ((actions & _UA_FORCE_UNWIND)) {
+ return _URC_CONTINUE_UNWIND;
+ } else if (cf) {
+ _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode);
+ _Unwind_SetIP(ctx, (uintptr_t)(cframe_unwind_ff(cf) ?
+ lj_vm_unwind_ff_eh :
+ lj_vm_unwind_c_eh));
+ return _URC_INSTALL_CONTEXT;
+ }
+#if LJ_TARGET_X86ORX64
+ else if ((actions & _UA_HANDLER_FRAME)) {
+ /* Workaround for ancient libgcc bug. Still present in RHEL 5.5. :-/
+ ** Real fix: http://gcc.gnu.org/viewcvs/trunk/gcc/unwind-dw2.c?r1=121165&r2=124837&pathrev=153877&diff_format=h
+ */
+ _Unwind_SetGR(ctx, LJ_TARGET_EHRETREG, errcode);
+ _Unwind_SetIP(ctx, (uintptr_t)lj_vm_unwind_rethrow);
+ return _URC_INSTALL_CONTEXT;
+ }
+#endif
+#else
+ /* This is not the proper way to escape from the unwinder. We get away with
+ ** it on non-x64 because the interpreter restores all callee-saved regs.
+ */
+ lj_err_throw(L, errcode);
+#if LJ_TARGET_X64
+#error "Broken build system -- only use the provided Makefiles!"
+#endif
+#endif
+ }
+ return _URC_CONTINUE_UNWIND;
+}
+
+#if LJ_UNWIND_EXT && defined(LUA_USE_ASSERT)
+struct dwarf_eh_bases { void *tbase, *dbase, *func; };
+extern const void *_Unwind_Find_FDE(void *pc, struct dwarf_eh_bases *bases);
+
+/* Verify that external error handling actually has a chance to work. */
+void lj_err_verify(void)
+{
+#if !LJ_TARGET_OSX
+ /* Check disabled on MacOS due to brilliant software engineering at Apple. */
+ struct dwarf_eh_bases ehb;
+ lj_assertX(_Unwind_Find_FDE((void *)lj_err_throw, &ehb), "broken build: external frame unwinding enabled, but missing -funwind-tables");
+#endif
+ /* Check disabled, because of broken Fedora/ARM64. See #722.
+ lj_assertX(_Unwind_Find_FDE((void *)_Unwind_RaiseException, &ehb), "broken build: external frame unwinding enabled, but system libraries have no unwind tables");
+ */
+}
+#endif
+
+#if LJ_UNWIND_JIT
+/* DWARF2 personality handler for JIT-compiled code. */
+static int err_unwind_jit(int version, int actions,
+ uint64_t uexclass, _Unwind_Exception *uex, _Unwind_Context *ctx)
+{
+ /* NYI: FFI C++ exception interoperability. */
+ if (version != 1 || !LJ_UEXCLASS_CHECK(uexclass))
+ return _URC_FATAL_PHASE1_ERROR;
+ if ((actions & _UA_SEARCH_PHASE)) {
+ return _URC_HANDLER_FOUND;
+ }
+ if ((actions & _UA_CLEANUP_PHASE)) {
+ global_State *g = *(global_State **)(uex+1);
+ ExitNo exitno;
+ uintptr_t addr = _Unwind_GetIP(ctx); /* Return address _after_ call. */
+ uintptr_t stub = lj_trace_unwind(G2J(g), addr - sizeof(MCode), &exitno);
+ lj_assertG(tvref(g->jit_base), "unexpected throw across mcode frame");
+ if (stub) { /* Jump to side exit to unwind the trace. */
+ G2J(g)->exitcode = LJ_UEXCLASS_ERRCODE(uexclass);
+#ifdef LJ_TARGET_MIPS
+ _Unwind_SetGR(ctx, 4, stub);
+ _Unwind_SetGR(ctx, 5, exitno);
+ _Unwind_SetIP(ctx, (uintptr_t)(void *)lj_vm_unwind_stub);
+#else
+ _Unwind_SetIP(ctx, stub);
+#endif
+ return _URC_INSTALL_CONTEXT;
+ }
+ return _URC_FATAL_PHASE2_ERROR;
+ }
+ return _URC_FATAL_PHASE1_ERROR;
+}
+
+/* DWARF2 template frame info for JIT-compiled code.
+**
+** After copying the template to the start of the mcode segment,
+** the frame handler function and the code size are patched.
+** The frame handler always installs a new context to jump to the exit,
+** so don't bother to add any unwind opcodes.
+*/
+static const uint8_t err_frame_jit_template[] = {
+#if LJ_BE
+ 0,0,0,
+#endif
+ LJ_64 ? 0x1c : 0x14, /* CIE length. */
+#if LJ_LE
+ 0,0,0,
+#endif
+ 0,0,0,0, 1, 'z','P','R',0, /* CIE mark, CIE version, augmentation. */
+ 1, LJ_64 ? 0x78 : 0x7c, LJ_TARGET_EHRAREG, /* Code/data align, RA. */
+#if LJ_64
+ 10, 0, 0,0,0,0,0,0,0,0, 0x1b, /* Aug. data ABS handler, PCREL|SDATA4 code. */
+ 0,0,0,0,0, /* Alignment. */
+#else
+ 6, 0, 0,0,0,0, 0x1b, /* Aug. data ABS handler, PCREL|SDATA4 code. */
+ 0, /* Alignment. */
+#endif
+#if LJ_BE
+ 0,0,0,
+#endif
+ LJ_64 ? 0x14 : 0x10, /* FDE length. */
+ 0,0,0,
+ LJ_64 ? 0x24 : 0x1c, /* CIE offset. */
+ 0,0,0,
+ LJ_64 ? 0x14 : 0x10, /* Code offset. After Final FDE. */
+#if LJ_LE
+ 0,0,0,
+#endif
+ 0,0,0,0, 0, 0,0,0, /* Code size, augmentation length, alignment. */
+#if LJ_64
+ 0,0,0,0, /* Alignment. */
+#endif
+ 0,0,0,0 /* Final FDE. */
+};
+
+#define ERR_FRAME_JIT_OFS_HANDLER 0x12
+#define ERR_FRAME_JIT_OFS_FDE (LJ_64 ? 0x20 : 0x18)
+#define ERR_FRAME_JIT_OFS_CODE_SIZE (LJ_64 ? 0x2c : 0x24)
+#if LJ_TARGET_OSX
+#define ERR_FRAME_JIT_OFS_REGISTER ERR_FRAME_JIT_OFS_FDE
+#else
+#define ERR_FRAME_JIT_OFS_REGISTER 0
+#endif
+
+extern void __register_frame(const void *);
+extern void __deregister_frame(const void *);
+
+uint8_t *lj_err_register_mcode(void *base, size_t sz, uint8_t *info)
+{
+ void **handler;
+ memcpy(info, err_frame_jit_template, sizeof(err_frame_jit_template));
+ handler = (void *)err_unwind_jit;
+ memcpy(info + ERR_FRAME_JIT_OFS_HANDLER, &handler, sizeof(handler));
+ *(uint32_t *)(info + ERR_FRAME_JIT_OFS_CODE_SIZE) =
+ (uint32_t)(sz - sizeof(err_frame_jit_template) - (info - (uint8_t *)base));
+ __register_frame(info + ERR_FRAME_JIT_OFS_REGISTER);
+#ifdef LUA_USE_ASSERT
+ {
+ struct dwarf_eh_bases ehb;
+ lj_assertX(_Unwind_Find_FDE(info + sizeof(err_frame_jit_template)+1, &ehb),
+ "bad JIT unwind table registration");
+ }
+#endif
+ return info + sizeof(err_frame_jit_template);
+}
+
+void lj_err_deregister_mcode(void *base, size_t sz, uint8_t *info)
+{
+ UNUSED(base); UNUSED(sz);
+ __deregister_frame(info + ERR_FRAME_JIT_OFS_REGISTER);
+}
+#endif
+
+#else /* LJ_TARGET_ARM */
+
+#define _US_VIRTUAL_UNWIND_FRAME 0
+#define _US_UNWIND_FRAME_STARTING 1
+#define _US_ACTION_MASK 3
+#define _US_FORCE_UNWIND 8
+
+typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+#define UNWIND_EXCEPTION_TYPE _Unwind_Control_Block
+
+struct _Unwind_Control_Block {
+ uint64_t exclass;
+ uint32_t misc[20];
+};
+
+extern int _Unwind_RaiseException(_Unwind_Control_Block *);
+extern int __gnu_unwind_frame(_Unwind_Control_Block *, _Unwind_Context *);
+extern int _Unwind_VRS_Set(_Unwind_Context *, int, uint32_t, int, void *);
+extern int _Unwind_VRS_Get(_Unwind_Context *, int, uint32_t, int, void *);
+
+static inline uint32_t _Unwind_GetGR(_Unwind_Context *ctx, int r)
+{
+ uint32_t v;
+ _Unwind_VRS_Get(ctx, 0, r, 0, &v);
+ return v;
+}
+
+static inline void _Unwind_SetGR(_Unwind_Context *ctx, int r, uint32_t v)
+{
+ _Unwind_VRS_Set(ctx, 0, r, 0, &v);
+}
+
+extern void lj_vm_unwind_ext(void);
+
+/* ARM unwinder personality handler referenced from interpreter .ARM.extab. */
+LJ_FUNCA int lj_err_unwind_arm(int state, _Unwind_Control_Block *ucb,
+ _Unwind_Context *ctx)
+{
+ void *cf = (void *)_Unwind_GetGR(ctx, 13);
+ lua_State *L = cframe_L(cf);
+ int errcode;
+
+ switch ((state & _US_ACTION_MASK)) {
+ case _US_VIRTUAL_UNWIND_FRAME:
+ if ((state & _US_FORCE_UNWIND)) break;
+ return _URC_HANDLER_FOUND;
+ case _US_UNWIND_FRAME_STARTING:
+ if (LJ_UEXCLASS_CHECK(ucb->exclass)) {
+ errcode = LJ_UEXCLASS_ERRCODE(ucb->exclass);
+ } else {
+ errcode = LUA_ERRRUN;
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRCPP));
+ }
+ cf = err_unwind(L, cf, errcode);
+ if ((state & _US_FORCE_UNWIND) || cf == NULL) break;
+ _Unwind_SetGR(ctx, 15, (uint32_t)lj_vm_unwind_ext);
+ _Unwind_SetGR(ctx, 0, (uint32_t)ucb);
+ _Unwind_SetGR(ctx, 1, (uint32_t)errcode);
+ _Unwind_SetGR(ctx, 2, cframe_unwind_ff(cf) ?
+ (uint32_t)lj_vm_unwind_ff_eh :
+ (uint32_t)lj_vm_unwind_c_eh);
+ return _URC_INSTALL_CONTEXT;
+ default:
+ return _URC_FAILURE;
+ }
+ if (__gnu_unwind_frame(ucb, ctx) != _URC_OK)
+ return _URC_FAILURE;
+#ifdef LUA_USE_ASSERT
+ /* We should never get here unless this is a forced unwind aka backtrace. */
+ if (_Unwind_GetGR(ctx, 0) == 0xff33aa77) {
+ _Unwind_SetGR(ctx, 0, 0xff33aa88);
+ }
+#endif
+ return _URC_CONTINUE_UNWIND;
+}
+
+#if LJ_UNWIND_EXT && defined(LUA_USE_ASSERT)
+typedef int (*_Unwind_Trace_Fn)(_Unwind_Context *, void *);
+extern int _Unwind_Backtrace(_Unwind_Trace_Fn, void *);
+
+static int err_verify_bt(_Unwind_Context *ctx, int *got)
+{
+ if (_Unwind_GetGR(ctx, 0) == 0xff33aa88) { *got = 2; }
+ else if (*got == 0) { *got = 1; _Unwind_SetGR(ctx, 0, 0xff33aa77); }
+ return _URC_OK;
+}
+
+/* Verify that external error handling actually has a chance to work. */
+void lj_err_verify(void)
+{
+ int got = 0;
+ _Unwind_Backtrace((_Unwind_Trace_Fn)err_verify_bt, &got);
+ lj_assertX(got == 2, "broken build: external frame unwinding enabled, but missing -funwind-tables");
+}
+#endif
+
+/*
+** Note: LJ_UNWIND_JIT is not implemented for 32 bit ARM.
+**
+** The quirky ARM unwind API doesn't have __register_frame().
+** A potential workaround might involve _Unwind_Backtrace.
+** But most 32 bit ARM targets don't qualify for LJ_UNWIND_EXT, anyway,
+** since they are built without unwind tables by default.
+*/
+
+#endif /* LJ_TARGET_ARM */
+
+#if LJ_UNWIND_EXT
+static __thread struct {
+ UNWIND_EXCEPTION_TYPE ex;
+ global_State *g;
+} static_uex;
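+
+/* Editorial note: the DWARF2 JIT personality handler err_unwind_jit()
+** recovers the global_State via *(global_State **)(uex+1), which relies
+** on g being stored immediately after the exception header in this
+** thread-local struct.
+*/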
+
+/* Raise external exception. */
+static void err_raise_ext(global_State *g, int errcode)
+{
+ memset(&static_uex, 0, sizeof(static_uex));
+ static_uex.ex.exclass = LJ_UEXCLASS_MAKE(errcode);
+ static_uex.g = g;
+ _Unwind_RaiseException(&static_uex.ex);
+}
+
+#endif
+
+#endif
+
+/* -- Error handling ------------------------------------------------------ */
+
+/* Throw error. Find catch frame, unwind stack and continue. */
+LJ_NOINLINE void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode)
+{
+ global_State *g = G(L);
+ lj_trace_abort(g);
+ L->status = LUA_OK;
+#if LJ_UNWIND_EXT
+ err_raise_ext(g, errcode);
+ /*
+ ** A return from this function signals a corrupt C stack that cannot be
+ ** unwound. We have no choice but to call the panic function and exit.
+ **
+ ** Usually this is caused by a C function without unwind information.
+ ** This may happen if you've manually enabled LUAJIT_UNWIND_EXTERNAL
+ ** and forgot to recompile *every* non-C++ file with -funwind-tables.
+ */
+ if (G(L)->panic)
+ G(L)->panic(L);
+#else
+#if LJ_HASJIT
+ setmref(g->jit_base, NULL);
+#endif
+ {
+ void *cf = err_unwind(L, NULL, errcode);
+ if (cframe_unwind_ff(cf))
+ lj_vm_unwind_ff(cframe_raw(cf));
+ else
+ lj_vm_unwind_c(cframe_raw(cf), errcode);
+ }
+#endif
+ exit(EXIT_FAILURE);
+}
+
+/* Return string object for error message. */
+LJ_NOINLINE GCstr *lj_err_str(lua_State *L, ErrMsg em)
+{
+ return lj_str_newz(L, err2msg(em));
+}
+
+/* Out-of-memory error. */
+LJ_NOINLINE void lj_err_mem(lua_State *L)
+{
+ if (L->status == LUA_ERRERR+1) /* Don't touch the stack during lua_open. */
+ lj_vm_unwind_c(L->cframe, LUA_ERRMEM);
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_ERRMEM));
+ lj_err_throw(L, LUA_ERRMEM);
+}
+
+/* Find error function for runtime errors. Requires an extra stack traversal. */
+static ptrdiff_t finderrfunc(lua_State *L)
+{
+ cTValue *frame = L->base-1, *bot = tvref(L->stack)+LJ_FR2;
+ void *cf = L->cframe;
+ while (frame > bot && cf) {
+ while (cframe_nres(cframe_raw(cf)) < 0) { /* cframe without frame? */
+ if (frame >= restorestack(L, -cframe_nres(cf)))
+ break;
+ if (cframe_errfunc(cf) >= 0) /* Error handler not inherited (-1)? */
+ return cframe_errfunc(cf);
+ cf = cframe_prev(cf); /* Else unwind cframe and continue searching. */
+ if (cf == NULL)
+ return 0;
+ }
+ switch (frame_typep(frame)) {
+ case FRAME_LUA:
+ case FRAME_LUAP:
+ frame = frame_prevl(frame);
+ break;
+ case FRAME_C:
+ cf = cframe_prev(cf);
+ /* fallthrough */
+ case FRAME_VARG:
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_CONT:
+ if (frame_iscont_fficb(frame))
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_CP:
+ if (cframe_canyield(cf)) return 0;
+ if (cframe_errfunc(cf) >= 0)
+ return cframe_errfunc(cf);
+ cf = cframe_prev(cf);
+ frame = frame_prevd(frame);
+ break;
+ case FRAME_PCALL:
+ case FRAME_PCALLH:
+ if (frame_func(frame_prevd(frame))->c.ffid == FF_xpcall)
+ return savestack(L, frame_prevd(frame)+1); /* xpcall's errorfunc. */
+ return 0;
+ default:
+ lj_assertL(0, "bad frame type");
+ return 0;
+ }
+ }
+ return 0;
+}
+
+/* Runtime error. */
+LJ_NOINLINE void LJ_FASTCALL lj_err_run(lua_State *L)
+{
+ ptrdiff_t ef = (LJ_HASJIT && tvref(G(L)->jit_base)) ? 0 : finderrfunc(L);
+ if (ef) {
+ TValue *errfunc = restorestack(L, ef);
+ TValue *top = L->top;
+ lj_trace_abort(G(L));
+ if (!tvisfunc(errfunc) || L->status == LUA_ERRERR) {
+ setstrV(L, top-1, lj_err_str(L, LJ_ERR_ERRERR));
+ lj_err_throw(L, LUA_ERRERR);
+ }
+ L->status = LUA_ERRERR;
+ copyTV(L, top+LJ_FR2, top-1);
+ copyTV(L, top-1, errfunc);
+ if (LJ_FR2) setnilV(top++);
+ L->top = top+1;
+ lj_vm_call(L, top, 1+1); /* Stack: |errfunc|msg| -> |msg| */
+ }
+ lj_err_throw(L, LUA_ERRRUN);
+}
+
+#if LJ_HASJIT
+LJ_NOINLINE void LJ_FASTCALL lj_err_trace(lua_State *L, int errcode)
+{
+ if (errcode == LUA_ERRRUN)
+ lj_err_run(L);
+ else
+ lj_err_throw(L, errcode);
+}
+#endif
+
+/* Formatted runtime error message. */
+LJ_NORET LJ_NOINLINE static void err_msgv(lua_State *L, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ msg = lj_strfmt_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ lj_debug_addloc(L, msg, L->base-1, NULL);
+ lj_err_run(L);
+}
+
+/* Non-vararg variant for better calling conventions. */
+LJ_NOINLINE void lj_err_msg(lua_State *L, ErrMsg em)
+{
+ err_msgv(L, em);
+}
+
+/* Lexer error. */
+LJ_NOINLINE void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
+ BCLine line, ErrMsg em, va_list argp)
+{
+ char buff[LUA_IDSIZE];
+ const char *msg;
+ lj_debug_shortname(buff, src, line);
+ msg = lj_strfmt_pushvf(L, err2msg(em), argp);
+ msg = lj_strfmt_pushf(L, "%s:%d: %s", buff, line, msg);
+ if (tok)
+ lj_strfmt_pushf(L, err2msg(LJ_ERR_XNEAR), msg, tok);
+ lj_err_throw(L, LUA_ERRSYNTAX);
+}
+
+/* Typecheck error for operands. */
+LJ_NOINLINE void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm)
+{
+ const char *tname = lj_typename(o);
+ const char *opname = err2msg(opm);
+ if (curr_funcisL(L)) {
+ GCproto *pt = curr_proto(L);
+ const BCIns *pc = cframe_Lpc(L) - 1;
+ const char *oname = NULL;
+ const char *kind = lj_debug_slotname(pt, pc, (BCReg)(o-L->base), &oname);
+ if (kind)
+ err_msgv(L, LJ_ERR_BADOPRT, opname, kind, oname, tname);
+ }
+ err_msgv(L, LJ_ERR_BADOPRV, opname, tname);
+}
+
+/* Typecheck error for ordered comparisons. */
+LJ_NOINLINE void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2)
+{
+ const char *t1 = lj_typename(o1);
+ const char *t2 = lj_typename(o2);
+ err_msgv(L, t1 == t2 ? LJ_ERR_BADCMPV : LJ_ERR_BADCMPT, t1, t2);
+ /* This assumes the two "boolean" entries are commoned by the C compiler. */
+}
+
+/* Typecheck error for __call. */
+LJ_NOINLINE void lj_err_optype_call(lua_State *L, TValue *o)
+{
+ /* Gross hack if lua_[p]call or pcall/xpcall fail for a non-callable object:
+ ** L->base still points to the caller. So add a dummy frame with L instead
+ ** of a function. See lua_getstack().
+ */
+ const BCIns *pc = cframe_Lpc(L);
+ if (((ptrdiff_t)pc & FRAME_TYPE) != FRAME_LUA) {
+ const char *tname = lj_typename(o);
+ setframe_gc(o, obj2gco(L), LJ_TTHREAD);
+ if (LJ_FR2) o++;
+ setframe_pc(o, pc);
+ L->top = L->base = o+1;
+ err_msgv(L, LJ_ERR_BADCALL, tname);
+ }
+ lj_err_optype(L, o, LJ_ERR_OPCALL);
+}
+
+/* Error in context of caller. */
+LJ_NOINLINE void lj_err_callermsg(lua_State *L, const char *msg)
+{
+ TValue *frame = NULL, *pframe = NULL;
+ if (!(LJ_HASJIT && tvref(G(L)->jit_base))) {
+ frame = L->base-1;
+ if (frame_islua(frame)) {
+ pframe = frame_prevl(frame);
+ } else if (frame_iscont(frame)) {
+ if (frame_iscont_fficb(frame)) {
+ pframe = frame;
+ frame = NULL;
+ } else {
+ pframe = frame_prevd(frame);
+#if LJ_HASFFI
+ /* Remove frame for FFI metamethods. */
+ if (frame_func(frame)->c.ffid >= FF_ffi_meta___index &&
+ frame_func(frame)->c.ffid <= FF_ffi_meta___tostring) {
+ L->base = pframe+1;
+ L->top = frame;
+ setcframe_pc(cframe_raw(L->cframe), frame_contpc(frame));
+ }
+#endif
+ }
+ }
+ }
+ lj_debug_addloc(L, msg, pframe, frame);
+ lj_err_run(L);
+}
+
+/* Formatted error in context of caller. */
+LJ_NOINLINE void lj_err_callerv(lua_State *L, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ msg = lj_strfmt_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ lj_err_callermsg(L, msg);
+}
+
+/* Error in context of caller. */
+LJ_NOINLINE void lj_err_caller(lua_State *L, ErrMsg em)
+{
+ lj_err_callermsg(L, err2msg(em));
+}
+
+/* Argument error message. */
+LJ_NORET LJ_NOINLINE static void err_argmsg(lua_State *L, int narg,
+ const char *msg)
+{
+ const char *fname = "?";
+ const char *ftype = lj_debug_funcname(L, L->base - 1, &fname);
+ if (narg < 0 && narg > LUA_REGISTRYINDEX)
+ narg = (int)(L->top - L->base) + narg + 1;
+ if (ftype && ftype[3] == 'h' && --narg == 0) /* Check for "method". */
+ msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADSELF), fname, msg);
+ else
+ msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADARG), narg, fname, msg);
+ lj_err_callermsg(L, msg);
+}
+
+/* Formatted argument error. */
+LJ_NOINLINE void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, em);
+ msg = lj_strfmt_pushvf(L, err2msg(em), argp);
+ va_end(argp);
+ err_argmsg(L, narg, msg);
+}
+
+/* Argument error. */
+LJ_NOINLINE void lj_err_arg(lua_State *L, int narg, ErrMsg em)
+{
+ err_argmsg(L, narg, err2msg(em));
+}
+
+/* Typecheck error for arguments. */
+LJ_NOINLINE void lj_err_argtype(lua_State *L, int narg, const char *xname)
+{
+ const char *tname, *msg;
+ if (narg <= LUA_REGISTRYINDEX) {
+ if (narg >= LUA_GLOBALSINDEX) {
+ tname = lj_obj_itypename[~LJ_TTAB];
+ } else {
+ GCfunc *fn = curr_func(L);
+ int idx = LUA_GLOBALSINDEX - narg;
+ if (idx <= fn->c.nupvalues)
+ tname = lj_typename(&fn->c.upvalue[idx-1]);
+ else
+ tname = lj_obj_typename[0];
+ }
+ } else {
+ TValue *o = narg < 0 ? L->top + narg : L->base + narg-1;
+ tname = o < L->top ? lj_typename(o) : lj_obj_typename[0];
+ }
+ msg = lj_strfmt_pushf(L, err2msg(LJ_ERR_BADTYPE), xname, tname);
+ err_argmsg(L, narg, msg);
+}
+
+/* Typecheck error for arguments. */
+LJ_NOINLINE void lj_err_argt(lua_State *L, int narg, int tt)
+{
+ lj_err_argtype(L, narg, lj_obj_typename[tt+1]);
+}
+
+/* -- Public error handling API ------------------------------------------- */
+
+LUA_API lua_CFunction lua_atpanic(lua_State *L, lua_CFunction panicf)
+{
+ lua_CFunction old = G(L)->panic;
+ G(L)->panic = panicf;
+ return old;
+}
+
+/* Forwarders for the public API (C calling convention and no LJ_NORET). */
+LUA_API int lua_error(lua_State *L)
+{
+ lj_err_run(L);
+ return 0; /* unreachable */
+}
+
+LUALIB_API int luaL_argerror(lua_State *L, int narg, const char *msg)
+{
+ err_argmsg(L, narg, msg);
+ return 0; /* unreachable */
+}
+
+LUALIB_API int luaL_typerror(lua_State *L, int narg, const char *xname)
+{
+ lj_err_argtype(L, narg, xname);
+ return 0; /* unreachable */
+}
+
+LUALIB_API void luaL_where(lua_State *L, int level)
+{
+ int size;
+ cTValue *frame = lj_debug_frame(L, level, &size);
+ lj_debug_addloc(L, "", frame, size ? frame+size : NULL);
+}
+
+LUALIB_API int luaL_error(lua_State *L, const char *fmt, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, fmt);
+ msg = lj_strfmt_pushvf(L, fmt, argp);
+ va_end(argp);
+ lj_err_callermsg(L, msg);
+ return 0; /* unreachable */
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_err.h b/libs/luajit-cmake/luajit/src/lj_err.h
new file mode 100644
index 0000000..bd4de9a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_err.h
@@ -0,0 +1,58 @@
+/*
+** Error handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_ERR_H
+#define _LJ_ERR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+typedef enum {
+#define ERRDEF(name, msg) \
+ LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1,
+#include "lj_errmsg.h"
+ LJ_ERR__MAX
+} ErrMsg;
+
+LJ_DATA const char *lj_err_allmsg;
+#define err2msg(em) (lj_err_allmsg+(int)(em))
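+
+/* Editorial example: each ERRDEF adds two enumerators, LJ_ERR_<name>
+** (the byte offset of the message within lj_err_allmsg) and
+** LJ_ERR_<name>_ (the offset of its terminating "\0"). With the first
+** two messages "not enough memory" and "error in error handling" from
+** lj_errmsg.h, LJ_ERR_ERRMEM == 0, LJ_ERR_ERRMEM_ == 17, and the enum
+** continues at LJ_ERR_ERRERR == 18, right past the embedded NUL. So
+** err2msg(LJ_ERR_ERRERR) points at "error in error handling" inside the
+** concatenated message blob.
+*/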
+
+LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em);
+LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode);
+LJ_FUNC_NORET void lj_err_mem(lua_State *L);
+LJ_FUNC_NORET void LJ_FASTCALL lj_err_run(lua_State *L);
+#if LJ_HASJIT
+LJ_FUNCA_NORET void LJ_FASTCALL lj_err_trace(lua_State *L, int errcode);
+#endif
+LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
+ BCLine line, ErrMsg em, va_list argp);
+LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm);
+LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2);
+LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o);
+LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg);
+LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em);
+LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em);
+LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...);
+LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname);
+LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt);
+
+#if LJ_UNWIND_JIT && !LJ_ABI_WIN
+LJ_FUNC uint8_t *lj_err_register_mcode(void *base, size_t sz, uint8_t *info);
+LJ_FUNC void lj_err_deregister_mcode(void *base, size_t sz, uint8_t *info);
+#else
+#define lj_err_register_mcode(base, sz, info) (info)
+#define lj_err_deregister_mcode(base, sz, info) UNUSED(base)
+#endif
+
+#if LJ_UNWIND_EXT && !LJ_ABI_WIN && defined(LUA_USE_ASSERT)
+LJ_FUNC void lj_err_verify(void);
+#else
+#define lj_err_verify() ((void)0)
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_errmsg.h b/libs/luajit-cmake/luajit/src/lj_errmsg.h
new file mode 100644
index 0000000..2e5c776
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_errmsg.h
@@ -0,0 +1,200 @@
+/*
+** VM error messages.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* This file may be included multiple times with different ERRDEF macros. */
+
+/* Basic error handling. */
+ERRDEF(ERRMEM, "not enough memory")
+ERRDEF(ERRERR, "error in error handling")
+ERRDEF(ERRCPP, "C++ exception")
+
+/* Allocations. */
+ERRDEF(STROV, "string length overflow")
+ERRDEF(UDATAOV, "userdata length overflow")
+ERRDEF(STKOV, "stack overflow")
+ERRDEF(STKOVM, "stack overflow (%s)")
+ERRDEF(TABOV, "table overflow")
+
+/* Table indexing. */
+ERRDEF(NANIDX, "table index is NaN")
+ERRDEF(NILIDX, "table index is nil")
+ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next"))
+
+/* Metamethod resolving. */
+ERRDEF(BADCALL, "attempt to call a %s value")
+ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)")
+ERRDEF(BADOPRV, "attempt to %s a %s value")
+ERRDEF(BADCMPT, "attempt to compare %s with %s")
+ERRDEF(BADCMPV, "attempt to compare two %s values")
+ERRDEF(GETLOOP, "loop in gettable")
+ERRDEF(SETLOOP, "loop in settable")
+ERRDEF(OPCALL, "call")
+ERRDEF(OPINDEX, "index")
+ERRDEF(OPARITH, "perform arithmetic on")
+ERRDEF(OPCAT, "concatenate")
+ERRDEF(OPLEN, "get length of")
+
+/* Type checks. */
+ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)")
+ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)")
+ERRDEF(BADTYPE, "%s expected, got %s")
+ERRDEF(BADVAL, "invalid value")
+ERRDEF(NOVAL, "value expected")
+ERRDEF(NOCORO, "coroutine expected")
+ERRDEF(NOTABN, "nil or table expected")
+ERRDEF(NOLFUNC, "Lua function expected")
+ERRDEF(NOFUNCL, "function or level expected")
+ERRDEF(NOSFT, "string/function/table expected")
+ERRDEF(NOPROXY, "boolean or proxy expected")
+ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number")
+ERRDEF(FORLIM, LUA_QL("for") " limit must be a number")
+ERRDEF(FORSTEP, LUA_QL("for") " step must be a number")
+
+/* C API checks. */
+ERRDEF(NOENV, "no calling environment")
+ERRDEF(CYIELD, "attempt to yield across C-call boundary")
+ERRDEF(BADLU, "bad light userdata pointer")
+ERRDEF(NOGCMM, "bad action while in __gc metamethod")
+#if LJ_TARGET_WINDOWS
+ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)")
+#endif
+
+/* Standard library function errors. */
+ERRDEF(ASSERT, "assertion failed!")
+ERRDEF(PROTMT, "cannot change a protected metatable")
+ERRDEF(UNPACK, "too many results to unpack")
+ERRDEF(RDRSTR, "reader function must return a string")
+ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print"))
+ERRDEF(NUMRNG, "number out of range")
+ERRDEF(IDXRNG, "index out of range")
+ERRDEF(BASERNG, "base out of range")
+ERRDEF(LVLRNG, "level out of range")
+ERRDEF(INVLVL, "invalid level")
+ERRDEF(INVOPT, "invalid option")
+ERRDEF(INVOPTM, "invalid option " LUA_QS)
+ERRDEF(INVFMT, "invalid format")
+ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object")
+ERRDEF(CORUN, "cannot resume running coroutine")
+ERRDEF(CODEAD, "cannot resume dead coroutine")
+ERRDEF(COSUSP, "cannot resume non-suspended coroutine")
+ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert"))
+ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat"))
+ERRDEF(TABSORT, "invalid order function for sorting")
+ERRDEF(IOCLFL, "attempt to use a closed file")
+ERRDEF(IOSTDCL, "standard file is closed")
+ERRDEF(OSUNIQF, "unable to generate a unique filename")
+ERRDEF(OSDATEF, "field " LUA_QS " missing in date table")
+ERRDEF(STRDUMP, "unable to dump given function")
+ERRDEF(STRSLC, "string slice too long")
+ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern")
+ERRDEF(STRPATC, "invalid pattern capture")
+ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")")
+ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")")
+ERRDEF(STRPATU, "unbalanced pattern")
+ERRDEF(STRPATX, "pattern too complex")
+ERRDEF(STRCAPI, "invalid capture index")
+ERRDEF(STRCAPN, "too many captures")
+ERRDEF(STRCAPU, "unfinished capture")
+ERRDEF(STRFMT, "invalid option " LUA_QS " to " LUA_QL("format"))
+ERRDEF(STRGSRV, "invalid replacement value (a %s)")
+ERRDEF(BADMODN, "name conflict for module " LUA_QS)
+#if LJ_HASJIT
+ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?")
+ERRDEF(NOJIT, "JIT compiler disabled")
+#elif defined(LJ_ARCH_NOJIT)
+ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
+#else
+ERRDEF(NOJIT, "JIT compiler permanently disabled by build option")
+#endif
+ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
+
+/* Lexer/parser errors. */
+ERRDEF(XMODE, "attempt to load chunk with wrong mode")
+ERRDEF(XNEAR, "%s near " LUA_QS)
+ERRDEF(XLINES, "chunk has too many lines")
+ERRDEF(XLEVELS, "chunk has too many syntax levels")
+ERRDEF(XNUMBER, "malformed number")
+ERRDEF(XLSTR, "unfinished long string")
+ERRDEF(XLCOM, "unfinished long comment")
+ERRDEF(XSTR, "unfinished string")
+ERRDEF(XESC, "invalid escape sequence")
+ERRDEF(XLDELIM, "invalid long string delimiter")
+ERRDEF(XTOKEN, LUA_QS " expected")
+ERRDEF(XJUMP, "control structure too long")
+ERRDEF(XSLOTS, "function or expression too complex")
+ERRDEF(XLIMC, "chunk has more than %d local variables")
+ERRDEF(XLIMM, "main function has more than %d %s")
+ERRDEF(XLIMF, "function at line %d has more than %d %s")
+ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)")
+ERRDEF(XFIXUP, "function too long for return fixup")
+ERRDEF(XPARAM, "<name> or " LUA_QL("...") " expected")
+#if !LJ_52
+ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)")
+#endif
+ERRDEF(XFUNARG, "function arguments expected")
+ERRDEF(XSYMBOL, "unexpected symbol")
+ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function")
+ERRDEF(XSYNTAX, "syntax error")
+ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected")
+ERRDEF(XBREAK, "no loop to break")
+ERRDEF(XLUNDEF, "undefined label " LUA_QS)
+ERRDEF(XLDUP, "duplicate label " LUA_QS)
+ERRDEF(XGSCOPE, "<goto %s> jumps into the scope of local " LUA_QS)
+
+/* Bytecode reader errors. */
+ERRDEF(BCFMT, "cannot load incompatible bytecode")
+ERRDEF(BCBAD, "cannot load malformed bytecode")
+
+#if LJ_HASFFI
+/* FFI errors. */
+ERRDEF(FFI_INVTYPE, "invalid C type")
+ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large")
+ERRDEF(FFI_BADSCL, "bad storage class")
+ERRDEF(FFI_DECLSPEC, "declaration specifier expected")
+ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS)
+ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS)
+ERRDEF(FFI_NUMPARAM, "wrong number of type parameters")
+ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS)
+ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS)
+ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS)
+ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS)
+ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS)
+ERRDEF(FFI_BADCALL, LUA_QS " is not callable")
+ERRDEF(FFI_NUMARG, "wrong number of arguments for function call")
+ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS)
+ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed")
+ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS)
+ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod")
+ERRDEF(FFI_WRCONST, "attempt to write to constant location")
+ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS)
+ERRDEF(FFI_BADCBACK, "bad callback")
+#if LJ_OS_NOJIT
+ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS")
+#else
+ERRDEF(FFI_CBACKOV, "too many callbacks")
+#endif
+ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields")
+ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)")
+#endif
+
+#if LJ_HASBUFFER
+/* String buffer errors. */
+ERRDEF(BUFFER_SELF, "cannot put buffer into itself")
+ERRDEF(BUFFER_BADOPT, "bad options table")
+ERRDEF(BUFFER_BADENC, "cannot serialize " LUA_QS)
+ERRDEF(BUFFER_BADDEC, "cannot deserialize tag 0x%02x")
+ERRDEF(BUFFER_BADDICTX, "cannot deserialize dictionary index %d")
+ERRDEF(BUFFER_DEPTH, "too deep to serialize")
+ERRDEF(BUFFER_DUPKEY, "duplicate table key")
+ERRDEF(BUFFER_EOB, "unexpected end of buffer")
+ERRDEF(BUFFER_LEFTOV, "left-over data in buffer")
+#endif
+
+#undef ERRDEF
+
+/* Detecting unused error messages:
+ awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh
+*/
diff --git a/libs/luajit-cmake/luajit/src/lj_ff.h b/libs/luajit-cmake/luajit/src/lj_ff.h
new file mode 100644
index 0000000..d00c32f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ff.h
@@ -0,0 +1,18 @@
+/*
+** Fast function IDs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FF_H
+#define _LJ_FF_H
+
+/* Fast function ID. */
+typedef enum {
+ FF_LUA_ = FF_LUA, /* Lua function (must be 0). */
+ FF_C_ = FF_C, /* Regular C function (must be 1). */
+#define FFDEF(name) FF_##name,
+#include "lj_ffdef.h"
+ FF__MAX
+} FastFunc;
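+
+/* Editorial note: each FFDEF(name) in the generated lj_ffdef.h appends
+** FF_##name, so fast function IDs are assigned in definition order,
+** starting at 2 after FF_LUA == 0 and FF_C == 1. E.g. if assert is the
+** first generated entry, FF_assert would be 2.
+*/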
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ffrecord.c b/libs/luajit-cmake/luajit/src/lj_ffrecord.c
new file mode 100644
index 0000000..022de1a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ffrecord.c
@@ -0,0 +1,1574 @@
+/*
+** Fast function call recorder.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ffrecord_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ff.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_crecord.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#include "lj_serialize.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
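+
+/* Editorial example: emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, 1)) requests
+** an integer add of tr and the constant 1. lj_opt_fold() may return an
+** existing (CSE'd) or constant-folded reference instead of emitting a
+** new instruction.
+*/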
+
+/* -- Fast function recording handlers ------------------------------------ */
+
+/* Conventions for fast function call handlers:
+**
+** The argument slots start at J->base[0]. All of them are guaranteed to be
+** valid and type-specialized references. J->base[J->maxslot] is set to 0
+** as a sentinel. The runtime argument values start at rd->argv[0].
+**
+** In general fast functions should check for presence of all of their
+** arguments and for the correct argument types. Some simplifications
+** are allowed if the interpreter throws instead. But even if recording
+** is aborted, the generated IR must be consistent (no zero-refs).
+**
+** The number of results in rd->nres is set to 1. Handlers that return
+** a different number of results need to override it. A negative value
+** prevents return processing (e.g. for pending calls).
+**
+** Results need to be stored starting at J->base[0]. Return processing
+** moves them to the right slots later.
+**
+** The per-ffid auxiliary data is the value of the 2nd part of the
+** LJLIB_REC() annotation. This allows handling similar functionality
+** in a common handler.
+*/
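+
+/* Editorial sketch (not an upstream handler): the smallest recorder that
+** follows the conventions above, for a hypothetical fast function which
+** returns its first argument unchanged.
+*/
+#if 0
+static void LJ_FASTCALL recff_identity(jit_State *J, RecordFFData *rd)
+{
+  TRef tr = J->base[0];  /* Argument refs start at J->base[0]. */
+  if (tr)
+    J->base[0] = tr;  /* Result goes to J->base[0]; rd->nres stays 1. */
+  /* else: Interpreter will throw. */
+  UNUSED(rd);
+}
+#endif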
+
+/* Type of handler to record a fast function. */
+typedef void (LJ_FASTCALL *RecordFunc)(jit_State *J, RecordFFData *rd);
+
+/* Get runtime value of int argument. */
+static int32_t argv2int(jit_State *J, TValue *o)
+{
+ if (!lj_strscan_numberobj(o))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ return tvisint(o) ? intV(o) : lj_num2int(numV(o));
+}
+
+/* Get runtime value of string argument. */
+static GCstr *argv2str(jit_State *J, TValue *o)
+{
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else {
+ GCstr *s;
+ if (!tvisnumber(o))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ s = lj_strfmt_number(J->L, o);
+ setstrV(J->L, o, s);
+ return s;
+ }
+}
+
+/* Return number of results wanted by caller. */
+static ptrdiff_t results_wanted(jit_State *J)
+{
+ TValue *frame = J->L->base-1;
+ if (frame_islua(frame))
+ return (ptrdiff_t)bc_b(frame_pc(frame)[-1]) - 1;
+ else
+ return -1;
+}
+
+/* Trace stitching: add continuation below frame to start a new trace. */
+static void recff_stitch(jit_State *J)
+{
+ ASMFunction cont = lj_cont_stitch;
+ lua_State *L = J->L;
+ TValue *base = L->base;
+ BCReg nslot = J->maxslot + 1 + LJ_FR2;
+ TValue *nframe = base + 1 + LJ_FR2;
+ const BCIns *pc = frame_pc(base-1);
+ TValue *pframe = frame_prevl(base-1);
+
+ /* Check for this now. Throwing in lj_record_stop messes up the stack. */
+ if (J->cur.nsnap >= (MSize)J->param[JIT_P_maxsnap])
+ lj_trace_err(J, LJ_TRERR_SNAPOV);
+
+ /* Move func + args up in Lua stack and insert continuation. */
+ memmove(&base[1], &base[-1-LJ_FR2], sizeof(TValue)*nslot);
+ setframe_ftsz(nframe, ((char *)nframe - (char *)pframe) + FRAME_CONT);
+ setcont(base-LJ_FR2, cont);
+ setframe_pc(base, pc);
+ setnilV(base-1-LJ_FR2); /* Incorrect, but rec_check_slots() won't run anymore. */
+ L->base += 2 + LJ_FR2;
+ L->top += 2 + LJ_FR2;
+
+ /* Ditto for the IR. */
+ memmove(&J->base[1], &J->base[-1-LJ_FR2], sizeof(TRef)*nslot);
+#if LJ_FR2
+ J->base[2] = TREF_FRAME;
+ J->base[-1] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
+ J->base[0] = lj_ir_k64(J, IR_KNUM, u64ptr(pc)) | TREF_CONT;
+#else
+ J->base[0] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
+#endif
+ J->ktrace = tref_ref((J->base[-1-LJ_FR2] = lj_ir_ktrace(J)));
+ J->base += 2 + LJ_FR2;
+ J->baseslot += 2 + LJ_FR2;
+ J->framedepth++;
+
+ lj_record_stop(J, LJ_TRLINK_STITCH, 0);
+
+ /* Undo Lua stack changes. */
+ memmove(&base[-1-LJ_FR2], &base[1], sizeof(TValue)*nslot);
+ setframe_pc(base-1, pc);
+ L->base -= 2 + LJ_FR2;
+ L->top -= 2 + LJ_FR2;
+}
+
+/* Fallback handler for fast functions that are not recorded (yet). */
+static void LJ_FASTCALL recff_nyi(jit_State *J, RecordFFData *rd)
+{
+ if (J->cur.nins < (IRRef)J->param[JIT_P_minstitch] + REF_BASE) {
+ lj_trace_err_info(J, LJ_TRERR_TRACEUV);
+ } else {
+ /* Can only stitch from Lua call. */
+ if (J->framedepth && frame_islua(J->L->base-1)) {
+ BCOp op = bc_op(*frame_pc(J->L->base-1));
+ /* Stitched trace cannot start with *M op with variable # of args. */
+ if (!(op == BC_CALLM || op == BC_CALLMT ||
+ op == BC_RETM || op == BC_TSETM)) {
+ switch (J->fn->c.ffid) {
+ case FF_error:
+ case FF_debug_sethook:
+ case FF_jit_flush:
+ break; /* Don't stitch across special builtins. */
+ default:
+ recff_stitch(J); /* Use trace stitching. */
+ rd->nres = -1;
+ return;
+ }
+ }
+ }
+ /* Otherwise stop trace and return to interpreter. */
+ lj_record_stop(J, LJ_TRLINK_RETURN, 0);
+ rd->nres = -1;
+ }
+}
+
+/* Fallback handler for unsupported variants of fast functions. */
+#define recff_nyiu recff_nyi
+
+/* Must stop the trace for classic C functions with arbitrary side-effects. */
+#define recff_c recff_nyi
+
+/* Emit BUFHDR for the global temporary buffer. */
+static TRef recff_bufhdr(jit_State *J)
+{
+ return emitir(IRT(IR_BUFHDR, IRT_PGC),
+ lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
+}
+
+/* Emit TMPREF. */
+static TRef recff_tmpref(jit_State *J, TRef tr, int mode)
+{
+ if (!LJ_DUALNUM && tref_isinteger(tr))
+ tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ return emitir(IRT(IR_TMPREF, IRT_PGC), tr, mode);
+}
+
+/* -- Base library fast functions ----------------------------------------- */
+
+static void LJ_FASTCALL recff_assert(jit_State *J, RecordFFData *rd)
+{
+ /* Arguments already specialized. The interpreter throws for nil/false. */
+ rd->nres = J->maxslot; /* Pass through all arguments. */
+}
+
+static void LJ_FASTCALL recff_type(jit_State *J, RecordFFData *rd)
+{
+ /* Arguments already specialized. Result is a constant string. Neat, huh? */
+ uint32_t t;
+ if (tvisnumber(&rd->argv[0]))
+ t = ~LJ_TNUMX;
+ else if (LJ_64 && !LJ_GC64 && tvislightud(&rd->argv[0]))
+ t = ~LJ_TLIGHTUD;
+ else
+ t = ~itype(&rd->argv[0]);
+ J->base[0] = lj_ir_kstr(J, strV(&J->fn->c.upvalue[t]));
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_getmetatable(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tr) {
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ if (lj_record_mm_lookup(J, &ix, MM_metatable))
+ J->base[0] = ix.mobj;
+ else
+ J->base[0] = ix.mt;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_setmetatable(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ TRef mt = J->base[1];
+ if (tref_istab(tr) && (tref_istab(mt) || (mt && tref_isnil(mt)))) {
+ TRef fref, mtref;
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ lj_record_mm_lookup(J, &ix, MM_metatable); /* Guard for no __metatable. */
+ fref = emitir(IRT(IR_FREF, IRT_PGC), tr, IRFL_TAB_META);
+ mtref = tref_isnil(mt) ? lj_ir_knull(J, IRT_TAB) : mt;
+ emitir(IRT(IR_FSTORE, IRT_TAB), fref, mtref);
+ if (!tref_isnil(mt))
+ emitir(IRT(IR_TBAR, IRT_TAB), tr, 0);
+ J->base[0] = tr;
+ J->needsnap = 1;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawget(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0]; ix.key = J->base[1];
+ if (tref_istab(ix.tab) && ix.key) {
+ ix.val = 0; ix.idxchain = 0;
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ copyTV(J->L, &ix.keyv, &rd->argv[1]);
+ J->base[0] = lj_record_idx(J, &ix);
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawset(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0]; ix.key = J->base[1]; ix.val = J->base[2];
+ if (tref_istab(ix.tab) && ix.key && ix.val) {
+ ix.idxchain = 0;
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ copyTV(J->L, &ix.keyv, &rd->argv[1]);
+ copyTV(J->L, &ix.valv, &rd->argv[2]);
+ lj_record_idx(J, &ix);
+ /* Pass through table at J->base[0] as result. */
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_rawequal(jit_State *J, RecordFFData *rd)
+{
+ TRef tra = J->base[0];
+ TRef trb = J->base[1];
+ if (tra && trb) {
+ int diff = lj_record_objcmp(J, tra, trb, &rd->argv[0], &rd->argv[1]);
+ J->base[0] = diff ? TREF_FALSE : TREF_TRUE;
+ } /* else: Interpreter will throw. */
+}
+
+#if LJ_52
+static void LJ_FASTCALL recff_rawlen(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tref_isstr(tr))
+ J->base[0] = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN);
+ else if (tref_istab(tr))
+ J->base[0] = emitir(IRTI(IR_ALEN), tr, TREF_NIL);
+ /* else: Interpreter will throw. */
+ UNUSED(rd);
+}
+#endif
+
+/* Determine mode of select() call. */
+int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv)
+{
+ if (tref_isstr(tr) && *strVdata(tv) == '#') { /* select('#', ...) */
+ if (strV(tv)->len == 1) {
+ emitir(IRTG(IR_EQ, IRT_STR), tr, lj_ir_kstr(J, strV(tv)));
+ } else {
+ TRef trptr = emitir(IRT(IR_STRREF, IRT_PGC), tr, lj_ir_kint(J, 0));
+ TRef trchar = emitir(IRT(IR_XLOAD, IRT_U8), trptr, IRXLOAD_READONLY);
+ emitir(IRTGI(IR_EQ), trchar, lj_ir_kint(J, '#'));
+ }
+ return 0;
+ } else { /* select(n, ...) */
+ int32_t start = argv2int(J, tv);
+ if (start == 0) lj_trace_err(J, LJ_TRERR_BADTYPE); /* A bit misleading. */
+ return start;
+ }
+}
+
+static void LJ_FASTCALL recff_select(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tr) {
+ ptrdiff_t start = lj_ffrecord_select_mode(J, tr, &rd->argv[0]);
+ if (start == 0) { /* select('#', ...) */
+ J->base[0] = lj_ir_kint(J, J->maxslot - 1);
+ } else if (tref_isk(tr)) { /* select(k, ...) */
+ ptrdiff_t n = (ptrdiff_t)J->maxslot;
+ if (start < 0) start += n;
+ else if (start > n) start = n;
+ if (start >= 1) {
+ ptrdiff_t i;
+ rd->nres = n - start;
+ for (i = 0; i < n - start; i++)
+ J->base[i] = J->base[start+i];
+ } /* else: Interpreter will throw. */
+ } else {
+ recff_nyiu(J, rd);
+ return;
+ }
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_tonumber(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ TRef base = J->base[1];
+ if (tr && !tref_isnil(base)) {
+ base = lj_opt_narrow_toint(J, base);
+ if (!tref_isk(base) || IR(tref_ref(base))->i != 10) {
+ recff_nyiu(J, rd);
+ return;
+ }
+ }
+ if (tref_isnumber_str(tr)) {
+ if (tref_isstr(tr)) {
+ TValue tmp;
+ if (!lj_strscan_num(strV(&rd->argv[0]), &tmp)) {
+ recff_nyiu(J, rd); /* Would need an inverted STRTO for this case. */
+ return;
+ }
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ }
+#if LJ_HASFFI
+ } else if (tref_iscdata(tr)) {
+ lj_crecord_tonumber(J, rd);
+ return;
+#endif
+ } else {
+ tr = TREF_NIL;
+ }
+ J->base[0] = tr;
+ UNUSED(rd);
+}
+
+static TValue *recff_metacall_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ lj_record_tailcall(J, 0, 1);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+static int recff_metacall(jit_State *J, RecordFFData *rd, MMS mm)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ if (lj_record_mm_lookup(J, &ix, mm)) { /* Has metamethod? */
+ int errcode;
+ TValue argv0;
+ /* Temporarily insert metamethod below object. */
+ J->base[1+LJ_FR2] = J->base[0];
+ J->base[0] = ix.mobj;
+ copyTV(J->L, &argv0, &rd->argv[0]);
+ copyTV(J->L, &rd->argv[1+LJ_FR2], &rd->argv[0]);
+ copyTV(J->L, &rd->argv[0], &ix.mobjv);
+ /* Need to protect lj_record_tailcall because it may throw. */
+ errcode = lj_vm_cpcall(J->L, NULL, J, recff_metacall_cp);
+ /* Always undo Lua stack changes to avoid confusing the interpreter. */
+ copyTV(J->L, &rd->argv[0], &argv0);
+ if (errcode)
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ rd->nres = -1; /* Pending call. */
+ return 1; /* Tailcalled to metamethod. */
+ }
+ return 0;
+}
+
+static void LJ_FASTCALL recff_tostring(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tref_isstr(tr)) {
+ /* Ignore __tostring in the string base metatable. */
+ /* Pass on result in J->base[0]. */
+ } else if (tr && !recff_metacall(J, rd, MM_tostring)) {
+ if (tref_isnumber(tr)) {
+ J->base[0] = emitir(IRT(IR_TOSTR, IRT_STR), tr,
+ tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT);
+ } else if (tref_ispri(tr)) {
+ J->base[0] = lj_ir_kstr(J, lj_strfmt_obj(J->L, &rd->argv[0]));
+ } else {
+ recff_nyiu(J, rd);
+ return;
+ }
+ }
+}
+
+static void LJ_FASTCALL recff_ipairs_aux(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ if (tref_istab(ix.tab)) {
+ if (!tvisnumber(&rd->argv[1])) /* No support for string coercion. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ setintV(&ix.keyv, numberVint(&rd->argv[1])+1);
+ settabV(J->L, &ix.tabv, tabV(&rd->argv[0]));
+ ix.val = 0; ix.idxchain = 0;
+ ix.key = lj_opt_narrow_toint(J, J->base[1]);
+ J->base[0] = ix.key = emitir(IRTI(IR_ADD), ix.key, lj_ir_kint(J, 1));
+ J->base[1] = lj_record_idx(J, &ix);
+ rd->nres = tref_isnil(J->base[1]) ? 0 : 2;
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_xpairs(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (!((LJ_52 || (LJ_HASFFI && tref_iscdata(tr))) &&
+ recff_metacall(J, rd, MM_pairs + rd->data))) {
+ if (tref_istab(tr)) {
+ J->base[0] = lj_ir_kfunc(J, funcV(&J->fn->c.upvalue[0]));
+ J->base[1] = tr;
+ J->base[2] = rd->data ? lj_ir_kint(J, 0) : TREF_NIL;
+ rd->nres = 3;
+ } /* else: Interpreter will throw. */
+ }
+}
+
+static void LJ_FASTCALL recff_pcall(jit_State *J, RecordFFData *rd)
+{
+ if (J->maxslot >= 1) {
+#if LJ_FR2
+ /* Shift function arguments up. */
+ memmove(J->base + 1, J->base, sizeof(TRef) * J->maxslot);
+#endif
+ lj_record_call(J, 0, J->maxslot - 1);
+ rd->nres = -1; /* Pending call. */
+ J->needsnap = 1; /* Start catching on-trace errors. */
+ } /* else: Interpreter will throw. */
+}
+
+static TValue *recff_xpcall_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ lj_record_call(J, 1, J->maxslot - 2);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+static void LJ_FASTCALL recff_xpcall(jit_State *J, RecordFFData *rd)
+{
+ if (J->maxslot >= 2) {
+ TValue argv0, argv1;
+ TRef tmp;
+ int errcode;
+ /* Swap function and traceback. */
+ tmp = J->base[0]; J->base[0] = J->base[1]; J->base[1] = tmp;
+ copyTV(J->L, &argv0, &rd->argv[0]);
+ copyTV(J->L, &argv1, &rd->argv[1]);
+ copyTV(J->L, &rd->argv[0], &argv1);
+ copyTV(J->L, &rd->argv[1], &argv0);
+#if LJ_FR2
+ /* Shift function arguments up. */
+ memmove(J->base + 2, J->base + 1, sizeof(TRef) * (J->maxslot-1));
+#endif
+ /* Need to protect lj_record_call because it may throw. */
+ errcode = lj_vm_cpcall(J->L, NULL, J, recff_xpcall_cp);
+ /* Always undo Lua stack swap to avoid confusing the interpreter. */
+ copyTV(J->L, &rd->argv[0], &argv0);
+ copyTV(J->L, &rd->argv[1], &argv1);
+ if (errcode)
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ rd->nres = -1; /* Pending call. */
+ J->needsnap = 1; /* Start catching on-trace errors. */
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_getfenv(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ /* Only support getfenv(0) for now. */
+ if (tref_isint(tr) && tref_isk(tr) && IR(tref_ref(tr))->i == 0) {
+ TRef trl = emitir(IRT(IR_LREF, IRT_THREAD), 0, 0);
+ J->base[0] = emitir(IRT(IR_FLOAD, IRT_TAB), trl, IRFL_THREAD_ENV);
+ return;
+ }
+ recff_nyiu(J, rd);
+}
+
+static void LJ_FASTCALL recff_next(jit_State *J, RecordFFData *rd)
+{
+#if LJ_BE
+ /* YAGNI: Disabled on big-endian due to issues with lj_vm_next,
+ ** IR_HIOP, RID_RETLO/RID_RETHI and ra_destpair.
+ */
+ recff_nyi(J, rd);
+#else
+ TRef tab = J->base[0];
+ if (tref_istab(tab)) {
+ RecordIndex ix;
+ cTValue *keyv;
+ ix.tab = tab;
+ if (tref_isnil(J->base[1])) { /* Shortcut for start of traversal. */
+ ix.key = lj_ir_kint(J, 0);
+ keyv = niltvg(J2G(J));
+ } else {
+ TRef tmp = recff_tmpref(J, J->base[1], IRTMPREF_IN1);
+ ix.key = lj_ir_call(J, IRCALL_lj_tab_keyindex, tab, tmp);
+ keyv = &rd->argv[1];
+ }
+ copyTV(J->L, &ix.tabv, &rd->argv[0]);
+ ix.keyv.u32.lo = lj_tab_keyindex(tabV(&ix.tabv), keyv);
+ /* Omit the value, if not used by the caller. */
+ ix.idxchain = (J->framedepth && frame_islua(J->L->base-1) &&
+ bc_b(frame_pc(J->L->base-1)[-1])-1 < 2);
+ ix.mobj = 0; /* We don't need the next index. */
+ rd->nres = lj_record_next(J, &ix);
+ J->base[0] = ix.key;
+ J->base[1] = ix.val;
+ } /* else: Interpreter will throw. */
+#endif
+}
+
+/* -- Math library fast functions ----------------------------------------- */
+
+static void LJ_FASTCALL recff_math_abs(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ J->base[0] = emitir(IRTN(IR_ABS), tr, lj_ir_ksimd(J, LJ_KSIMD_ABS));
+ UNUSED(rd);
+}
+
+/* Record rounding functions math.floor and math.ceil. */
+static void LJ_FASTCALL recff_math_round(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (!tref_isinteger(tr)) { /* Pass through integers unmodified. */
+ tr = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, tr), rd->data);
+ /* Result is integral (or NaN/Inf), but may not fit an int32_t. */
+ if (LJ_DUALNUM) { /* Try to narrow using a guarded conversion to int. */
+ lua_Number n = lj_vm_foldfpm(numberVnum(&rd->argv[0]), rd->data);
+ if (n == (lua_Number)lj_num2int(n))
+ tr = emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_CHECK);
+ }
+ J->base[0] = tr;
+ }
+}
+
+/* Record unary math.* functions, mapped to IR_FPMATH opcode. */
+static void LJ_FASTCALL recff_math_unary(jit_State *J, RecordFFData *rd)
+{
+ J->base[0] = emitir(IRTN(IR_FPMATH), lj_ir_tonum(J, J->base[0]), rd->data);
+}
+
+/* Record math.log. */
+static void LJ_FASTCALL recff_math_log(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ if (J->base[1]) {
+#ifdef LUAJIT_NO_LOG2
+ uint32_t fpm = IRFPM_LOG;
+#else
+ uint32_t fpm = IRFPM_LOG2;
+#endif
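+ /* log_b(x) = log(x)/log(b), emitted as log(x) * (1/log(b)). */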
+ TRef trb = lj_ir_tonum(J, J->base[1]);
+ tr = emitir(IRTN(IR_FPMATH), tr, fpm);
+ trb = emitir(IRTN(IR_FPMATH), trb, fpm);
+ trb = emitir(IRTN(IR_DIV), lj_ir_knum_one(J), trb);
+ tr = emitir(IRTN(IR_MUL), tr, trb);
+ } else {
+ tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_LOG);
+ }
+ J->base[0] = tr;
+ UNUSED(rd);
+}
+
+/* Record math.atan2. */
+static void LJ_FASTCALL recff_math_atan2(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+ J->base[0] = lj_ir_call(J, IRCALL_atan2, tr, tr2);
+ UNUSED(rd);
+}
+
+/* Record math.ldexp. */
+static void LJ_FASTCALL recff_math_ldexp(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+#if LJ_TARGET_X86ORX64
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+#else
+ TRef tr2 = lj_opt_narrow_toint(J, J->base[1]);
+#endif
+ J->base[0] = emitir(IRTN(IR_LDEXP), tr, tr2);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_math_call(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonum(J, J->base[0]);
+ J->base[0] = emitir(IRTN(IR_CALLN), tr, rd->data);
+}
+
+static void LJ_FASTCALL recff_math_pow(jit_State *J, RecordFFData *rd)
+{
+ J->base[0] = lj_opt_narrow_arith(J, J->base[0], J->base[1],
+ &rd->argv[0], &rd->argv[1], IR_POW);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_math_minmax(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = lj_ir_tonumber(J, J->base[0]);
+ uint32_t op = rd->data;
+ BCReg i;
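+ /* Left-fold the arguments; use an integer op only if both operands are integers. */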
+ for (i = 1; J->base[i] != 0; i++) {
+ TRef tr2 = lj_ir_tonumber(J, J->base[i]);
+ IRType t = IRT_INT;
+ if (!(tref_isinteger(tr) && tref_isinteger(tr2))) {
+ if (tref_isinteger(tr)) tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ if (tref_isinteger(tr2)) tr2 = emitir(IRTN(IR_CONV), tr2, IRCONV_NUM_INT);
+ t = IRT_NUM;
+ }
+ tr = emitir(IRT(op, t), tr, tr2);
+ }
+ J->base[0] = tr;
+}
+
+static void LJ_FASTCALL recff_math_random(jit_State *J, RecordFFData *rd)
+{
+ GCudata *ud = udataV(&J->fn->c.upvalue[0]);
+ TRef tr, one;
+ lj_ir_kgc(J, obj2gco(ud), IRT_UDATA); /* Prevent collection. */
+ tr = lj_ir_call(J, IRCALL_lj_prng_u64d, lj_ir_kptr(J, uddata(ud)));
+ one = lj_ir_knum_one(J);
+ tr = emitir(IRTN(IR_SUB), tr, one);
+ if (J->base[0]) {
+ TRef tr1 = lj_ir_tonum(J, J->base[0]);
+ if (J->base[1]) { /* d = floor(d*(r2-r1+1.0)) + r1 */
+ TRef tr2 = lj_ir_tonum(J, J->base[1]);
+ tr2 = emitir(IRTN(IR_SUB), tr2, tr1);
+ tr2 = emitir(IRTN(IR_ADD), tr2, one);
+ tr = emitir(IRTN(IR_MUL), tr, tr2);
+ tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR);
+ tr = emitir(IRTN(IR_ADD), tr, tr1);
+ } else { /* d = floor(d*r1) + 1.0 */
+ tr = emitir(IRTN(IR_MUL), tr, tr1);
+ tr = emitir(IRTN(IR_FPMATH), tr, IRFPM_FLOOR);
+ tr = emitir(IRTN(IR_ADD), tr, one);
+ }
+ }
+ J->base[0] = tr;
+ UNUSED(rd);
+}
+
+/* -- Bit library fast functions ------------------------------------------ */
+
+/* Record bit.tobit. */
+static void LJ_FASTCALL recff_bit_tobit(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+#if LJ_HASFFI
+ if (tref_iscdata(tr)) { recff_bit64_tobit(J, rd); return; }
+#endif
+ J->base[0] = lj_opt_narrow_tobit(J, tr);
+ UNUSED(rd);
+}
+
+/* Record unary bit.bnot, bit.bswap. */
+static void LJ_FASTCALL recff_bit_unary(jit_State *J, RecordFFData *rd)
+{
+#if LJ_HASFFI
+ if (recff_bit64_unary(J, rd))
+ return;
+#endif
+ J->base[0] = emitir(IRTI(rd->data), lj_opt_narrow_tobit(J, J->base[0]), 0);
+}
+
+/* Record N-ary bit.band, bit.bor, bit.bxor. */
+static void LJ_FASTCALL recff_bit_nary(jit_State *J, RecordFFData *rd)
+{
+#if LJ_HASFFI
+ if (recff_bit64_nary(J, rd))
+ return;
+#endif
+ {
+ TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
+ uint32_t ot = IRTI(rd->data);
+ BCReg i;
+ for (i = 1; J->base[i] != 0; i++)
+ tr = emitir(ot, tr, lj_opt_narrow_tobit(J, J->base[i]));
+ J->base[0] = tr;
+ }
+}
+
+/* Record bit shifts. */
+static void LJ_FASTCALL recff_bit_shift(jit_State *J, RecordFFData *rd)
+{
+#if LJ_HASFFI
+ if (recff_bit64_shift(J, rd))
+ return;
+#endif
+ {
+ TRef tr = lj_opt_narrow_tobit(J, J->base[0]);
+ TRef tsh = lj_opt_narrow_tobit(J, J->base[1]);
+ IROp op = (IROp)rd->data;
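+ /* Mask a variable shift count unless the target hardware masks it already. */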
+ if (!(op < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ !tref_isk(tsh))
+ tsh = emitir(IRTI(IR_BAND), tsh, lj_ir_kint(J, 31));
+#ifdef LJ_TARGET_UNIFYROT
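+ /* Targets with a single rotate use rol(x, n) == ror(x, -n) to unify directions. */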
+ if (op == (LJ_TARGET_UNIFYROT == 1 ? IR_BROR : IR_BROL)) {
+ op = LJ_TARGET_UNIFYROT == 1 ? IR_BROL : IR_BROR;
+ tsh = emitir(IRTI(IR_NEG), tsh, tsh);
+ }
+#endif
+ J->base[0] = emitir(IRTI(op), tr, tsh);
+ }
+}
+
+static void LJ_FASTCALL recff_bit_tohex(jit_State *J, RecordFFData *rd)
+{
+#if LJ_HASFFI
+ TRef hdr = recff_bufhdr(J);
+ TRef tr = recff_bit64_tohex(J, rd, hdr);
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+#else
+ recff_nyiu(J, rd); /* Don't bother working around this NYI. */
+#endif
+}
+
+/* -- String library fast functions --------------------------------------- */
+
+/* Specialize to relative starting position for string. */
+static TRef recff_string_start(jit_State *J, GCstr *s, int32_t *st, TRef tr,
+ TRef trlen, TRef tr0)
+{
+ int32_t start = *st;
+ if (start < 0) {
+ emitir(IRTGI(IR_LT), tr, tr0);
+ tr = emitir(IRTI(IR_ADD), trlen, tr);
+ start = start + (int32_t)s->len;
+ emitir(start < 0 ? IRTGI(IR_LT) : IRTGI(IR_GE), tr, tr0);
+ if (start < 0) {
+ tr = tr0;
+ start = 0;
+ }
+ } else if (start == 0) {
+ emitir(IRTGI(IR_EQ), tr, tr0);
+ tr = tr0;
+ } else {
+ tr = emitir(IRTI(IR_ADD), tr, lj_ir_kint(J, -1));
+ emitir(IRTGI(IR_GE), tr, tr0);
+ start--;
+ }
+ *st = start;
+ return tr;
+}
+
+/* Handle string.byte (rd->data = 0) and string.sub (rd->data = 1). */
+static void LJ_FASTCALL recff_string_range(jit_State *J, RecordFFData *rd)
+{
+ TRef trstr = lj_ir_tostr(J, J->base[0]);
+ TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN);
+ TRef tr0 = lj_ir_kint(J, 0);
+ TRef trstart, trend;
+ GCstr *str = argv2str(J, &rd->argv[0]);
+ int32_t start, end;
+ if (rd->data) { /* string.sub(str, start [,end]) */
+ start = argv2int(J, &rd->argv[1]);
+ trstart = lj_opt_narrow_toint(J, J->base[1]);
+ trend = J->base[2];
+ if (tref_isnil(trend)) {
+ trend = lj_ir_kint(J, -1);
+ end = -1;
+ } else {
+ trend = lj_opt_narrow_toint(J, trend);
+ end = argv2int(J, &rd->argv[2]);
+ }
+ } else { /* string.byte(str [,start [,end]]) */
+ if (tref_isnil(J->base[1])) {
+ start = 1;
+ trstart = lj_ir_kint(J, 1);
+ } else {
+ start = argv2int(J, &rd->argv[1]);
+ trstart = lj_opt_narrow_toint(J, J->base[1]);
+ }
+ if (J->base[1] && !tref_isnil(J->base[2])) {
+ trend = lj_opt_narrow_toint(J, J->base[2]);
+ end = argv2int(J, &rd->argv[2]);
+ } else {
+ trend = trstart;
+ end = start;
+ }
+ }
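+ /* Guard the relative end position and clamp it to the string length. */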
+ if (end < 0) {
+ emitir(IRTGI(IR_LT), trend, tr0);
+ trend = emitir(IRTI(IR_ADD), emitir(IRTI(IR_ADD), trlen, trend),
+ lj_ir_kint(J, 1));
+ end = end+(int32_t)str->len+1;
+ } else if ((MSize)end <= str->len) {
+ emitir(IRTGI(IR_ULE), trend, trlen);
+ } else {
+ emitir(IRTGI(IR_UGT), trend, trlen);
+ end = (int32_t)str->len;
+ trend = trlen;
+ }
+ trstart = recff_string_start(J, str, &start, trstart, trlen, tr0);
+ if (rd->data) { /* Return string.sub result. */
+ if (end - start >= 0) {
+ /* Also handle empty range here, to avoid extra traces. */
+ TRef trptr, trslen = emitir(IRTI(IR_SUB), trend, trstart);
+ emitir(IRTGI(IR_GE), trslen, tr0);
+ trptr = emitir(IRT(IR_STRREF, IRT_PGC), trstr, trstart);
+ J->base[0] = emitir(IRT(IR_SNEW, IRT_STR), trptr, trslen);
+ } else { /* Range underflow: return empty string. */
+ emitir(IRTGI(IR_LT), trend, trstart);
+ J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty);
+ }
+ } else { /* Return string.byte result(s). */
+ ptrdiff_t i, len = end - start;
+ if (len > 0) {
+ TRef trslen = emitir(IRTI(IR_SUB), trend, trstart);
+ emitir(IRTGI(IR_EQ), trslen, lj_ir_kint(J, (int32_t)len));
+ if (J->baseslot + len > LJ_MAX_JSLOTS)
+ lj_trace_err_info(J, LJ_TRERR_STACKOV);
+ rd->nres = len;
+ for (i = 0; i < len; i++) {
+ TRef tmp = emitir(IRTI(IR_ADD), trstart, lj_ir_kint(J, (int32_t)i));
+ tmp = emitir(IRT(IR_STRREF, IRT_PGC), trstr, tmp);
+ J->base[i] = emitir(IRT(IR_XLOAD, IRT_U8), tmp, IRXLOAD_READONLY);
+ }
+ } else { /* Empty range or range underflow: return no results. */
+ emitir(IRTGI(IR_LE), trend, trstart);
+ rd->nres = 0;
+ }
+ }
+}
+
+static void LJ_FASTCALL recff_string_char(jit_State *J, RecordFFData *rd)
+{
+ TRef k255 = lj_ir_kint(J, 255);
+ BCReg i;
+ for (i = 0; J->base[i] != 0; i++) { /* Convert char values to strings. */
+ TRef tr = lj_opt_narrow_toint(J, J->base[i]);
+ emitir(IRTGI(IR_ULE), tr, k255);
+ J->base[i] = emitir(IRT(IR_TOSTR, IRT_STR), tr, IRTOSTR_CHAR);
+ }
+ if (i > 1) { /* Concatenate the strings, if there's more than one. */
+ TRef hdr = recff_bufhdr(J), tr = hdr;
+ for (i = 0; J->base[i] != 0; i++)
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr, J->base[i]);
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+ } else if (i == 0) {
+ J->base[0] = lj_ir_kstr(J, &J2G(J)->strempty);
+ }
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_string_rep(jit_State *J, RecordFFData *rd)
+{
+ TRef str = lj_ir_tostr(J, J->base[0]);
+ TRef rep = lj_opt_narrow_toint(J, J->base[1]);
+ TRef hdr, tr, str2 = 0;
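+ /* With a separator, emit str once and repeat the combined "sep..str" rep-1 times. */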
+ if (!tref_isnil(J->base[2])) {
+ TRef sep = lj_ir_tostr(J, J->base[2]);
+ int32_t vrep = argv2int(J, &rd->argv[1]);
+ emitir(IRTGI(vrep > 1 ? IR_GT : IR_LE), rep, lj_ir_kint(J, 1));
+ if (vrep > 1) {
+ TRef hdr2 = recff_bufhdr(J);
+ TRef tr2 = emitir(IRTG(IR_BUFPUT, IRT_PGC), hdr2, sep);
+ tr2 = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr2, str);
+ str2 = emitir(IRTG(IR_BUFSTR, IRT_STR), tr2, hdr2);
+ }
+ }
+ tr = hdr = recff_bufhdr(J);
+ if (str2) {
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr, str);
+ str = str2;
+ rep = emitir(IRTI(IR_ADD), rep, lj_ir_kint(J, -1));
+ }
+ tr = lj_ir_call(J, IRCALL_lj_buf_putstr_rep, tr, str, rep);
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+}
+
+static void LJ_FASTCALL recff_string_op(jit_State *J, RecordFFData *rd)
+{
+ TRef str = lj_ir_tostr(J, J->base[0]);
+ TRef hdr = recff_bufhdr(J);
+ TRef tr = lj_ir_call(J, rd->data, hdr, str);
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+}
+
+static void LJ_FASTCALL recff_string_find(jit_State *J, RecordFFData *rd)
+{
+ TRef trstr = lj_ir_tostr(J, J->base[0]);
+ TRef trpat = lj_ir_tostr(J, J->base[1]);
+ TRef trlen = emitir(IRTI(IR_FLOAD), trstr, IRFL_STR_LEN);
+ TRef tr0 = lj_ir_kint(J, 0);
+ TRef trstart;
+ GCstr *str = argv2str(J, &rd->argv[0]);
+ GCstr *pat = argv2str(J, &rd->argv[1]);
+ int32_t start;
+ J->needsnap = 1;
+ if (tref_isnil(J->base[2])) {
+ trstart = lj_ir_kint(J, 1);
+ start = 1;
+ } else {
+ trstart = lj_opt_narrow_toint(J, J->base[2]);
+ start = argv2int(J, &rd->argv[2]);
+ }
+ trstart = recff_string_start(J, str, &start, trstart, trlen, tr0);
+ if ((MSize)start <= str->len) {
+ emitir(IRTGI(IR_ULE), trstart, trlen);
+ } else {
+ emitir(IRTGI(IR_UGT), trstart, trlen);
+#if LJ_52
+ J->base[0] = TREF_NIL;
+ return;
+#else
+ trstart = trlen;
+ start = str->len;
+#endif
+ }
+ /* Fixed arg or no pattern matching chars? (Specialized to pattern string.) */
+ if ((J->base[2] && tref_istruecond(J->base[3])) ||
+ (emitir(IRTG(IR_EQ, IRT_STR), trpat, lj_ir_kstr(J, pat)),
+ !lj_str_haspattern(pat))) { /* Search for fixed string. */
+ TRef trsptr = emitir(IRT(IR_STRREF, IRT_PGC), trstr, trstart);
+ TRef trpptr = emitir(IRT(IR_STRREF, IRT_PGC), trpat, tr0);
+ TRef trslen = emitir(IRTI(IR_SUB), trlen, trstart);
+ TRef trplen = emitir(IRTI(IR_FLOAD), trpat, IRFL_STR_LEN);
+ TRef tr = lj_ir_call(J, IRCALL_lj_str_find, trsptr, trpptr, trslen, trplen);
+ TRef trp0 = lj_ir_kkptr(J, NULL);
+ if (lj_str_find(strdata(str)+(MSize)start, strdata(pat),
+ str->len-(MSize)start, pat->len)) {
+ TRef pos;
+ emitir(IRTG(IR_NE, IRT_PGC), tr, trp0);
+ /* Recompute offset. trsptr may not point into trstr after folding. */
+ pos = emitir(IRTI(IR_ADD), emitir(IRTI(IR_SUB), tr, trsptr), trstart);
+ J->base[0] = emitir(IRTI(IR_ADD), pos, lj_ir_kint(J, 1));
+ J->base[1] = emitir(IRTI(IR_ADD), pos, trplen);
+ rd->nres = 2;
+ } else {
+ emitir(IRTG(IR_EQ, IRT_PGC), tr, trp0);
+ J->base[0] = TREF_NIL;
+ }
+ } else { /* Search for pattern. */
+ recff_nyiu(J, rd);
+ return;
+ }
+}
+
+static void recff_format(jit_State *J, RecordFFData *rd, TRef hdr, int sbufx)
+{
+ ptrdiff_t arg = sbufx;
+ TRef tr = hdr, trfmt = lj_ir_tostr(J, J->base[arg]);
+ GCstr *fmt = argv2str(J, &rd->argv[arg]);
+ FormatState fs;
+ SFormat sf;
+ /* Specialize to the format string. */
+ emitir(IRTG(IR_EQ, IRT_STR), trfmt, lj_ir_kstr(J, fmt));
+ lj_strfmt_init(&fs, strdata(fmt), fmt->len);
+ while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { /* Parse format. */
+ TRef tra = sf == STRFMT_LIT ? 0 : J->base[++arg];
+ TRef trsf = lj_ir_kint(J, (int32_t)sf);
+ IRCallID id;
+ switch (STRFMT_TYPE(sf)) {
+ case STRFMT_LIT:
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr,
+ lj_ir_kstr(J, lj_str_new(J->L, fs.str, fs.len)));
+ break;
+ case STRFMT_INT:
+ id = IRCALL_lj_strfmt_putfnum_int;
+ handle_int:
+ if (!tref_isinteger(tra)) {
+#if LJ_HASFFI
+ if (tref_iscdata(tra)) {
+ tra = lj_crecord_loadiu64(J, tra, &rd->argv[arg]);
+ tr = lj_ir_call(J, IRCALL_lj_strfmt_putfxint, tr, trsf, tra);
+ break;
+ }
+#endif
+ goto handle_num;
+ }
+ if (sf == STRFMT_INT) { /* Shortcut for plain %d. */
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr,
+ emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_INT));
+ } else {
+#if LJ_HASFFI
+ tra = emitir(IRT(IR_CONV, IRT_U64), tra,
+ (IRT_INT|(IRT_U64<<5)|IRCONV_SEXT));
+ tr = lj_ir_call(J, IRCALL_lj_strfmt_putfxint, tr, trsf, tra);
+ lj_needsplit(J);
+#else
+ recff_nyiu(J, rd); /* Don't bother working around this NYI. */
+ return;
+#endif
+ }
+ break;
+ case STRFMT_UINT:
+ id = IRCALL_lj_strfmt_putfnum_uint;
+ goto handle_int;
+ case STRFMT_NUM:
+ id = IRCALL_lj_strfmt_putfnum;
+ handle_num:
+ tra = lj_ir_tonum(J, tra);
+ tr = lj_ir_call(J, id, tr, trsf, tra);
+ if (LJ_SOFTFP32) lj_needsplit(J);
+ break;
+ case STRFMT_STR:
+ if (!tref_isstr(tra)) {
+ recff_nyiu(J, rd); /* NYI: __tostring and non-string types for %s. */
+ /* NYI: also buffers. */
+ return;
+ }
+ if (sf == STRFMT_STR) /* Shortcut for plain %s. */
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr, tra);
+ else if ((sf & STRFMT_T_QUOTED))
+ tr = lj_ir_call(J, IRCALL_lj_strfmt_putquoted, tr, tra);
+ else
+ tr = lj_ir_call(J, IRCALL_lj_strfmt_putfstr, tr, trsf, tra);
+ break;
+ case STRFMT_CHAR:
+ tra = lj_opt_narrow_toint(J, tra);
+ if (sf == STRFMT_CHAR) /* Shortcut for plain %c. */
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr,
+ emitir(IRT(IR_TOSTR, IRT_STR), tra, IRTOSTR_CHAR));
+ else
+ tr = lj_ir_call(J, IRCALL_lj_strfmt_putfchar, tr, trsf, tra);
+ break;
+ case STRFMT_PTR: /* NYI */
+ case STRFMT_ERR:
+ default:
+ recff_nyiu(J, rd);
+ return;
+ }
+ }
+ if (sbufx) {
+ emitir(IRT(IR_USE, IRT_NIL), tr, 0);
+ } else {
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+ }
+}
+
+static void LJ_FASTCALL recff_string_format(jit_State *J, RecordFFData *rd)
+{
+ recff_format(J, rd, recff_bufhdr(J), 0);
+}
+
+/* -- Buffer library fast functions --------------------------------------- */
+
+#if LJ_HASBUFFER
+
+static LJ_AINLINE TRef recff_sbufx_get_L(jit_State *J, TRef ud)
+{
+ return emitir(IRT(IR_FLOAD, IRT_PGC), ud, IRFL_SBUF_L);
+}
+
+static LJ_AINLINE void recff_sbufx_set_L(jit_State *J, TRef ud, TRef val)
+{
+ TRef fref = emitir(IRT(IR_FREF, IRT_PGC), ud, IRFL_SBUF_L);
+ emitir(IRT(IR_FSTORE, IRT_PGC), fref, val);
+}
+
+static LJ_AINLINE TRef recff_sbufx_get_ptr(jit_State *J, TRef ud, IRFieldID fl)
+{
+ return emitir(IRT(IR_FLOAD, IRT_PTR), ud, fl);
+}
+
+static LJ_AINLINE void recff_sbufx_set_ptr(jit_State *J, TRef ud, IRFieldID fl, TRef val)
+{
+ TRef fref = emitir(IRT(IR_FREF, IRT_PTR), ud, fl);
+ emitir(IRT(IR_FSTORE, IRT_PTR), fref, val);
+}
+
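+/* Extended buffer length is the write pointer minus the read pointer (narrowed to int on 64 bit targets). */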
+static LJ_AINLINE TRef recff_sbufx_len(jit_State *J, TRef trr, TRef trw)
+{
+ TRef len = emitir(IRT(IR_SUB, IRT_INTP), trw, trr);
+ if (LJ_64)
+ len = emitir(IRTI(IR_CONV), len, (IRT_INT<<5)|IRT_INTP|IRCONV_NONE);
+ return len;
+}
+
+/* Emit typecheck for string buffer. */
+static TRef recff_sbufx_check(jit_State *J, RecordFFData *rd, ptrdiff_t arg)
+{
+ TRef trtype, ud = J->base[arg];
+ if (!tvisbuf(&rd->argv[arg])) lj_trace_err(J, LJ_TRERR_BADTYPE);
+ trtype = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), trtype, lj_ir_kint(J, UDTYPE_BUFFER));
+ J->needsnap = 1;
+ return ud;
+}
+
+/* Emit BUFHDR for write to extended string buffer. */
+static TRef recff_sbufx_write(jit_State *J, TRef ud)
+{
+ TRef trbuf = emitir(IRT(IR_ADD, IRT_PGC), ud, lj_ir_kint(J, sizeof(GCudata)));
+ return emitir(IRT(IR_BUFHDR, IRT_PGC), trbuf, IRBUFHDR_WRITE);
+}
+
+/* Check for integer in range for the buffer API. */
+static TRef recff_sbufx_checkint(jit_State *J, RecordFFData *rd, ptrdiff_t arg)
+{
+ TRef tr = J->base[arg];
+ TRef trlim = lj_ir_kint(J, LJ_MAX_BUF);
+ if (tref_isinteger(tr)) {
+ emitir(IRTGI(IR_ULE), tr, trlim);
+ } else if (tref_isnum(tr)) {
+ tr = emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
+ emitir(IRTGI(IR_ULE), tr, trlim);
+#if LJ_HASFFI
+ } else if (tref_iscdata(tr)) {
+ tr = lj_crecord_loadiu64(J, tr, &rd->argv[arg]);
+ emitir(IRTG(IR_ULE, IRT_U64), tr, lj_ir_kint64(J, LJ_MAX_BUF));
+ tr = emitir(IRTI(IR_CONV), tr, (IRT_INT<<5)|IRT_I64|IRCONV_NONE);
+#else
+ UNUSED(rd);
+#endif
+ } else {
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+static void LJ_FASTCALL recff_buffer_method_reset(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ SBufExt *sbx = bufV(&rd->argv[0]);
+ int iscow = (int)sbufiscow(sbx);
+ TRef trl = recff_sbufx_get_L(J, ud);
+ TRef trcow = emitir(IRT(IR_BAND, IRT_IGC), trl, lj_ir_kint(J, SBUF_FLAG_COW));
+ TRef zero = lj_ir_kint(J, 0);
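+ /* Specialize to the buffer's COW state; resetting a COW buffer detaches it. */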
+ emitir(IRTG(iscow ? IR_NE : IR_EQ, IRT_IGC), trcow, zero);
+ if (iscow) {
+ trl = emitir(IRT(IR_BXOR, IRT_IGC), trl,
+ LJ_GC64 ? lj_ir_kint64(J, SBUF_FLAG_COW) :
+ lj_ir_kint(J, SBUF_FLAG_COW));
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_W, zero);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_E, zero);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_B, zero);
+ recff_sbufx_set_L(J, ud, trl);
+ emitir(IRT(IR_FSTORE, IRT_PGC),
+ emitir(IRT(IR_FREF, IRT_PGC), ud, IRFL_SBUF_REF), zero);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_R, zero);
+ } else {
+ TRef trb = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_B);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_W, trb);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_R, trb);
+ }
+}
+
+static void LJ_FASTCALL recff_buffer_method_skip(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trr = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ TRef len = recff_sbufx_len(J, trr, trw);
+ TRef trn = recff_sbufx_checkint(J, rd, 1);
+ len = emitir(IRTI(IR_MIN), len, trn);
+ trr = emitir(IRT(IR_ADD, IRT_PTR), trr, len);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_R, trr);
+}
+
+static void LJ_FASTCALL recff_buffer_method_set(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef tr = J->base[1];
+ if (tref_isstr(tr)) {
+ TRef trp = emitir(IRT(IR_STRREF, IRT_PGC), tr, lj_ir_kint(J, 0));
+ TRef len = emitir(IRTI(IR_FLOAD), tr, IRFL_STR_LEN);
+ lj_ir_call(J, IRCALL_lj_bufx_set, trbuf, trp, len, tr);
+#if LJ_HASFFI
+ } else if (tref_iscdata(tr)) {
+ TRef trp = lj_crecord_topcvoid(J, tr, &rd->argv[1]);
+ TRef len = recff_sbufx_checkint(J, rd, 2);
+ lj_ir_call(J, IRCALL_lj_bufx_set, trbuf, trp, len, tr);
+#endif
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_buffer_method_put(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef tr;
+ ptrdiff_t arg;
+ if (!J->base[1]) return;
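+ /* First pass: type-check buffer arguments and guard against aliasing the destination. */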
+ for (arg = 1; (tr = J->base[arg]); arg++) {
+ if (tref_isudata(tr)) {
+ TRef ud2 = recff_sbufx_check(J, rd, arg);
+ emitir(IRTG(IR_NE, IRT_PGC), ud, ud2);
+ }
+ }
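+ /* Second pass: append each argument to the buffer. */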
+ for (arg = 1; (tr = J->base[arg]); arg++) {
+ if (tref_isstr(tr)) {
+ trbuf = emitir(IRTG(IR_BUFPUT, IRT_PGC), trbuf, tr);
+ } else if (tref_isnumber(tr)) {
+ trbuf = emitir(IRTG(IR_BUFPUT, IRT_PGC), trbuf,
+ emitir(IRT(IR_TOSTR, IRT_STR), tr,
+ tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT));
+ } else if (tref_isudata(tr)) {
+ TRef trr = recff_sbufx_get_ptr(J, tr, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, tr, IRFL_SBUF_W);
+ TRef len = recff_sbufx_len(J, trr, trw);
+ trbuf = lj_ir_call(J, IRCALL_lj_buf_putmem, trbuf, trr, len);
+ } else {
+ recff_nyiu(J, rd);
+ }
+ }
+ emitir(IRT(IR_USE, IRT_NIL), trbuf, 0);
+}
+
+static void LJ_FASTCALL recff_buffer_method_putf(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ recff_format(J, rd, trbuf, 1);
+}
+
+static void LJ_FASTCALL recff_buffer_method_get(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trr = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ TRef tr;
+ ptrdiff_t arg;
+ if (!J->base[1]) { J->base[1] = TREF_NIL; J->base[2] = 0; }
+ for (arg = 0; (tr = J->base[arg+1]); arg++) {
+ if (!tref_isnil(tr)) {
+ J->base[arg+1] = recff_sbufx_checkint(J, rd, arg+1);
+ }
+ }
+ for (arg = 0; (tr = J->base[arg+1]); arg++) {
+ TRef len = recff_sbufx_len(J, trr, trw);
+ if (tref_isnil(tr)) {
+ J->base[arg] = emitir(IRT(IR_XSNEW, IRT_STR), trr, len);
+ trr = trw;
+ } else {
+ TRef tru;
+ len = emitir(IRTI(IR_MIN), len, tr);
+ tru = emitir(IRT(IR_ADD, IRT_PTR), trr, len);
+ J->base[arg] = emitir(IRT(IR_XSNEW, IRT_STR), trr, len);
+ trr = tru; /* Doing the ADD before the SNEW generates better code. */
+ }
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_R, trr);
+ }
+ rd->nres = arg;
+}
+
+static void LJ_FASTCALL recff_buffer_method___tostring(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trr = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ J->base[0] = emitir(IRT(IR_XSNEW, IRT_STR), trr, recff_sbufx_len(J, trr, trw));
+}
+
+static void LJ_FASTCALL recff_buffer_method___len(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trr = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ J->base[0] = recff_sbufx_len(J, trr, trw);
+}
+
+#if LJ_HASFFI
+static void LJ_FASTCALL recff_buffer_method_putcdata(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef tr = lj_crecord_topcvoid(J, J->base[1], &rd->argv[1]);
+ TRef len = recff_sbufx_checkint(J, rd, 2);
+ trbuf = lj_ir_call(J, IRCALL_lj_buf_putmem, trbuf, tr, len);
+ emitir(IRT(IR_USE, IRT_NIL), trbuf, 0);
+}
+
+static void LJ_FASTCALL recff_buffer_method_reserve(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef trsz = recff_sbufx_checkint(J, rd, 1);
+ J->base[1] = lj_ir_call(J, IRCALL_lj_bufx_more, trbuf, trsz);
+ J->base[0] = lj_crecord_topuint8(J, recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W));
+ rd->nres = 2;
+}
+
+static void LJ_FASTCALL recff_buffer_method_commit(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef len = recff_sbufx_checkint(J, rd, 1);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ TRef tre = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_E);
+ TRef left = emitir(IRT(IR_SUB, IRT_INTP), tre, trw);
+ if (LJ_64)
+ left = emitir(IRTI(IR_CONV), left, (IRT_INT<<5)|IRT_INTP|IRCONV_NONE);
+ emitir(IRTGI(IR_ULE), len, left);
+ trw = emitir(IRT(IR_ADD, IRT_PTR), trw, len);
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_W, trw);
+}
+
+static void LJ_FASTCALL recff_buffer_method_ref(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trr = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_R);
+ TRef trw = recff_sbufx_get_ptr(J, ud, IRFL_SBUF_W);
+ J->base[0] = lj_crecord_topuint8(J, trr);
+ J->base[1] = recff_sbufx_len(J, trr, trw);
+ rd->nres = 2;
+}
+#endif
+
+static void LJ_FASTCALL recff_buffer_method_encode(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef tmp = recff_tmpref(J, J->base[1], IRTMPREF_IN1);
+ lj_ir_call(J, IRCALL_lj_serialize_put, trbuf, tmp);
+ /* No IR_USE needed, since the call is a store. */
+}
+
+static void LJ_FASTCALL recff_buffer_method_decode(jit_State *J, RecordFFData *rd)
+{
+ TRef ud = recff_sbufx_check(J, rd, 0);
+ TRef trbuf = recff_sbufx_write(J, ud);
+ TRef tmp = recff_tmpref(J, TREF_NIL, IRTMPREF_OUT1);
+ TRef trr = lj_ir_call(J, IRCALL_lj_serialize_get, trbuf, tmp);
+ IRType t = (IRType)lj_serialize_peektype(bufV(&rd->argv[0]));
+ /* No IR_USE needed, since the call is a store. */
+ J->base[0] = lj_record_vload(J, tmp, 0, t);
+ /* The sbx->r store must be after the VLOAD type check, in case it fails. */
+ recff_sbufx_set_ptr(J, ud, IRFL_SBUF_R, trr);
+}
+
+static void LJ_FASTCALL recff_buffer_encode(jit_State *J, RecordFFData *rd)
+{
+ TRef tmp = recff_tmpref(J, J->base[0], IRTMPREF_IN1);
+ J->base[0] = lj_ir_call(J, IRCALL_lj_serialize_encode, tmp);
+ /* IR_USE needed for IR_CALLA, because the encoder may throw non-OOM. */
+ emitir(IRT(IR_USE, IRT_NIL), J->base[0], 0);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_buffer_decode(jit_State *J, RecordFFData *rd)
+{
+ if (tvisstr(&rd->argv[0])) {
+ GCstr *str = strV(&rd->argv[0]);
+ SBufExt sbx;
+ IRType t;
+ TRef tmp = recff_tmpref(J, TREF_NIL, IRTMPREF_OUT1);
+ TRef tr = lj_ir_call(J, IRCALL_lj_serialize_decode, tmp, J->base[0]);
+ /* IR_USE needed for IR_CALLA, because the decoder may throw non-OOM.
+ ** That's why IRCALL_lj_serialize_decode needs a fake INT result.
+ */
+ emitir(IRT(IR_USE, IRT_NIL), tr, 0);
+ memset(&sbx, 0, sizeof(SBufExt));
+ lj_bufx_set_cow(J->L, &sbx, strdata(str), str->len);
+ t = (IRType)lj_serialize_peektype(&sbx);
+ J->base[0] = lj_record_vload(J, tmp, 0, t);
+ } /* else: Interpreter will throw. */
+}
+
+#endif
+
+/* -- Table library fast functions ---------------------------------------- */
+
+static void LJ_FASTCALL recff_table_insert(jit_State *J, RecordFFData *rd)
+{
+ RecordIndex ix;
+ ix.tab = J->base[0];
+ ix.val = J->base[1];
+ rd->nres = 0;
+ if (tref_istab(ix.tab) && ix.val) {
+ if (!J->base[2]) { /* Simple push: t[#t+1] = v */
+ TRef trlen = emitir(IRTI(IR_ALEN), ix.tab, TREF_NIL);
+ GCtab *t = tabV(&rd->argv[0]);
+ ix.key = emitir(IRTI(IR_ADD), trlen, lj_ir_kint(J, 1));
+ settabV(J->L, &ix.tabv, t);
+ setintV(&ix.keyv, lj_tab_len(t) + 1);
+ ix.idxchain = 0;
+ lj_record_idx(J, &ix); /* Set new value. */
+ } else { /* Complex case: insert in the middle. */
+ recff_nyiu(J, rd);
+ return;
+ }
+ } /* else: Interpreter will throw. */
+}
+
+static void LJ_FASTCALL recff_table_concat(jit_State *J, RecordFFData *rd)
+{
+ TRef tab = J->base[0];
+ if (tref_istab(tab)) {
+ TRef sep = !tref_isnil(J->base[1]) ?
+ lj_ir_tostr(J, J->base[1]) : lj_ir_knull(J, IRT_STR);
+ TRef tri = (J->base[1] && !tref_isnil(J->base[2])) ?
+ lj_opt_narrow_toint(J, J->base[2]) : lj_ir_kint(J, 1);
+ TRef tre = (J->base[1] && J->base[2] && !tref_isnil(J->base[3])) ?
+ lj_opt_narrow_toint(J, J->base[3]) :
+ emitir(IRTI(IR_ALEN), tab, TREF_NIL);
+ TRef hdr = recff_bufhdr(J);
+ TRef tr = lj_ir_call(J, IRCALL_lj_buf_puttab, hdr, tab, sep, tri, tre);
+ emitir(IRTG(IR_NE, IRT_PTR), tr, lj_ir_kptr(J, NULL));
+ J->base[0] = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+ } /* else: Interpreter will throw. */
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_table_new(jit_State *J, RecordFFData *rd)
+{
+ TRef tra = lj_opt_narrow_toint(J, J->base[0]);
+ TRef trh = lj_opt_narrow_toint(J, J->base[1]);
+ J->base[0] = lj_ir_call(J, IRCALL_lj_tab_new_ah, tra, trh);
+ UNUSED(rd);
+}
+
+static void LJ_FASTCALL recff_table_clear(jit_State *J, RecordFFData *rd)
+{
+ TRef tr = J->base[0];
+ if (tref_istab(tr)) {
+ rd->nres = 0;
+ lj_ir_call(J, IRCALL_lj_tab_clear, tr);
+ J->needsnap = 1;
+ } /* else: Interpreter will throw. */
+}
+
+/* -- I/O library fast functions ------------------------------------------ */
+
+/* Get FILE* for I/O function. Any I/O error aborts recording, so there's
+** no need to encode the alternate cases for any of the guards.
+*/
+static TRef recff_io_fp(jit_State *J, TRef *udp, int32_t id)
+{
+ TRef tr, ud, fp;
+ if (id) { /* io.func() */
+ ud = lj_ir_ggfload(J, IRT_UDATA, GG_OFS(g.gcroot[id]));
+ } else { /* fp:method() */
+ ud = J->base[0];
+ if (!tref_isudata(ud))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ tr = emitir(IRT(IR_FLOAD, IRT_U8), ud, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, UDTYPE_IO_FILE));
+ }
+ *udp = ud;
+ fp = emitir(IRT(IR_FLOAD, IRT_PTR), ud, IRFL_UDATA_FILE);
+ emitir(IRTG(IR_NE, IRT_PTR), fp, lj_ir_knull(J, IRT_PTR));
+ return fp;
+}
+
+static void LJ_FASTCALL recff_io_write(jit_State *J, RecordFFData *rd)
+{
+ TRef ud, fp = recff_io_fp(J, &ud, rd->data);
+ TRef zero = lj_ir_kint(J, 0);
+ TRef one = lj_ir_kint(J, 1);
+ ptrdiff_t i = rd->data == 0 ? 1 : 0;
+ for (; J->base[i]; i++) {
+ TRef str = lj_ir_tostr(J, J->base[i]);
+ TRef buf = emitir(IRT(IR_STRREF, IRT_PGC), str, zero);
+ TRef len = emitir(IRTI(IR_FLOAD), str, IRFL_STR_LEN);
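+ /* Strings known to be a single character are written with fputc instead of fwrite. */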
+ if (tref_isk(len) && IR(tref_ref(len))->i == 1) {
+ IRIns *irs = IR(tref_ref(str));
+ TRef tr = (irs->o == IR_TOSTR && irs->op2 == IRTOSTR_CHAR) ?
+ irs->op1 :
+ emitir(IRT(IR_XLOAD, IRT_U8), buf, IRXLOAD_READONLY);
+ tr = lj_ir_call(J, IRCALL_fputc, tr, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_NE), tr, lj_ir_kint(J, -1));
+ } else {
+ TRef tr = lj_ir_call(J, IRCALL_fwrite, buf, one, len, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_EQ), tr, len);
+ }
+ }
+ J->base[0] = LJ_52 ? ud : TREF_TRUE;
+}
+
+static void LJ_FASTCALL recff_io_flush(jit_State *J, RecordFFData *rd)
+{
+ TRef ud, fp = recff_io_fp(J, &ud, rd->data);
+ TRef tr = lj_ir_call(J, IRCALL_fflush, fp);
+ if (results_wanted(J) != 0) /* Check result only if not ignored. */
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
+ J->base[0] = TREF_TRUE;
+}
+
+/* -- Debug library fast functions ---------------------------------------- */
+
+static void LJ_FASTCALL recff_debug_getmetatable(jit_State *J, RecordFFData *rd)
+{
+ GCtab *mt;
+ TRef mtref;
+ TRef tr = J->base[0];
+ if (tref_istab(tr)) {
+ mt = tabref(tabV(&rd->argv[0])->metatable);
+ mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_TAB_META);
+ } else if (tref_isudata(tr)) {
+ mt = tabref(udataV(&rd->argv[0])->metatable);
+ mtref = emitir(IRT(IR_FLOAD, IRT_TAB), tr, IRFL_UDATA_META);
+ } else {
+ mt = tabref(basemt_obj(J2G(J), &rd->argv[0]));
+ J->base[0] = mt ? lj_ir_ktab(J, mt) : TREF_NIL;
+ return;
+ }
+ emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
+ J->base[0] = mt ? mtref : TREF_NIL;
+}
+
+/* -- Record calls to fast functions -------------------------------------- */
+
+#include "lj_recdef.h"
+
+static uint32_t recdef_lookup(GCfunc *fn)
+{
+ if (fn->c.ffid < sizeof(recff_idmap)/sizeof(recff_idmap[0]))
+ return recff_idmap[fn->c.ffid];
+ else
+ return 0;
+}
+
+/* Record entry to a fast function or C function. */
+void lj_ffrecord_func(jit_State *J)
+{
+ RecordFFData rd;
+ uint32_t m = recdef_lookup(J->fn);
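+ /* Low byte of m holds the per-ffid data, the upper bits index the handler table. */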
+ rd.data = m & 0xff;
+ rd.nres = 1; /* Default is one result. */
+ rd.argv = J->L->base;
+ J->base[J->maxslot] = 0; /* Mark end of arguments. */
+ (recff_func[m >> 8])(J, &rd); /* Call recff_* handler. */
+ if (rd.nres >= 0) {
+ if (J->postproc == LJ_POST_NONE) J->postproc = LJ_POST_FFRETRY;
+ lj_record_ret(J, 0, rd.nres);
+ }
+}
+
+#undef IR
+#undef emitir
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ffrecord.h b/libs/luajit-cmake/luajit/src/lj_ffrecord.h
new file mode 100644
index 0000000..0acb8ed
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ffrecord.h
@@ -0,0 +1,24 @@
+/*
+** Fast function call recorder.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FFRECORD_H
+#define _LJ_FFRECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Data used by handlers to record a fast function. */
+typedef struct RecordFFData {
+ TValue *argv; /* Runtime argument values. */
+ ptrdiff_t nres; /* Number of returned results (defaults to 1). */
+ uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */
+} RecordFFData;
+
+LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv);
+LJ_FUNC void lj_ffrecord_func(jit_State *J);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_frame.h b/libs/luajit-cmake/luajit/src/lj_frame.h
new file mode 100644
index 0000000..aa1dc11
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_frame.h
@@ -0,0 +1,297 @@
+/*
+** Stack frames.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FRAME_H
+#define _LJ_FRAME_H
+
+#include "lj_obj.h"
+#include "lj_bc.h"
+
+/* -- Lua stack frame ----------------------------------------------------- */
+
+/* Frame type markers in LSB of PC (4-byte aligned) or delta (8-byte aligned):
+**
+** PC 00 Lua frame
+** delta 001 C frame
+** delta 010 Continuation frame
+** delta 011 Lua vararg frame
+** delta 101 cpcall() frame
+** delta 110 ff pcall() frame
+** delta 111 ff pcall() frame with active hook
+*/
+enum {
+ FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG,
+ FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH
+};
+#define FRAME_TYPE 3
+#define FRAME_P 4
+#define FRAME_TYPEP (FRAME_TYPE|FRAME_P)
+
+/* Macros to access and modify Lua frames. */
+#if LJ_FR2
+/* Two-slot frame info, required for 64 bit PC/GCRef:
+**
+** base-2 base-1 | base base+1 ...
+** [func PC/delta/ft] | [slots ...]
+** ^-- frame | ^-- base ^-- top
+**
+** Continuation frames:
+**
+** base-4 base-3 base-2 base-1 | base base+1 ...
+** [cont PC ] [func PC/delta/ft] | [slots ...]
+** ^-- frame | ^-- base ^-- top
+*/
+#define frame_gc(f) (gcval((f)-1))
+#define frame_ftsz(f) ((ptrdiff_t)(f)->ftsz)
+#define frame_pc(f) ((const BCIns *)frame_ftsz(f))
+#define setframe_gc(f, p, tp) (setgcVraw((f), (p), (tp)))
+#define setframe_ftsz(f, sz) ((f)->ftsz = (sz))
+#define setframe_pc(f, pc) ((f)->ftsz = (int64_t)(intptr_t)(pc))
+#else
+/* One-slot frame info, sufficient for 32 bit PC/GCRef:
+**
+** base-1 | base base+1 ...
+** lo hi |
+** [func | PC/delta/ft] | [slots ...]
+** ^-- frame | ^-- base ^-- top
+**
+** Continuation frames:
+**
+** base-2 base-1 | base base+1 ...
+** lo hi lo hi |
+** [cont | PC] [func | PC/delta/ft] | [slots ...]
+** ^-- frame | ^-- base ^-- top
+*/
+#define frame_gc(f) (gcref((f)->fr.func))
+#define frame_ftsz(f) ((ptrdiff_t)(f)->fr.tp.ftsz)
+#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns))
+#define setframe_gc(f, p, tp) (setgcref((f)->fr.func, (p)), UNUSED(tp))
+#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (int32_t)(sz))
+#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc)))
+#endif
+
+#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE)
+#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP)
+#define frame_islua(f) (frame_type(f) == FRAME_LUA)
+#define frame_isc(f) (frame_type(f) == FRAME_C)
+#define frame_iscont(f) (frame_typep(f) == FRAME_CONT)
+#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG)
+#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL)
+
+#define frame_func(f) (&frame_gc(f)->fn)
+#define frame_delta(f) (frame_ftsz(f) >> 3)
+#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP)
+
+enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
+
+#if LJ_FR2
+#define frame_contpc(f) (frame_pc((f)-2))
+#define frame_contv(f) (((f)-3)->u64)
+#else
+#define frame_contpc(f) (frame_pc((f)-1))
+#define frame_contv(f) (((f)-1)->u32.lo)
+#endif
+#if LJ_FR2
+#define frame_contf(f) ((ASMFunction)(uintptr_t)((f)-3)->u64)
+#elif LJ_64
+#define frame_contf(f) \
+ ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \
+ (intptr_t)(int32_t)((f)-1)->u32.lo))
+#else
+#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void))
+#endif
+#define frame_iscont_fficb(f) \
+ (LJ_HASFFI && frame_contv(f) == LJ_CONT_FFI_CALLBACK)
+
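+/* The caller of a Lua frame is located via the A operand of the calling instruction. */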
+#define frame_prevl(f) ((f) - (1+LJ_FR2+bc_a(frame_pc(f)[-1])))
+#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f)))
+#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f))
+/* Note: this macro does not skip over FRAME_VARG. */
+
+/* -- C stack frame ------------------------------------------------------- */
+
+/* Macros to access and modify the C stack frame chain. */
+
+/* These definitions must match with the arch-specific *.dasc files. */
+#if LJ_TARGET_X86
+#if LJ_ABI_WIN
+#define CFRAME_OFS_ERRF (19*4)
+#define CFRAME_OFS_NRES (18*4)
+#define CFRAME_OFS_PREV (17*4)
+#define CFRAME_OFS_L (16*4)
+#define CFRAME_OFS_SEH (9*4)
+#define CFRAME_OFS_PC (6*4)
+#define CFRAME_OFS_MULTRES (5*4)
+#define CFRAME_SIZE (16*4)
+#define CFRAME_SHIFT_MULTRES 0
+#else
+#define CFRAME_OFS_ERRF (15*4)
+#define CFRAME_OFS_NRES (14*4)
+#define CFRAME_OFS_PREV (13*4)
+#define CFRAME_OFS_L (12*4)
+#define CFRAME_OFS_PC (6*4)
+#define CFRAME_OFS_MULTRES (5*4)
+#define CFRAME_SIZE (12*4)
+#define CFRAME_SHIFT_MULTRES 0
+#endif
+#elif LJ_TARGET_X64
+#if LJ_ABI_WIN
+#define CFRAME_OFS_PREV (13*8)
+#if LJ_GC64
+#define CFRAME_OFS_PC (12*8)
+#define CFRAME_OFS_L (11*8)
+#define CFRAME_OFS_ERRF (21*4)
+#define CFRAME_OFS_NRES (20*4)
+#define CFRAME_OFS_MULTRES (8*4)
+#else
+#define CFRAME_OFS_PC (25*4)
+#define CFRAME_OFS_L (24*4)
+#define CFRAME_OFS_ERRF (23*4)
+#define CFRAME_OFS_NRES (22*4)
+#define CFRAME_OFS_MULTRES (21*4)
+#endif
+#define CFRAME_SIZE (10*8)
+#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8)
+#define CFRAME_SHIFT_MULTRES 0
+#else
+#define CFRAME_OFS_PREV (4*8)
+#if LJ_GC64
+#define CFRAME_OFS_PC (3*8)
+#define CFRAME_OFS_L (2*8)
+#define CFRAME_OFS_ERRF (3*4)
+#define CFRAME_OFS_NRES (2*4)
+#define CFRAME_OFS_MULTRES (0*4)
+#else
+#define CFRAME_OFS_PC (7*4)
+#define CFRAME_OFS_L (6*4)
+#define CFRAME_OFS_ERRF (5*4)
+#define CFRAME_OFS_NRES (4*4)
+#define CFRAME_OFS_MULTRES (1*4)
+#endif
+#if LJ_NO_UNWIND
+#define CFRAME_SIZE (12*8)
+#else
+#define CFRAME_SIZE (10*8)
+#endif
+#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16)
+#define CFRAME_SHIFT_MULTRES 0
+#endif
+#elif LJ_TARGET_ARM
+#define CFRAME_OFS_ERRF 24
+#define CFRAME_OFS_NRES 20
+#define CFRAME_OFS_PREV 16
+#define CFRAME_OFS_L 12
+#define CFRAME_OFS_PC 8
+#define CFRAME_OFS_MULTRES 4
+#if LJ_ARCH_HASFPU
+#define CFRAME_SIZE 128
+#else
+#define CFRAME_SIZE 64
+#endif
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_ARM64
+#define CFRAME_OFS_ERRF 36
+#define CFRAME_OFS_NRES 40
+#define CFRAME_OFS_PREV 0
+#define CFRAME_OFS_L 16
+#define CFRAME_OFS_PC 8
+#define CFRAME_OFS_MULTRES 32
+#define CFRAME_SIZE 208
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_PPC
+#if LJ_TARGET_XBOX360
+#define CFRAME_OFS_ERRF 424
+#define CFRAME_OFS_NRES 420
+#define CFRAME_OFS_PREV 400
+#define CFRAME_OFS_L 416
+#define CFRAME_OFS_PC 412
+#define CFRAME_OFS_MULTRES 408
+#define CFRAME_SIZE 384
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_ARCH_PPC32ON64
+#define CFRAME_OFS_ERRF 472
+#define CFRAME_OFS_NRES 468
+#define CFRAME_OFS_PREV 448
+#define CFRAME_OFS_L 464
+#define CFRAME_OFS_PC 460
+#define CFRAME_OFS_MULTRES 456
+#define CFRAME_SIZE 400
+#define CFRAME_SHIFT_MULTRES 3
+#else
+#define CFRAME_OFS_ERRF 48
+#define CFRAME_OFS_NRES 44
+#define CFRAME_OFS_PREV 40
+#define CFRAME_OFS_L 36
+#define CFRAME_OFS_PC 32
+#define CFRAME_OFS_MULTRES 28
+#define CFRAME_SIZE (LJ_ARCH_HASFPU ? 272 : 128)
+#define CFRAME_SHIFT_MULTRES 3
+#endif
+#elif LJ_TARGET_MIPS32
+#if LJ_ARCH_HASFPU
+#define CFRAME_OFS_ERRF 124
+#define CFRAME_OFS_NRES 120
+#define CFRAME_OFS_PREV 116
+#define CFRAME_OFS_L 112
+#define CFRAME_SIZE 112
+#else
+#define CFRAME_OFS_ERRF 76
+#define CFRAME_OFS_NRES 72
+#define CFRAME_OFS_PREV 68
+#define CFRAME_OFS_L 64
+#define CFRAME_SIZE 64
+#endif
+#define CFRAME_OFS_PC 20
+#define CFRAME_OFS_MULTRES 16
+#define CFRAME_SHIFT_MULTRES 3
+#elif LJ_TARGET_MIPS64
+#if LJ_ARCH_HASFPU
+#define CFRAME_OFS_ERRF 188
+#define CFRAME_OFS_NRES 184
+#define CFRAME_OFS_PREV 176
+#define CFRAME_OFS_L 168
+#define CFRAME_OFS_PC 160
+#define CFRAME_SIZE 192
+#else
+#define CFRAME_OFS_ERRF 124
+#define CFRAME_OFS_NRES 120
+#define CFRAME_OFS_PREV 112
+#define CFRAME_OFS_L 104
+#define CFRAME_OFS_PC 96
+#define CFRAME_SIZE 128
+#endif
+#define CFRAME_OFS_MULTRES 0
+#define CFRAME_SHIFT_MULTRES 3
+#else
+#error "Missing CFRAME_* definitions for this architecture"
+#endif
+
+#ifndef CFRAME_SIZE_JIT
+#define CFRAME_SIZE_JIT CFRAME_SIZE
+#endif
+
+#define CFRAME_RESUME 1
+#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */
+#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF))
+
+#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF))
+#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES))
+#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV))
+#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES))
+#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES)
+#define cframe_L(cf) \
+ (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th)
+#define cframe_pc(cf) \
+ (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns))
+#define setcframe_L(cf, L) \
+ (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_L), (L)))
+#define setcframe_pc(cf, pc) \
+ (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc)))
+#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME)
+#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF)
+#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK))
+#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe))
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_func.c b/libs/luajit-cmake/luajit/src/lj_func.c
new file mode 100644
index 0000000..9795a77
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_func.c
@@ -0,0 +1,191 @@
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_func_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_func.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+
+/* -- Prototypes ---------------------------------------------------------- */
+
+void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt)
+{
+ lj_mem_free(g, pt, pt->sizept);
+}
+
+/* -- Upvalues ------------------------------------------------------------ */
+
+static void unlinkuv(global_State *g, GCupval *uv)
+{
+ UNUSED(g);
+ lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
+ "broken upvalue chain");
+ setgcrefr(uvnext(uv)->prev, uv->prev);
+ setgcrefr(uvprev(uv)->next, uv->next);
+}
+
+/* Find existing open upvalue for a stack slot or create a new one. */
+static GCupval *func_finduv(lua_State *L, TValue *slot)
+{
+ global_State *g = G(L);
+ GCRef *pp = &L->openupval;
+ GCupval *p;
+ GCupval *uv;
+ /* Search the sorted list of open upvalues. */
+ while (gcref(*pp) != NULL && uvval((p = gco2uv(gcref(*pp)))) >= slot) {
+ lj_assertG(!p->closed && uvval(p) != &p->tv, "closed upvalue in chain");
+ if (uvval(p) == slot) { /* Found open upvalue pointing to same slot? */
+ if (isdead(g, obj2gco(p))) /* Resurrect it, if it's dead. */
+ flipwhite(obj2gco(p));
+ return p;
+ }
+ pp = &p->nextgc;
+ }
+ /* No matching upvalue found. Create a new one. */
+ uv = lj_mem_newt(L, sizeof(GCupval), GCupval);
+ newwhite(g, uv);
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 0; /* Still open. */
+ setmref(uv->v, slot); /* Pointing to the stack slot. */
+ /* NOBARRIER: The GCupval is new (marked white) and open. */
+ setgcrefr(uv->nextgc, *pp); /* Insert into sorted list of open upvalues. */
+ setgcref(*pp, obj2gco(uv));
+ setgcref(uv->prev, obj2gco(&g->uvhead)); /* Insert into GC list, too. */
+ setgcrefr(uv->next, g->uvhead.next);
+ setgcref(uvnext(uv)->prev, obj2gco(uv));
+ setgcref(g->uvhead.next, obj2gco(uv));
+ lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
+ "broken upvalue chain");
+ return uv;
+}
+
+/* Create an empty and closed upvalue. */
+static GCupval *func_emptyuv(lua_State *L)
+{
+ GCupval *uv = (GCupval *)lj_mem_newgco(L, sizeof(GCupval));
+ uv->gct = ~LJ_TUPVAL;
+ uv->closed = 1;
+ setnilV(&uv->tv);
+ setmref(uv->v, &uv->tv);
+ return uv;
+}
+
+/* Close all open upvalues pointing to some stack level or above. */
+void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level)
+{
+ GCupval *uv;
+ global_State *g = G(L);
+ while (gcref(L->openupval) != NULL &&
+ uvval((uv = gco2uv(gcref(L->openupval)))) >= level) {
+ GCobj *o = obj2gco(uv);
+ lj_assertG(!isblack(o), "bad black upvalue");
+ lj_assertG(!uv->closed && uvval(uv) != &uv->tv, "closed upvalue in chain");
+ setgcrefr(L->openupval, uv->nextgc); /* No longer in open list. */
+ if (isdead(g, o)) {
+ lj_func_freeuv(g, uv);
+ } else {
+ unlinkuv(g, uv);
+ lj_gc_closeuv(g, uv);
+ }
+ }
+}
+
+void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv)
+{
+ if (!uv->closed)
+ unlinkuv(g, uv);
+ lj_mem_freet(g, uv);
+}
+
+/* -- Functions (closures) ------------------------------------------------ */
+
+GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env)
+{
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeCfunc(nelems));
+ fn->c.gct = ~LJ_TFUNC;
+ fn->c.ffid = FF_C;
+ fn->c.nupvalues = (uint8_t)nelems;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ setmref(fn->c.pc, &G(L)->bc_cfunc_ext);
+ setgcref(fn->c.env, obj2gco(env));
+ return fn;
+}
+
+static GCfunc *func_newL(lua_State *L, GCproto *pt, GCtab *env)
+{
+ uint32_t count;
+ GCfunc *fn = (GCfunc *)lj_mem_newgco(L, sizeLfunc((MSize)pt->sizeuv));
+ fn->l.gct = ~LJ_TFUNC;
+ fn->l.ffid = FF_LUA;
+ fn->l.nupvalues = 0; /* Set to zero until upvalues are initialized. */
+ /* NOBARRIER: Really a setgcref. But the GCfunc is new (marked white). */
+ setmref(fn->l.pc, proto_bc(pt));
+ setgcref(fn->l.env, obj2gco(env));
+ /* Saturating 3 bit counter (0..7) for created closures. */
+ count = (uint32_t)pt->flags + PROTO_CLCOUNT;
+ pt->flags = (uint8_t)(count - ((count >> PROTO_CLC_BITS) & PROTO_CLCOUNT));
+ return fn;
+}
+
+/* Create a new Lua function with empty upvalues. */
+GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env)
+{
+ GCfunc *fn = func_newL(L, pt, env);
+ MSize i, nuv = pt->sizeuv;
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ for (i = 0; i < nuv; i++) {
+ GCupval *uv = func_emptyuv(L);
+ int32_t v = proto_uv(pt)[i];
+ uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1);
+ uv->dhash = (uint32_t)(uintptr_t)pt ^ (v << 24);
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+/* Do a GC check and create a new Lua function with inherited upvalues. */
+GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent)
+{
+ GCfunc *fn;
+ GCRef *puv;
+ MSize i, nuv;
+ TValue *base;
+ lj_gc_check_fixtop(L);
+ fn = func_newL(L, pt, tabref(parent->env));
+ /* NOBARRIER: The GCfunc is new (marked white). */
+ puv = parent->uvptr;
+ nuv = pt->sizeuv;
+ base = L->base;
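+ /* Local upvalues are found or created on the stack; others are inherited from the parent. */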
+ for (i = 0; i < nuv; i++) {
+ uint32_t v = proto_uv(pt)[i];
+ GCupval *uv;
+ if ((v & PROTO_UV_LOCAL)) {
+ uv = func_finduv(L, base + (v & 0xff));
+ uv->immutable = ((v / PROTO_UV_IMMUTABLE) & 1);
+ uv->dhash = (uint32_t)(uintptr_t)mref(parent->pc, char) ^ (v << 24);
+ } else {
+ uv = &gcref(puv[v])->uv;
+ }
+ setgcref(fn->l.uvptr[i], obj2gco(uv));
+ }
+ fn->l.nupvalues = (uint8_t)nuv;
+ return fn;
+}
+
+void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *fn)
+{
+ MSize size = isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ lj_mem_free(g, fn, size);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_func.h b/libs/luajit-cmake/luajit/src/lj_func.h
new file mode 100644
index 0000000..44df4de
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_func.h
@@ -0,0 +1,24 @@
+/*
+** Function handling (prototypes, functions and upvalues).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_FUNC_H
+#define _LJ_FUNC_H
+
+#include "lj_obj.h"
+
+/* Prototypes. */
+LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt);
+
+/* Upvalues. */
+LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level);
+LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv);
+
+/* Functions (closures). */
+LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env);
+LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env);
+LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent);
+LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c);
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_gc.c b/libs/luajit-cmake/luajit/src/lj_gc.c
new file mode 100644
index 0000000..2fc52ec
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_gc.c
@@ -0,0 +1,909 @@
+/*
+** Garbage collector.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_gc_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_udata.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+
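+/* Pacing constants for the incremental GC. */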
+#define GCSTEPSIZE 1024u
+#define GCSWEEPMAX 40
+#define GCSWEEPCOST 10
+#define GCFINALIZECOST 100
+
+/* Macros to set GCobj colors and flags. */
+#define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
+#define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
+#define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
+
+/* -- Mark phase ---------------------------------------------------------- */
+
+/* Mark a TValue (if needed). */
+#define gc_marktv(g, tv) \
+ { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
+ "TValue and GC type mismatch"); \
+ if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
+
+/* Mark a GCobj (if needed). */
+#define gc_markobj(g, o) \
+ { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
+
+/* Mark a string object. */
+#define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
+
+/* Mark a white GCobj. */
+static void gc_mark(global_State *g, GCobj *o)
+{
+ int gct = o->gch.gct;
+ lj_assertG(iswhite(o), "mark of non-white object");
+ lj_assertG(!isdead(g, o), "mark of dead object");
+ white2gray(o);
+ if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
+ GCtab *mt = tabref(gco2ud(o)->metatable);
+ gray2black(o); /* Userdata are never gray. */
+ if (mt) gc_markobj(g, mt);
+ gc_markobj(g, tabref(gco2ud(o)->env));
+ if (LJ_HASBUFFER && gco2ud(o)->udtype == UDTYPE_BUFFER) {
+ SBufExt *sbx = (SBufExt *)uddata(gco2ud(o));
+ if (sbufiscow(sbx) && gcref(sbx->cowref))
+ gc_markobj(g, gcref(sbx->cowref));
+ if (gcref(sbx->dict_str))
+ gc_markobj(g, gcref(sbx->dict_str));
+ if (gcref(sbx->dict_mt))
+ gc_markobj(g, gcref(sbx->dict_mt));
+ }
+ } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
+ GCupval *uv = gco2uv(o);
+ gc_marktv(g, uvval(uv));
+ if (uv->closed)
+ gray2black(o); /* Closed upvalues are never gray. */
+ } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
+ lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
+ gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE,
+ "bad GC type %d", gct);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Mark GC roots. */
+static void gc_mark_gcroot(global_State *g)
+{
+ ptrdiff_t i;
+ for (i = 0; i < GCROOT_MAX; i++)
+ if (gcref(g->gcroot[i]) != NULL)
+ gc_markobj(g, gcref(g->gcroot[i]));
+}
+
+/* Start a GC cycle and mark the root set. */
+static void gc_mark_start(global_State *g)
+{
+ setgcrefnull(g->gc.gray);
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ gc_markobj(g, mainthread(g));
+ gc_markobj(g, tabref(mainthread(g)->env));
+ gc_marktv(g, &g->registrytv);
+ gc_mark_gcroot(g);
+ g->gc.state = GCSpropagate;
+}
+
+/* Mark open upvalues. */
+static void gc_mark_uv(global_State *g)
+{
+ GCupval *uv;
+ for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
+ lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv,
+ "broken upvalue chain");
+ if (isgray(obj2gco(uv)))
+ gc_marktv(g, uvval(uv));
+ }
+}
+
+/* Mark userdata in mmudata list. */
+static void gc_mark_mmudata(global_State *g)
+{
+ GCobj *root = gcref(g->gc.mmudata);
+ GCobj *u = root;
+ if (u) {
+ do {
+ u = gcnext(u);
+ makewhite(g, u); /* Could be from previous GC. */
+ gc_mark(g, u);
+ } while (u != root);
+ }
+}
+
+/* Separate userdata objects to be finalized to mmudata list. */
+size_t lj_gc_separateudata(global_State *g, int all)
+{
+ size_t m = 0;
+ GCRef *p = &mainthread(g)->nextgc;
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL) {
+ if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
+ p = &o->gch.nextgc; /* Nothing to do. */
+ } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
+ markfinalized(o); /* Done, as there's no __gc metamethod. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise move userdata to be finalized to mmudata list. */
+ m += sizeudata(gco2ud(o));
+ markfinalized(o);
+ *p = o->gch.nextgc;
+ if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */
+ GCobj *root = gcref(g->gc.mmudata);
+ setgcrefr(o->gch.nextgc, root->gch.nextgc);
+ setgcref(root->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ } else { /* Create circular list. */
+ setgcref(o->gch.nextgc, o);
+ setgcref(g->gc.mmudata, o);
+ }
+ }
+ }
+ return m;
+}
+
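+/* Shape of the mmudata list built above: a circular, singly-linked list in
+** which g->gc.mmudata points at the *tail*, so the tail's next link is the
+** head (gc_finalize() below relies on this). A sketch with plain pointers:
+*/
+typedef struct UdNode { struct UdNode *next; } UdNode;
+static UdNode *ud_append(UdNode *tail, UdNode *o)
+{
+ if (tail) { o->next = tail->next; tail->next = o; }
+ else { o->next = o; } /* First element: link to itself. */
+ return o; /* The appended element becomes the new tail. */
+}
+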
+/* -- Propagation phase --------------------------------------------------- */
+
+/* Traverse a table. */
+static int gc_traverse_tab(global_State *g, GCtab *t)
+{
+ int weak = 0;
+ cTValue *mode;
+ GCtab *mt = tabref(t->metatable);
+ if (mt)
+ gc_markobj(g, mt);
+ mode = lj_meta_fastg(g, mt, MM_mode);
+ if (mode && tvisstr(mode)) { /* Valid __mode field? */
+ const char *modestr = strVdata(mode);
+ int c;
+ while ((c = *modestr++)) {
+ if (c == 'k') weak |= LJ_GC_WEAKKEY;
+ else if (c == 'v') weak |= LJ_GC_WEAKVAL;
+ }
+ if (weak) { /* Weak tables are cleared in the atomic phase. */
+#if LJ_HASFFI
+ CTState *cts = ctype_ctsG(g);
+ if (cts && cts->finalizer == t) {
+ weak = (int)(~0u & ~LJ_GC_WEAKVAL);
+ } else
+#endif
+ {
+ t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
+ setgcrefr(t->gclist, g->gc.weak);
+ setgcref(g->gc.weak, obj2gco(t));
+ }
+ }
+ }
+ if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */
+ return 1;
+ if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++)
+ gc_marktv(g, arrayslot(t, i));
+ }
+ if (t->hmask > 0) { /* Mark hash part. */
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) { /* Mark non-empty slot. */
+ lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot");
+ if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
+ if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
+ }
+ }
+ }
+ return weak;
+}
+
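+/* For reference, a weak-valued table of the kind queued on g->gc.weak above
+** can be built entirely through the standard Lua C API (needs lua.h; this
+** is a usage sketch, not GC-internal code):
+*/
+static void push_weakval_table(lua_State *L)
+{
+ lua_newtable(L); /* t */
+ lua_newtable(L); /* t, mt */
+ lua_pushliteral(L, "v"); /* t, mt, "v" */
+ lua_setfield(L, -2, "__mode"); /* mt.__mode = "v" */
+ lua_setmetatable(L, -2); /* setmetatable(t, mt); leaves t on top. */
+}
+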
+/* Traverse a function. */
+static void gc_traverse_func(global_State *g, GCfunc *fn)
+{
+ gc_markobj(g, tabref(fn->c.env));
+ if (isluafunc(fn)) {
+ uint32_t i;
+ lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv,
+ "function upvalues out of range");
+ gc_markobj(g, funcproto(fn));
+ for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */
+ gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
+ } else {
+ uint32_t i;
+ for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */
+ gc_marktv(g, &fn->c.upvalue[i]);
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace. */
+static void gc_marktrace(global_State *g, TraceNo traceno)
+{
+ GCobj *o = obj2gco(traceref(G2J(g), traceno));
+ lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped");
+ if (iswhite(o)) {
+ white2gray(o);
+ setgcrefr(o->gch.gclist, g->gc.gray);
+ setgcref(g->gc.gray, o);
+ }
+}
+
+/* Traverse a trace. */
+static void gc_traverse_trace(global_State *g, GCtrace *T)
+{
+ IRRef ref;
+ if (T->traceno == 0) return;
+ for (ref = T->nk; ref < REF_TRUE; ref++) {
+ IRIns *ir = &T->ir[ref];
+ if (ir->o == IR_KGC)
+ gc_markobj(g, ir_kgc(ir));
+ if (irt_is64(ir->t) && ir->o != IR_KNULL)
+ ref++;
+ }
+ if (T->link) gc_marktrace(g, T->link);
+ if (T->nextroot) gc_marktrace(g, T->nextroot);
+ if (T->nextside) gc_marktrace(g, T->nextside);
+ gc_markobj(g, gcref(T->startpt));
+}
+
+/* The current trace is a GC root while not anchored in the prototype (yet). */
+#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
+#else
+#define gc_traverse_curtrace(g) UNUSED(g)
+#endif
+
+/* Traverse a prototype. */
+static void gc_traverse_proto(global_State *g, GCproto *pt)
+{
+ ptrdiff_t i;
+ gc_mark_str(proto_chunkname(pt));
+ for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */
+ gc_markobj(g, proto_kgc(pt, i));
+#if LJ_HASJIT
+ if (pt->trace) gc_marktrace(g, pt->trace);
+#endif
+}
+
+/* Traverse the frame structure of a stack. */
+static MSize gc_traverse_frames(global_State *g, lua_State *th)
+{
+ TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
+ /* Note: extra vararg frame not skipped, marks function twice (harmless). */
+ for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
+ GCfunc *fn = frame_func(frame);
+ TValue *ftop = frame;
+ if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
+ if (ftop > top) top = ftop;
+ if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */
+ }
+ top++; /* Correct bias of -1 (frame == base-1). */
+ if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
+ return (MSize)(top - bot); /* Return minimum needed stack size. */
+}
+
+/* Traverse a thread object. */
+static void gc_traverse_thread(global_State *g, lua_State *th)
+{
+ TValue *o, *top = th->top;
+ for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
+ gc_marktv(g, o);
+ if (g->gc.state == GCSatomic) {
+ top = tvref(th->stack) + th->stacksize;
+ for (; o < top; o++) /* Clear unmarked slots. */
+ setnilV(o);
+ }
+ gc_markobj(g, tabref(th->env));
+ lj_state_shrinkstack(th, gc_traverse_frames(g, th));
+}
+
+/* Propagate one gray object. Traverse it and turn it black. */
+static size_t propagatemark(global_State *g)
+{
+ GCobj *o = gcref(g->gc.gray);
+ int gct = o->gch.gct;
+ lj_assertG(isgray(o), "propagation of non-gray object");
+ gray2black(o);
+ setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */
+ if (LJ_LIKELY(gct == ~LJ_TTAB)) {
+ GCtab *t = gco2tab(o);
+ if (gc_traverse_tab(g, t) > 0)
+ black2gray(o); /* Keep weak tables gray. */
+ return sizeof(GCtab) + sizeof(TValue) * t->asize +
+ (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
+ } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
+ GCfunc *fn = gco2func(o);
+ gc_traverse_func(g, fn);
+ return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
+ sizeCfunc((MSize)fn->c.nupvalues);
+ } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
+ GCproto *pt = gco2pt(o);
+ gc_traverse_proto(g, pt);
+ return pt->sizept;
+ } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
+ lua_State *th = gco2th(o);
+ setgcrefr(th->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+ black2gray(o); /* Threads are never black. */
+ gc_traverse_thread(g, th);
+ return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
+ } else {
+#if LJ_HASJIT
+ GCtrace *T = gco2trace(o);
+ gc_traverse_trace(g, T);
+ return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+ T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
+#else
+ lj_assertG(0, "bad GC type %d", gct);
+ return 0;
+#endif
+ }
+}
+
+/* Propagate all gray objects. */
+static size_t gc_propagate_gray(global_State *g)
+{
+ size_t m = 0;
+ while (gcref(g->gc.gray) != NULL)
+ m += propagatemark(g);
+ return m;
+}
+
+/* -- Sweep phase --------------------------------------------------------- */
+
+/* Type of GC free functions. */
+typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
+
+/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
+static const GCFreeFunc gc_freefunc[] = {
+ (GCFreeFunc)lj_str_free,
+ (GCFreeFunc)lj_func_freeuv,
+ (GCFreeFunc)lj_state_free,
+ (GCFreeFunc)lj_func_freeproto,
+ (GCFreeFunc)lj_func_free,
+#if LJ_HASJIT
+ (GCFreeFunc)lj_trace_free,
+#else
+ (GCFreeFunc)0,
+#endif
+#if LJ_HASFFI
+ (GCFreeFunc)lj_cdata_free,
+#else
+ (GCFreeFunc)0,
+#endif
+ (GCFreeFunc)lj_tab_free,
+ (GCFreeFunc)lj_udata_free
+};
+
+/* Full sweep of a GC list. */
+#define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0)
+
+/* Partial sweep of a GC list. */
+static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
+{
+ /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
+ int ow = otherwhite(g);
+ GCobj *o;
+ while ((o = gcref(*p)) != NULL && lim-- > 0) {
+ if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */
+ gc_fullsweep(g, &gco2th(o)->openupval);
+ if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
+ lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
+ "sweep of undead object");
+ makewhite(g, o); /* Value is alive, change to the current white. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise value is dead, free it. */
+ lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
+ "sweep of unlive object");
+ setgcrefr(*p, o->gch.nextgc);
+ if (o == gcref(g->gc.root))
+ setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */
+ gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
+ }
+ }
+ return p;
+}
+
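+/* The liveness test above, isolated (sketch; ignores the LJ_GC_(S)FIXED
+** bits that keep objects alive on shutdown): an object survives the sweep
+** iff flipping its white bits against the other white leaves a nonzero
+** mask, i.e. it is black or carries the current white.
+*/
+static int gc_is_alive(uint8_t marked, uint8_t currentwhite)
+{
+ uint8_t ow = currentwhite ^ LJ_GC_WHITES; /* otherwhite(g) */
+ return ((marked ^ LJ_GC_WHITES) & ow) != 0;
+}
+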
+/* Sweep one string interning table chain. Preserves hashalg bit. */
+static void gc_sweepstr(global_State *g, GCRef *chain)
+{
+ /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
+ int ow = otherwhite(g);
+ uintptr_t u = gcrefu(*chain);
+ GCRef q;
+ GCRef *p = &q;
+ GCobj *o;
+ setgcrefp(q, (u & ~(uintptr_t)1));
+ while ((o = gcref(*p)) != NULL) {
+ if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */
+ lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
+ "sweep of undead string");
+ makewhite(g, o); /* String is alive, change to the current white. */
+ p = &o->gch.nextgc;
+ } else { /* Otherwise string is dead, free it. */
+ lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
+ "sweep of unlive string");
+ setgcrefr(*p, o->gch.nextgc);
+ lj_str_free(g, gco2str(o));
+ }
+ }
+ setgcrefp(*chain, (gcrefu(q) | (u & 1)));
+}
+
+/* Check whether we can clear a key or a value slot from a table. */
+static int gc_mayclear(cTValue *o, int val)
+{
+ if (tvisgcv(o)) { /* Only collectable objects can be weak references. */
+ if (tvisstr(o)) { /* But strings cannot be used as weak references. */
+ gc_mark_str(strV(o)); /* And need to be marked. */
+ return 0;
+ }
+ if (iswhite(gcV(o)))
+ return 1; /* Object is about to be collected. */
+ if (tvisudata(o) && val && isfinalized(udataV(o)))
+ return 1; /* Finalized userdata is dropped only from values. */
+ }
+ return 0; /* Cannot clear. */
+}
+
+/* Clear collected entries from weak tables. */
+static void gc_clearweak(global_State *g, GCobj *o)
+{
+ UNUSED(g);
+ while (o) {
+ GCtab *t = gco2tab(o);
+ lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table");
+ if ((t->marked & LJ_GC_WEAKVAL)) {
+ MSize i, asize = t->asize;
+ for (i = 0; i < asize; i++) {
+ /* Clear array slot when value is about to be collected. */
+ TValue *tv = arrayslot(t, i);
+ if (gc_mayclear(tv, 1))
+ setnilV(tv);
+ }
+ }
+ if (t->hmask > 0) {
+ Node *node = noderef(t->node);
+ MSize i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ /* Clear hash slot when key or value is about to be collected. */
+ if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
+ gc_mayclear(&n->val, 1)))
+ setnilV(&n->val);
+ }
+ }
+ o = gcref(t->gclist);
+ }
+}
+
+/* Call a userdata or cdata finalizer. */
+static void gc_call_finalizer(global_State *g, lua_State *L,
+ cTValue *mo, GCobj *o)
+{
+ /* Save and restore lots of state around the __gc callback. */
+ uint8_t oldh = hook_save(g);
+ GCSize oldt = g->gc.threshold;
+ int errcode;
+ TValue *top;
+ lj_trace_abort(g);
+ hook_entergc(g); /* Disable hooks and new traces during __gc. */
+ if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
+ g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */
+ top = L->top;
+ copyTV(L, top++, mo);
+ if (LJ_FR2) setnilV(top++);
+ setgcV(L, top, o, ~o->gch.gct);
+ L->top = top+1;
+ errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */
+ hook_restore(g, oldh);
+ if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g);
+ g->gc.threshold = oldt; /* Restore GC threshold. */
+ if (errcode)
+ lj_err_throw(L, errcode); /* Propagate errors. */
+}
+
+/* Finalize one userdata or cdata object from the mmudata list. */
+static void gc_finalize(lua_State *L)
+{
+ global_State *g = G(L);
+ GCobj *o = gcnext(gcref(g->gc.mmudata));
+ cTValue *mo;
+ lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace");
+ /* Unchain from list of userdata to be finalized. */
+ if (o == gcref(g->gc.mmudata))
+ setgcrefnull(g->gc.mmudata);
+ else
+ setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
+#if LJ_HASFFI
+ if (o->gch.gct == ~LJ_TCDATA) {
+ TValue tmp, *tv;
+ /* Add cdata back to the GC list and make it white. */
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ /* Resolve finalizer. */
+ setcdataV(L, &tmp, gco2cd(o));
+ tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
+ if (!tvisnil(tv)) {
+ g->gc.nocdatafin = 0;
+ copyTV(L, &tmp, tv);
+ setnilV(tv); /* Clear entry in finalizer table. */
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ return;
+ }
+#endif
+ /* Add userdata back to the main userdata list and make it white. */
+ setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, o);
+ makewhite(g, o);
+ /* Resolve the __gc metamethod. */
+ mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
+ if (mo)
+ gc_call_finalizer(g, L, mo, o);
+}
+
+/* Finalize all userdata objects from mmudata list. */
+void lj_gc_finalize_udata(lua_State *L)
+{
+ while (gcref(G(L)->gc.mmudata) != NULL)
+ gc_finalize(L);
+}
+
+#if LJ_HASFFI
+/* Finalize all cdata objects from finalizer table. */
+void lj_gc_finalize_cdata(lua_State *L)
+{
+ global_State *g = G(L);
+ CTState *cts = ctype_ctsG(g);
+ if (cts) {
+ GCtab *t = cts->finalizer;
+ Node *node = noderef(t->node);
+ ptrdiff_t i;
+ setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */
+ for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
+ if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
+ GCobj *o = gcV(&node[i].key);
+ TValue tmp;
+ makewhite(g, o);
+ o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
+ copyTV(L, &tmp, &node[i].val);
+ setnilV(&node[i].val);
+ gc_call_finalizer(g, L, &tmp, o);
+ }
+ }
+}
+#endif
+
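+/* The __gc metamethod that lj_gc_separateudata() checks for is an ordinary
+** function from the embedder's point of view. A hypothetical handler,
+** attached via the standard API (sketch only):
+*/
+static int my_udata_gc(lua_State *L)
+{
+ void *p = lua_touserdata(L, 1);
+ (void)p; /* Release external resources owned by p here. */
+ return 0;
+}
+/* Attach with: luaL_newmetatable(L, "my.type");
+** lua_pushcfunction(L, my_udata_gc); lua_setfield(L, -2, "__gc");
+*/
+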
+/* Free all remaining GC objects. */
+void lj_gc_freeall(global_State *g)
+{
+ MSize i, strmask;
+ /* Free everything, except super-fixed objects (the main thread). */
+ g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
+ gc_fullsweep(g, &g->gc.root);
+ strmask = g->str.mask;
+ for (i = 0; i <= strmask; i++) /* Free all string hash chains. */
+ gc_sweepstr(g, &g->str.tab[i]);
+}
+
+/* -- Collector ----------------------------------------------------------- */
+
+/* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
+static void atomic(global_State *g, lua_State *L)
+{
+ size_t udsize;
+
+ gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */
+ gc_propagate_gray(g); /* Propagate any left-overs. */
+
+ setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */
+ setgcrefnull(g->gc.weak);
+ lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white");
+ gc_markobj(g, L); /* Mark running thread. */
+ gc_traverse_curtrace(g); /* Traverse current trace. */
+ gc_mark_gcroot(g); /* Mark GC roots (again). */
+ gc_propagate_gray(g); /* Propagate all of the above. */
+
+ setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */
+ setgcrefnull(g->gc.grayagain);
+ gc_propagate_gray(g); /* Propagate it. */
+
+ udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */
+ gc_mark_mmudata(g); /* Mark them. */
+ udsize += gc_propagate_gray(g); /* And propagate the marks. */
+
+ /* All marking done, clear weak tables. */
+ gc_clearweak(g, gcref(g->gc.weak));
+
+ lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */
+
+ /* Prepare for sweep phase. */
+ g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */
+ g->strempty.marked = g->gc.currentwhite;
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */
+}
+
+/* GC state machine. Returns a cost estimate for each step performed. */
+static size_t gc_onestep(lua_State *L)
+{
+ global_State *g = G(L);
+ switch (g->gc.state) {
+ case GCSpause:
+ gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */
+ return 0;
+ case GCSpropagate:
+ if (gcref(g->gc.gray) != NULL)
+ return propagatemark(g); /* Propagate one gray object. */
+ g->gc.state = GCSatomic; /* End of mark phase. */
+ return 0;
+ case GCSatomic:
+ if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */
+ return LJ_MAX_MEM;
+ atomic(g, L);
+ g->gc.state = GCSsweepstring; /* Start of sweep phase. */
+ g->gc.sweepstr = 0;
+ return 0;
+ case GCSsweepstring: {
+ GCSize old = g->gc.total;
+ gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]); /* Sweep one chain. */
+ if (g->gc.sweepstr > g->str.mask)
+ g->gc.state = GCSsweep; /* All string hash chains swept. */
+ lj_assertG(old >= g->gc.total, "sweep increased memory");
+ g->gc.estimate -= old - g->gc.total;
+ return GCSWEEPCOST;
+ }
+ case GCSsweep: {
+ GCSize old = g->gc.total;
+ setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
+ lj_assertG(old >= g->gc.total, "sweep increased memory");
+ g->gc.estimate -= old - g->gc.total;
+ if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
+ if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1)
+ lj_str_resize(L, g->str.mask >> 1); /* Shrink string table. */
+ if (gcref(g->gc.mmudata)) { /* Need any finalizations? */
+ g->gc.state = GCSfinalize;
+#if LJ_HASFFI
+ g->gc.nocdatafin = 1;
+#endif
+ } else { /* Otherwise skip this phase to help the JIT. */
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ }
+ }
+ return GCSWEEPMAX*GCSWEEPCOST;
+ }
+ case GCSfinalize:
+ if (gcref(g->gc.mmudata) != NULL) {
+ GCSize old = g->gc.total;
+ if (tvref(g->jit_base)) /* Don't call finalizers on trace. */
+ return LJ_MAX_MEM;
+ gc_finalize(L); /* Finalize one userdata object. */
+ if (old >= g->gc.total && g->gc.estimate > old - g->gc.total)
+ g->gc.estimate -= old - g->gc.total;
+ if (g->gc.estimate > GCFINALIZECOST)
+ g->gc.estimate -= GCFINALIZECOST;
+ return GCFINALIZECOST;
+ }
+#if LJ_HASFFI
+ if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
+#endif
+ g->gc.state = GCSpause; /* End of GC cycle. */
+ g->gc.debt = 0;
+ return 0;
+ default:
+ lj_assertG(0, "bad GC state");
+ return 0;
+ }
+}
+
+/* Perform a limited amount of incremental GC steps. */
+int LJ_FASTCALL lj_gc_step(lua_State *L)
+{
+ global_State *g = G(L);
+ GCSize lim;
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ lim = (GCSTEPSIZE/100) * g->gc.stepmul;
+ if (lim == 0)
+ lim = LJ_MAX_MEM;
+ if (g->gc.total > g->gc.threshold)
+ g->gc.debt += g->gc.total - g->gc.threshold;
+ do {
+ lim -= (GCSize)gc_onestep(L);
+ if (g->gc.state == GCSpause) {
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+ return 1; /* Finished a GC cycle. */
+ }
+ } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
+ if (g->gc.debt < GCSTEPSIZE) {
+ g->gc.threshold = g->gc.total + GCSTEPSIZE;
+ g->vmstate = ostate;
+ return -1;
+ } else {
+ g->gc.debt -= GCSTEPSIZE;
+ g->gc.threshold = g->gc.total;
+ g->vmstate = ostate;
+ return 0;
+ }
+}
+
+/* Ditto, but fix the stack top first. */
+void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
+{
+ if (curr_funcisL(L)) L->top = curr_topL(L);
+ lj_gc_step(L);
+}
+
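+/* Back-of-envelope view of the step budget in lj_gc_step(): with
+** GCSTEPSIZE = 1024, lim = (1024/100)*stepmul = 10*stepmul cost units per
+** call, i.e. 2000 for the usual default stepmul of 200. Sketch:
+*/
+static GCSize gc_step_budget(uint32_t stepmul)
+{
+ GCSize lim = (GCSTEPSIZE/100) * stepmul;
+ return lim ? lim : LJ_MAX_MEM; /* stepmul 0: run until the cycle ends. */
+}
+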
+#if LJ_HASJIT
+/* Perform multiple GC steps. Called from JIT-compiled code. */
+int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
+{
+ lua_State *L = gco2th(gcref(g->cur_L));
+ L->base = tvref(G(L)->jit_base);
+ L->top = curr_topL(L);
+ while (steps-- > 0 && lj_gc_step(L) == 0)
+ ;
+ /* Return 1 to force a trace exit. */
+ return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
+}
+#endif
+
+/* Perform a full GC cycle. */
+void lj_gc_fullgc(lua_State *L)
+{
+ global_State *g = G(L);
+ int32_t ostate = g->vmstate;
+ setvmstate(g, GC);
+ if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */
+ setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */
+ setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */
+ setgcrefnull(g->gc.grayagain);
+ setgcrefnull(g->gc.weak);
+ g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */
+ g->gc.sweepstr = 0;
+ }
+ while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
+ gc_onestep(L); /* Finish sweep. */
+ lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause,
+ "bad GC state");
+ /* Now perform a full GC. */
+ g->gc.state = GCSpause;
+ do { gc_onestep(L); } while (g->gc.state != GCSpause);
+ g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
+ g->vmstate = ostate;
+}
+
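+/* From the embedder's side, a full cycle is requested through the public
+** API (standard Lua C API); a collection pass might look like this:
+*/
+static void collect_everything(lua_State *L)
+{
+ lua_gc(L, LUA_GCCOLLECT, 0); /* Full cycle; backed by lj_gc_fullgc(). */
+}
+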
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Move the GC propagation frontier forward. */
+void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
+{
+ lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o),
+ "bad object states for forward barrier");
+ lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
+ "bad GC state");
+ lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table");
+ /* Preserve invariant during propagation. Otherwise it doesn't matter. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, v); /* Move frontier forward. */
+ else
+ makewhite(g, o); /* Make it white to avoid the following barrier. */
+}
+
+/* Specialized barrier for closed upvalue. Pass &uv->tv. */
+void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
+{
+#define TV2MARKED(x) \
+ (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_mark(g, gcV(tv));
+ else
+ TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
+#undef TV2MARKED
+}
+
+/* Close upvalue. Also needs a write barrier. */
+void lj_gc_closeuv(global_State *g, GCupval *uv)
+{
+ GCobj *o = obj2gco(uv);
+ /* Copy stack slot to upvalue itself and point to the copy. */
+ copyTV(mainthread(g), &uv->tv, uvval(uv));
+ setmref(uv->v, &uv->tv);
+ uv->closed = 1;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ if (isgray(o)) { /* A closed upvalue is never gray, so fix this. */
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
+ gray2black(o); /* Make it black and preserve invariant. */
+ if (tviswhite(&uv->tv))
+ lj_gc_barrierf(g, o, gcV(&uv->tv));
+ } else {
+ makewhite(g, o); /* Make it white, i.e. sweep the upvalue. */
+ lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
+ "bad GC state");
+ }
+ }
+}
+
+#if LJ_HASJIT
+/* Mark a trace if it's saved during the propagation phase. */
+void lj_gc_barriertrace(global_State *g, uint32_t traceno)
+{
+ if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
+ gc_marktrace(g, traceno);
+}
+#endif
+
+/* -- Allocator ----------------------------------------------------------- */
+
+/* Call pluggable memory allocator to allocate or resize a fragment. */
+void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
+{
+ global_State *g = G(L);
+ lj_assertG((osz == 0) == (p == NULL), "realloc API violation");
+ p = g->allocf(g->allocd, p, osz, nsz);
+ if (p == NULL && nsz > 0)
+ lj_err_mem(L);
+ lj_assertG((nsz == 0) == (p == NULL), "allocf API violation");
+ lj_assertG(checkptrGC(p),
+ "allocated memory address %p outside required range", p);
+ g->gc.total = (g->gc.total - osz) + nsz;
+ return p;
+}
+
+/* Allocate new GC object and link it to the root set. */
+void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
+{
+ global_State *g = G(L);
+ GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
+ if (o == NULL)
+ lj_err_mem(L);
+ lj_assertG(checkptrGC(o),
+ "allocated memory address %p outside required range", o);
+ g->gc.total += size;
+ setgcrefr(o->gch.nextgc, g->gc.root);
+ setgcref(g->gc.root, o);
+ newwhite(g, o);
+ return o;
+}
+
+/* Resize growable vector. */
+void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
+{
+ MSize sz = (*szp) << 1;
+ if (sz < LJ_MIN_VECSZ)
+ sz = LJ_MIN_VECSZ;
+ if (sz > lim)
+ sz = lim;
+ p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
+ *szp = sz;
+ return p;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_gc.h b/libs/luajit-cmake/luajit/src/lj_gc.h
new file mode 100644
index 0000000..0df7dee
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_gc.h
@@ -0,0 +1,136 @@
+/*
+** Garbage collector.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GC_H
+#define _LJ_GC_H
+
+#include "lj_obj.h"
+
+/* Garbage collector states. Order matters. */
+enum {
+ GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize
+};
+
+/* Bitmasks for marked field of GCobj. */
+#define LJ_GC_WHITE0 0x01
+#define LJ_GC_WHITE1 0x02
+#define LJ_GC_BLACK 0x04
+#define LJ_GC_FINALIZED 0x08
+#define LJ_GC_WEAKKEY 0x08
+#define LJ_GC_WEAKVAL 0x10
+#define LJ_GC_CDATA_FIN 0x10
+#define LJ_GC_FIXED 0x20
+#define LJ_GC_SFIXED 0x40
+
+#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1)
+#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK)
+#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL)
+
+/* Macros to test and set GCobj colors. */
+#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES)
+#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK)
+#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES)))
+#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x)))
+#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES)
+#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES)
+
+#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES)
+#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g))
+#define makewhite(g, x) \
+ ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g))
+#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES)
+#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK)
+#define fixstring(s) ((s)->marked |= LJ_GC_FIXED)
+#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED)
+
+/* Collector. */
+LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all);
+LJ_FUNC void lj_gc_finalize_udata(lua_State *L);
+#if LJ_HASFFI
+LJ_FUNC void lj_gc_finalize_cdata(lua_State *L);
+#else
+#define lj_gc_finalize_cdata(L) UNUSED(L)
+#endif
+LJ_FUNC void lj_gc_freeall(global_State *g);
+LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L);
+LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L);
+#if LJ_HASJIT
+LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps);
+#endif
+LJ_FUNC void lj_gc_fullgc(lua_State *L);
+
+/* GC check: drive collector forward if the GC threshold has been reached. */
+#define lj_gc_check(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step(L); }
+#define lj_gc_check_fixtop(L) \
+ { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
+ lj_gc_step_fixtop(L); }
+
+/* Write barriers. */
+LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v);
+LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv);
+LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv);
+#if LJ_HASJIT
+LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
+#endif
+
+/* Move the GC propagation frontier back for tables (make it gray again). */
+static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
+{
+ GCobj *o = obj2gco(t);
+ lj_assertG(isblack(o) && !isdead(g, o),
+ "bad object states for backward barrier");
+ lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
+ "bad GC state");
+ black2gray(o);
+ setgcrefr(t->gclist, g->gc.grayagain);
+ setgcref(g->gc.grayagain, o);
+}
+
+/* Barrier for stores to table objects. TValue and GCobj variant. */
+#define lj_gc_anybarriert(L, t) \
+ { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_barriert(L, t, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+#define lj_gc_objbarriert(L, t, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \
+ lj_gc_barrierback(G(L), (t)); }
+
+/* Barrier for stores to any other object. TValue and GCobj variant. */
+#define lj_gc_barrier(L, p, tv) \
+ { if (tviswhite(tv) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); }
+#define lj_gc_objbarrier(L, p, o) \
+ { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \
+ lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
+
+/* Allocator. */
+LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz);
+LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size);
+LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
+ MSize *szp, MSize lim, MSize esz);
+
+#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s))
+
+static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
+{
+ g->gc.total -= (GCSize)osize;
+ g->allocf(g->allocd, p, osize, 0);
+}
+
+#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (GCSize)((n)*sizeof(t))))
+#define lj_mem_reallocvec(L, p, on, n, t) \
+ ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (GCSize)((n)*sizeof(t))))
+#define lj_mem_growvec(L, p, n, m, t) \
+ ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
+#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
+
+#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t)))
+#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s)))
+#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p)))
+
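+/* Usage sketch for the vector helpers above (internal allocator API; the
+** growth loop and the LJ_MAX_MEM32 limit are illustrative assumptions):
+*/
+static GCRef *grow_refs(lua_State *L, GCRef *v, MSize *np, MSize need)
+{
+ while (*np < need)
+ lj_mem_growvec(L, v, *np, LJ_MAX_MEM32, GCRef); /* Doubles *np. */
+ return v; /* Free later with lj_mem_freevec(G(L), v, *np, GCRef). */
+}
+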
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_gdbjit.c b/libs/luajit-cmake/luajit/src/lj_gdbjit.c
new file mode 100644
index 0000000..c50d0d4
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_gdbjit.c
@@ -0,0 +1,818 @@
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_gdbjit_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_frame.h"
+#include "lj_buf.h"
+#include "lj_strfmt.h"
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* This is not compiled in by default.
+** Enable with -DLUAJIT_USE_GDBJIT in the Makefile and recompile everything.
+*/
+#ifdef LUAJIT_USE_GDBJIT
+
+/* The GDB JIT API allows JIT compilers to pass debug information about
+** JIT-compiled code back to GDB. You need at least GDB 7.0 or higher
+** to see it in action.
+**
+** This is a passive API, so it works even when not running under GDB
+** or when attaching to an already running process. Alas, this implies
+** enabling it always has a non-negligible overhead -- do not use in
+** release mode!
+**
+** The LuaJIT GDB JIT client is rather minimal at the moment. It gives
+** each trace a symbol name and adds a source location and frame unwind
+** information. Obviously LuaJIT itself and any embedding C application
+** should be compiled with debug symbols, too (see the Makefile).
+**
+** Traces are named TRACE_1, TRACE_2, ...; these correspond to the trace
+** numbers from -jv or -jdump. Use "break TRACE_1" or "tbreak TRACE_1" etc.
+** to set breakpoints on specific traces (even ahead of their creation).
+**
+** The source location for each trace allows listing the corresponding
+** source lines with the GDB command "list" (but only if the Lua source
+** has been loaded from a file). Currently this is always set to the
+** location where the trace has been started.
+**
+** Frame unwind information can be inspected with the GDB command
+** "info frame". This also allows proper backtraces across JIT-compiled
+** code with the GDB command "bt".
+**
+** You probably want to add the following settings to a .gdbinit file
+** (or add them to ~/.gdbinit):
+** set disassembly-flavor intel
+** set breakpoint pending on
+**
+** Here's a sample GDB session:
+** ------------------------------------------------------------------------
+
+$ cat >x.lua
+for outer=1,100 do
+ for inner=1,100 do end
+end
+^D
+
+$ luajit -jv x.lua
+[TRACE 1 x.lua:2]
+[TRACE 2 (1/3) x.lua:1 -> 1]
+
+$ gdb --quiet --args luajit x.lua
+(gdb) tbreak TRACE_1
+Function "TRACE_1" not defined.
+Temporary breakpoint 1 (TRACE_1) pending.
+(gdb) run
+Starting program: luajit x.lua
+
+Temporary breakpoint 1, TRACE_1 () at x.lua:2
+2 for inner=1,100 do end
+(gdb) list
+1 for outer=1,100 do
+2 for inner=1,100 do end
+3 end
+(gdb) bt
+#0 TRACE_1 () at x.lua:2
+#1 0x08053690 in lua_pcall [...]
+[...]
+#7 0x0806ff90 in main [...]
+(gdb) disass TRACE_1
+Dump of assembler code for function TRACE_1:
+0xf7fd9fba <TRACE_1+0>: mov DWORD PTR ds:0xf7e0e2a0,0x1
+0xf7fd9fc4 <TRACE_1+10>: movsd xmm7,QWORD PTR [edx+0x20]
+[...]
+0xf7fd9ff8 <TRACE_1+62>: jmp 0xf7fd2014
+End of assembler dump.
+(gdb) tbreak TRACE_2
+Function "TRACE_2" not defined.
+Temporary breakpoint 2 (TRACE_2) pending.
+(gdb) cont
+Continuing.
+
+Temporary breakpoint 2, TRACE_2 () at x.lua:1
+1 for outer=1,100 do
+(gdb) info frame
+Stack level 0, frame at 0xffffd7c0:
+ eip = 0xf7fd9f60 in TRACE_2 (x.lua:1); saved eip 0x8053690
+ called by frame at 0xffffd7e0
+ source language unknown.
+ Arglist at 0xffffd78c, args:
+ Locals at 0xffffd78c, Previous frame's sp is 0xffffd7c0
+ Saved registers:
+ ebx at 0xffffd7ac, ebp at 0xffffd7b8, esi at 0xffffd7b0, edi at 0xffffd7b4,
+ eip at 0xffffd7bc
+(gdb)
+
+** ------------------------------------------------------------------------
+*/
+
+/* -- GDB JIT API --------------------------------------------------------- */
+
+/* GDB JIT actions. */
+enum {
+ GDBJIT_NOACTION = 0,
+ GDBJIT_REGISTER,
+ GDBJIT_UNREGISTER
+};
+
+/* GDB JIT entry. */
+typedef struct GDBJITentry {
+ struct GDBJITentry *next_entry;
+ struct GDBJITentry *prev_entry;
+ const char *symfile_addr;
+ uint64_t symfile_size;
+} GDBJITentry;
+
+/* GDB JIT descriptor. */
+typedef struct GDBJITdesc {
+ uint32_t version;
+ uint32_t action_flag;
+ GDBJITentry *relevant_entry;
+ GDBJITentry *first_entry;
+} GDBJITdesc;
+
+GDBJITdesc __jit_debug_descriptor = {
+ 1, GDBJIT_NOACTION, NULL, NULL
+};
+
+/* GDB sets a breakpoint at this function. */
+void LJ_NOINLINE __jit_debug_register_code()
+{
+ __asm__ __volatile__("");
+}
+
+/* -- In-memory ELF object definitions ------------------------------------ */
+
+/* ELF definitions. */
+typedef struct ELFheader {
+ uint8_t emagic[4];
+ uint8_t eclass;
+ uint8_t eendian;
+ uint8_t eversion;
+ uint8_t eosabi;
+ uint8_t eabiversion;
+ uint8_t epad[7];
+ uint16_t type;
+ uint16_t machine;
+ uint32_t version;
+ uintptr_t entry;
+ uintptr_t phofs;
+ uintptr_t shofs;
+ uint32_t flags;
+ uint16_t ehsize;
+ uint16_t phentsize;
+ uint16_t phnum;
+ uint16_t shentsize;
+ uint16_t shnum;
+ uint16_t shstridx;
+} ELFheader;
+
+typedef struct ELFsectheader {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t addr;
+ uintptr_t ofs;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t align;
+ uintptr_t entsize;
+} ELFsectheader;
+
+#define ELFSECT_IDX_ABS 0xfff1
+
+enum {
+ ELFSECT_TYPE_PROGBITS = 1,
+ ELFSECT_TYPE_SYMTAB = 2,
+ ELFSECT_TYPE_STRTAB = 3,
+ ELFSECT_TYPE_NOBITS = 8
+};
+
+#define ELFSECT_FLAGS_WRITE 1
+#define ELFSECT_FLAGS_ALLOC 2
+#define ELFSECT_FLAGS_EXEC 4
+
+typedef struct ELFsymbol {
+#if LJ_64
+ uint32_t name;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+ uintptr_t value;
+ uint64_t size;
+#else
+ uint32_t name;
+ uintptr_t value;
+ uint32_t size;
+ uint8_t info;
+ uint8_t other;
+ uint16_t sectidx;
+#endif
+} ELFsymbol;
+
+enum {
+ ELFSYM_TYPE_FUNC = 2,
+ ELFSYM_TYPE_FILE = 4,
+ ELFSYM_BIND_LOCAL = 0 << 4,
+ ELFSYM_BIND_GLOBAL = 1 << 4,
+};
+
+/* DWARF definitions. */
+#define DW_CIE_VERSION 1
+
+enum {
+ DW_CFA_nop = 0x0,
+ DW_CFA_offset_extended = 0x5,
+ DW_CFA_def_cfa = 0xc,
+ DW_CFA_def_cfa_offset = 0xe,
+ DW_CFA_offset_extended_sf = 0x11,
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80
+};
+
+enum {
+ DW_EH_PE_udata4 = 3,
+ DW_EH_PE_textrel = 0x20
+};
+
+enum {
+ DW_TAG_compile_unit = 0x11
+};
+
+enum {
+ DW_children_no = 0,
+ DW_children_yes = 1
+};
+
+enum {
+ DW_AT_name = 0x03,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12
+};
+
+enum {
+ DW_FORM_addr = 0x01,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_string = 0x08
+};
+
+enum {
+ DW_LNS_extended_op = 0,
+ DW_LNS_copy = 1,
+ DW_LNS_advance_pc = 2,
+ DW_LNS_advance_line = 3
+};
+
+enum {
+ DW_LNE_end_sequence = 1,
+ DW_LNE_set_address = 2
+};
+
+enum {
+#if LJ_TARGET_X86
+ DW_REG_AX, DW_REG_CX, DW_REG_DX, DW_REG_BX,
+ DW_REG_SP, DW_REG_BP, DW_REG_SI, DW_REG_DI,
+ DW_REG_RA,
+#elif LJ_TARGET_X64
+ /* Yes, the order is strange, but correct. */
+ DW_REG_AX, DW_REG_DX, DW_REG_CX, DW_REG_BX,
+ DW_REG_SI, DW_REG_DI, DW_REG_BP, DW_REG_SP,
+ DW_REG_8, DW_REG_9, DW_REG_10, DW_REG_11,
+ DW_REG_12, DW_REG_13, DW_REG_14, DW_REG_15,
+ DW_REG_RA,
+#elif LJ_TARGET_ARM
+ DW_REG_SP = 13,
+ DW_REG_RA = 14,
+#elif LJ_TARGET_ARM64
+ DW_REG_SP = 31,
+ DW_REG_RA = 30,
+#elif LJ_TARGET_PPC
+ DW_REG_SP = 1,
+ DW_REG_RA = 65,
+ DW_REG_CR = 70,
+#elif LJ_TARGET_MIPS
+ DW_REG_SP = 29,
+ DW_REG_RA = 31,
+#else
+#error "Unsupported target architecture"
+#endif
+};
+
+/* Minimal list of sections for the in-memory ELF object. */
+enum {
+ GDBJIT_SECT_NULL,
+ GDBJIT_SECT_text,
+ GDBJIT_SECT_eh_frame,
+ GDBJIT_SECT_shstrtab,
+ GDBJIT_SECT_strtab,
+ GDBJIT_SECT_symtab,
+ GDBJIT_SECT_debug_info,
+ GDBJIT_SECT_debug_abbrev,
+ GDBJIT_SECT_debug_line,
+ GDBJIT_SECT__MAX
+};
+
+enum {
+ GDBJIT_SYM_UNDEF,
+ GDBJIT_SYM_FILE,
+ GDBJIT_SYM_FUNC,
+ GDBJIT_SYM__MAX
+};
+
+/* In-memory ELF object. */
+typedef struct GDBJITobj {
+ ELFheader hdr; /* ELF header. */
+ ELFsectheader sect[GDBJIT_SECT__MAX]; /* ELF sections. */
+ ELFsymbol sym[GDBJIT_SYM__MAX]; /* ELF symbol table. */
+ uint8_t space[4096]; /* Space for various section data. */
+} GDBJITobj;
+
+/* Combined structure for GDB JIT entry and ELF object. */
+typedef struct GDBJITentryobj {
+ GDBJITentry entry;
+ size_t sz;
+ GDBJITobj obj;
+} GDBJITentryobj;
+
+/* Template for in-memory ELF header. */
+static const ELFheader elfhdr_template = {
+ .emagic = { 0x7f, 'E', 'L', 'F' },
+ .eclass = LJ_64 ? 2 : 1,
+ .eendian = LJ_ENDIAN_SELECT(1, 2),
+ .eversion = 1,
+#if LJ_TARGET_LINUX
+ .eosabi = 0, /* Nope, it's not 3. */
+#elif defined(__FreeBSD__)
+ .eosabi = 9,
+#elif defined(__NetBSD__)
+ .eosabi = 2,
+#elif defined(__OpenBSD__)
+ .eosabi = 12,
+#elif defined(__DragonFly__)
+ .eosabi = 0,
+#elif LJ_TARGET_SOLARIS
+ .eosabi = 6,
+#else
+ .eosabi = 0,
+#endif
+ .eabiversion = 0,
+ .epad = { 0, 0, 0, 0, 0, 0, 0 },
+ .type = 1,
+#if LJ_TARGET_X86
+ .machine = 3,
+#elif LJ_TARGET_X64
+ .machine = 62,
+#elif LJ_TARGET_ARM
+ .machine = 40,
+#elif LJ_TARGET_ARM64
+ .machine = 183,
+#elif LJ_TARGET_PPC
+ .machine = 20,
+#elif LJ_TARGET_MIPS
+ .machine = 8,
+#else
+#error "Unsupported target architecture"
+#endif
+ .version = 1,
+ .entry = 0,
+ .phofs = 0,
+ .shofs = offsetof(GDBJITobj, sect),
+ .flags = 0,
+ .ehsize = sizeof(ELFheader),
+ .phentsize = 0,
+ .phnum = 0,
+ .shentsize = sizeof(ELFsectheader),
+ .shnum = GDBJIT_SECT__MAX,
+ .shstridx = GDBJIT_SECT_shstrtab
+};
+
+/* -- In-memory ELF object generation ------------------------------------- */
+
+/* Context for generating the ELF object for the GDB JIT API. */
+typedef struct GDBJITctx {
+ uint8_t *p; /* Pointer to next address in obj.space. */
+ uint8_t *startp; /* Pointer to start address in obj.space. */
+ GCtrace *T; /* Generate symbols for this trace. */
+ uintptr_t mcaddr; /* Machine code address. */
+ MSize szmcode; /* Size of machine code. */
+ MSize spadjp; /* Stack adjustment for parent trace or interpreter. */
+ MSize spadj; /* Stack adjustment for trace itself. */
+ BCLine lineno; /* Starting line number. */
+ const char *filename; /* Starting file name. */
+ size_t objsize; /* Final size of ELF object. */
+ GDBJITobj obj; /* In-memory ELF object. */
+} GDBJITctx;
+
+/* Add a zero-terminated string. */
+static uint32_t gdbjit_strz(GDBJITctx *ctx, const char *str)
+{
+ uint8_t *p = ctx->p;
+ uint32_t ofs = (uint32_t)(p - ctx->startp);
+ do {
+ *p++ = (uint8_t)*str;
+ } while (*str++);
+ ctx->p = p;
+ return ofs;
+}
+
+/* Append a decimal number. */
+static void gdbjit_catnum(GDBJITctx *ctx, uint32_t n)
+{
+ if (n >= 10) { uint32_t m = n / 10; n = n % 10; gdbjit_catnum(ctx, m); }
+ *ctx->p++ = '0' + n;
+}
+
+/* Add a SLEB128 value. */
+static void gdbjit_sleb128(GDBJITctx *ctx, int32_t v)
+{
+ uint8_t *p = ctx->p;
+ for (; (uint32_t)(v+0x40) >= 0x80; v >>= 7)
+ *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)(v & 0x7f);
+ ctx->p = p;
+}
+
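+/* For reference, the matching SLEB128 decoder (not used by the emitter;
+** shown only to document the encoding):
+*/
+static int32_t gdbjit_sleb128_decode(const uint8_t *p)
+{
+ int32_t v = 0; uint32_t shift = 0; uint8_t b;
+ do { b = *p++; v |= (int32_t)(b & 0x7f) << shift; shift += 7; } while (b & 0x80);
+ if (shift < 32 && (b & 0x40)) v |= -(int32_t)(1u << shift); /* Sign-extend. */
+ return v;
+}
+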
+/* Shortcuts to generate DWARF structures. */
+#define DB(x) (*p++ = (x))
+#define DI8(x) (*(int8_t *)p = (x), p++)
+#define DU16(x) (*(uint16_t *)p = (x), p += 2)
+#define DU32(x) (*(uint32_t *)p = (x), p += 4)
+#define DADDR(x) (*(uintptr_t *)p = (x), p += sizeof(uintptr_t))
+#define DUV(x) (p = (uint8_t *)lj_strfmt_wuleb128((char *)p, (x)))
+#define DSV(x) (ctx->p = p, gdbjit_sleb128(ctx, (x)), p = ctx->p)
+#define DSTR(str) (ctx->p = p, gdbjit_strz(ctx, (str)), p = ctx->p)
+#define DALIGNNOP(s) while ((uintptr_t)p & ((s)-1)) *p++ = DW_CFA_nop
+#define DSECT(name, stmt) \
+ { uint32_t *szp_##name = (uint32_t *)p; p += 4; stmt \
+ *szp_##name = (uint32_t)((p-(uint8_t *)szp_##name)-4); }
+
+/* Initialize ELF section headers. */
+static void LJ_FASTCALL gdbjit_secthdr(GDBJITctx *ctx)
+{
+ ELFsectheader *sect;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+#define SECTDEF(id, tp, al) \
+ sect = &ctx->obj.sect[GDBJIT_SECT_##id]; \
+ sect->name = gdbjit_strz(ctx, "." #id); \
+ sect->type = ELFSECT_TYPE_##tp; \
+ sect->align = (al)
+
+ SECTDEF(text, NOBITS, 16);
+ sect->flags = ELFSECT_FLAGS_ALLOC|ELFSECT_FLAGS_EXEC;
+ sect->addr = ctx->mcaddr;
+ sect->ofs = 0;
+ sect->size = ctx->szmcode;
+
+ SECTDEF(eh_frame, PROGBITS, sizeof(uintptr_t));
+ sect->flags = ELFSECT_FLAGS_ALLOC;
+
+ SECTDEF(shstrtab, STRTAB, 1);
+ SECTDEF(strtab, STRTAB, 1);
+
+ SECTDEF(symtab, SYMTAB, sizeof(uintptr_t));
+ sect->ofs = offsetof(GDBJITobj, sym);
+ sect->size = sizeof(ctx->obj.sym);
+ sect->link = GDBJIT_SECT_strtab;
+ sect->entsize = sizeof(ELFsymbol);
+ sect->info = GDBJIT_SYM_FUNC;
+
+ SECTDEF(debug_info, PROGBITS, 1);
+ SECTDEF(debug_abbrev, PROGBITS, 1);
+ SECTDEF(debug_line, PROGBITS, 1);
+
+#undef SECTDEF
+}
+
+/* Initialize symbol table. */
+static void LJ_FASTCALL gdbjit_symtab(GDBJITctx *ctx)
+{
+ ELFsymbol *sym;
+
+ *ctx->p++ = '\0'; /* Empty string at start of string table. */
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FILE];
+ sym->name = gdbjit_strz(ctx, "JIT mcode");
+ sym->sectidx = ELFSECT_IDX_ABS;
+ sym->info = ELFSYM_TYPE_FILE|ELFSYM_BIND_LOCAL;
+
+ sym = &ctx->obj.sym[GDBJIT_SYM_FUNC];
+ sym->name = gdbjit_strz(ctx, "TRACE_"); ctx->p--;
+ gdbjit_catnum(ctx, ctx->T->traceno); *ctx->p++ = '\0';
+ sym->sectidx = GDBJIT_SECT_text;
+ sym->value = 0;
+ sym->size = ctx->szmcode;
+ sym->info = ELFSYM_TYPE_FUNC|ELFSYM_BIND_GLOBAL;
+}
+
+/* Initialize .eh_frame section. */
+static void LJ_FASTCALL gdbjit_ehframe(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+ uint8_t *framep = p;
+
+ /* Emit DWARF EH CIE. */
+ DSECT(CIE,
+ DU32(0); /* Offset to CIE itself. */
+ DB(DW_CIE_VERSION);
+ DSTR("zR"); /* Augmentation. */
+ DUV(1); /* Code alignment factor. */
+ DSV(-(int32_t)sizeof(uintptr_t)); /* Data alignment factor. */
+ DB(DW_REG_RA); /* Return address register. */
+ DB(1); DB(DW_EH_PE_textrel|DW_EH_PE_udata4); /* Augmentation data. */
+ DB(DW_CFA_def_cfa); DUV(DW_REG_SP); DUV(sizeof(uintptr_t));
+#if LJ_TARGET_PPC
+ DB(DW_CFA_offset_extended_sf); DB(DW_REG_RA); DSV(-1);
+#else
+ DB(DW_CFA_offset|DW_REG_RA); DUV(1);
+#endif
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ /* Emit DWARF EH FDE. */
+ DSECT(FDE,
+ DU32((uint32_t)(p-framep)); /* Offset to CIE. */
+ DU32(0); /* Machine code offset relative to .text. */
+ DU32(ctx->szmcode); /* Machine code length. */
+ DB(0); /* Augmentation data. */
+ /* Registers saved in CFRAME. */
+#if LJ_TARGET_X86
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_DI); DUV(3);
+ DB(DW_CFA_offset|DW_REG_SI); DUV(4);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(5);
+#elif LJ_TARGET_X64
+ DB(DW_CFA_offset|DW_REG_BP); DUV(2);
+ DB(DW_CFA_offset|DW_REG_BX); DUV(3);
+ DB(DW_CFA_offset|DW_REG_15); DUV(4);
+ DB(DW_CFA_offset|DW_REG_14); DUV(5);
+ /* Extra registers saved for JIT-compiled code. */
+ DB(DW_CFA_offset|DW_REG_13); DUV(LJ_GC64 ? 10 : 9);
+ DB(DW_CFA_offset|DW_REG_12); DUV(LJ_GC64 ? 11 : 10);
+#elif LJ_TARGET_ARM
+ {
+ int i;
+ for (i = 11; i >= 4; i--) { DB(DW_CFA_offset|i); DUV(2+(11-i)); }
+ }
+#elif LJ_TARGET_ARM64
+ {
+ int i;
+ DB(DW_CFA_offset|31); DUV(2);
+ for (i = 28; i >= 19; i--) { DB(DW_CFA_offset|i); DUV(3+(28-i)); }
+ for (i = 15; i >= 8; i--) { DB(DW_CFA_offset|32|i); DUV(28-i); }
+ }
+#elif LJ_TARGET_PPC
+ {
+ int i;
+ DB(DW_CFA_offset_extended); DB(DW_REG_CR); DUV(55);
+ for (i = 14; i <= 31; i++) {
+ DB(DW_CFA_offset|i); DUV(37+(31-i));
+ DB(DW_CFA_offset|32|i); DUV(2+2*(31-i));
+ }
+ }
+#elif LJ_TARGET_MIPS
+ {
+ int i;
+ DB(DW_CFA_offset|30); DUV(2);
+ for (i = 23; i >= 16; i--) { DB(DW_CFA_offset|i); DUV(26-i); }
+ for (i = 30; i >= 20; i -= 2) { DB(DW_CFA_offset|32|i); DUV(42-i); }
+ }
+#else
+#error "Unsupported target architecture"
+#endif
+ if (ctx->spadjp != ctx->spadj) { /* Parent/interpreter stack frame size. */
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadjp);
+ DB(DW_CFA_advance_loc|1); /* Only an approximation. */
+ }
+ DB(DW_CFA_def_cfa_offset); DUV(ctx->spadj); /* Trace stack frame size. */
+ DALIGNNOP(sizeof(uintptr_t));
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_info section. */
+static void LJ_FASTCALL gdbjit_debuginfo(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(info,
+ DU16(2); /* DWARF version. */
+ DU32(0); /* Abbrev offset. */
+ DB(sizeof(uintptr_t)); /* Pointer size. */
+
+ DUV(1); /* Abbrev #1: DW_TAG_compile_unit. */
+ DSTR(ctx->filename); /* DW_AT_name. */
+ DADDR(ctx->mcaddr); /* DW_AT_low_pc. */
+ DADDR(ctx->mcaddr + ctx->szmcode); /* DW_AT_high_pc. */
+ DU32(0); /* DW_AT_stmt_list. */
+ )
+
+ ctx->p = p;
+}
+
+/* Initialize .debug_abbrev section. */
+static void LJ_FASTCALL gdbjit_debugabbrev(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ /* Abbrev #1: DW_TAG_compile_unit. */
+ DUV(1); DUV(DW_TAG_compile_unit);
+ DB(DW_children_no);
+ DUV(DW_AT_name); DUV(DW_FORM_string);
+ DUV(DW_AT_low_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_high_pc); DUV(DW_FORM_addr);
+ DUV(DW_AT_stmt_list); DUV(DW_FORM_data4);
+ DB(0); DB(0);
+
+ ctx->p = p;
+}
+
+#define DLNE(op, s) (DB(DW_LNS_extended_op), DUV(1+(s)), DB((op)))
+
+/* Initialize .debug_line section. */
+static void LJ_FASTCALL gdbjit_debugline(GDBJITctx *ctx)
+{
+ uint8_t *p = ctx->p;
+
+ DSECT(line,
+ DU16(2); /* DWARF version. */
+ DSECT(header,
+ DB(1); /* Minimum instruction length. */
+ DB(1); /* is_stmt. */
+ DI8(0); /* Line base for special opcodes. */
+ DB(2); /* Line range for special opcodes. */
+ DB(3+1); /* Opcode base at DW_LNS_advance_line+1. */
+ DB(0); DB(1); DB(1); /* Standard opcode lengths. */
+ /* Directory table. */
+ DB(0);
+ /* File name table. */
+ DSTR(ctx->filename); DUV(0); DUV(0); DUV(0);
+ DB(0);
+ )
+
+ DLNE(DW_LNE_set_address, sizeof(uintptr_t)); DADDR(ctx->mcaddr);
+ if (ctx->lineno) {
+ DB(DW_LNS_advance_line); DSV(ctx->lineno-1);
+ }
+ DB(DW_LNS_copy);
+ DB(DW_LNS_advance_pc); DUV(ctx->szmcode);
+ DLNE(DW_LNE_end_sequence, 0);
+ )
+
+ ctx->p = p;
+}
+
+#undef DLNE
+
+/* Undef shortcuts. */
+#undef DB
+#undef DI8
+#undef DU16
+#undef DU32
+#undef DADDR
+#undef DUV
+#undef DSV
+#undef DSTR
+#undef DALIGNNOP
+#undef DSECT
+
+/* Type of a section initializer callback. */
+typedef void (LJ_FASTCALL *GDBJITinitf)(GDBJITctx *ctx);
+
+/* Call section initializer and set the section offset and size. */
+static void gdbjit_initsect(GDBJITctx *ctx, int sect, GDBJITinitf initf)
+{
+ ctx->startp = ctx->p;
+ ctx->obj.sect[sect].ofs = (uintptr_t)((char *)ctx->p - (char *)&ctx->obj);
+ initf(ctx);
+ ctx->obj.sect[sect].size = (uintptr_t)(ctx->p - ctx->startp);
+}
+
+#define SECTALIGN(p, a) \
+ ((p) = (uint8_t *)(((uintptr_t)(p) + ((a)-1)) & ~(uintptr_t)((a)-1)))
+
+/* Build in-memory ELF object. */
+static void gdbjit_buildobj(GDBJITctx *ctx)
+{
+ GDBJITobj *obj = &ctx->obj;
+ /* Fill in ELF header and clear structures. */
+ memcpy(&obj->hdr, &elfhdr_template, sizeof(ELFheader));
+ memset(&obj->sect, 0, sizeof(ELFsectheader)*GDBJIT_SECT__MAX);
+ memset(&obj->sym, 0, sizeof(ELFsymbol)*GDBJIT_SYM__MAX);
+ /* Initialize sections. */
+ ctx->p = obj->space;
+ gdbjit_initsect(ctx, GDBJIT_SECT_shstrtab, gdbjit_secthdr);
+ gdbjit_initsect(ctx, GDBJIT_SECT_strtab, gdbjit_symtab);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_info, gdbjit_debuginfo);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_abbrev, gdbjit_debugabbrev);
+ gdbjit_initsect(ctx, GDBJIT_SECT_debug_line, gdbjit_debugline);
+ SECTALIGN(ctx->p, sizeof(uintptr_t));
+ gdbjit_initsect(ctx, GDBJIT_SECT_eh_frame, gdbjit_ehframe);
+ ctx->objsize = (size_t)((char *)ctx->p - (char *)obj);
+ lj_assertX(ctx->objsize < sizeof(GDBJITobj), "GDBJITobj overflow");
+}
+
+#undef SECTALIGN
+
+/* -- Interface to GDB JIT API -------------------------------------------- */
+
+static int gdbjit_lock;
+
+static void gdbjit_lock_acquire(void)
+{
+ while (__sync_lock_test_and_set(&gdbjit_lock, 1)) {
+ /* Just spin; futexes or pthreads aren't worth the portability cost. */
+ }
+}
+
+static void gdbjit_lock_release(void)
+{
+ __sync_lock_release(&gdbjit_lock);
+}
+
+/* Add new entry to GDB JIT symbol chain. */
+static void gdbjit_newentry(lua_State *L, GDBJITctx *ctx)
+{
+ /* Allocate memory for GDB JIT entry and ELF object. */
+ MSize sz = (MSize)(sizeof(GDBJITentryobj) - sizeof(GDBJITobj) + ctx->objsize);
+ GDBJITentryobj *eo = lj_mem_newt(L, sz, GDBJITentryobj);
+ memcpy(&eo->obj, &ctx->obj, ctx->objsize); /* Copy ELF object. */
+ eo->sz = sz;
+ ctx->T->gdbjit_entry = (void *)eo;
+ /* Link new entry to chain and register it. */
+ eo->entry.prev_entry = NULL;
+ gdbjit_lock_acquire();
+ eo->entry.next_entry = __jit_debug_descriptor.first_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = &eo->entry;
+ eo->entry.symfile_addr = (const char *)&eo->obj;
+ eo->entry.symfile_size = ctx->objsize;
+ __jit_debug_descriptor.first_entry = &eo->entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_REGISTER;
+ __jit_debug_register_code();
+ gdbjit_lock_release();
+}
+
+/* Add debug info for newly compiled trace and notify GDB. */
+void lj_gdbjit_addtrace(jit_State *J, GCtrace *T)
+{
+ GDBJITctx ctx;
+ GCproto *pt = &gcref(T->startpt)->pt;
+ TraceNo parent = T->ir[REF_BASE].op1;
+ const BCIns *startpc = mref(T->startpc, const BCIns);
+ ctx.T = T;
+ ctx.mcaddr = (uintptr_t)T->mcode;
+ ctx.szmcode = T->szmcode;
+ ctx.spadjp = CFRAME_SIZE_JIT +
+ (MSize)(parent ? traceref(J, parent)->spadjust : 0);
+ ctx.spadj = CFRAME_SIZE_JIT + T->spadjust;
+ lj_assertJ(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
+ "start PC out of range");
+ ctx.lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+ ctx.filename = proto_chunknamestr(pt);
+ if (*ctx.filename == '@' || *ctx.filename == '=')
+ ctx.filename++;
+ else
+ ctx.filename = "(string)";
+ gdbjit_buildobj(&ctx);
+ gdbjit_newentry(J->L, &ctx);
+}
+
+/* Delete debug info for trace and notify GDB. */
+void lj_gdbjit_deltrace(jit_State *J, GCtrace *T)
+{
+ GDBJITentryobj *eo = (GDBJITentryobj *)T->gdbjit_entry;
+ if (eo) {
+ gdbjit_lock_acquire();
+ if (eo->entry.prev_entry)
+ eo->entry.prev_entry->next_entry = eo->entry.next_entry;
+ else
+ __jit_debug_descriptor.first_entry = eo->entry.next_entry;
+ if (eo->entry.next_entry)
+ eo->entry.next_entry->prev_entry = eo->entry.prev_entry;
+ __jit_debug_descriptor.relevant_entry = &eo->entry;
+ __jit_debug_descriptor.action_flag = GDBJIT_UNREGISTER;
+ __jit_debug_register_code();
+ gdbjit_lock_release();
+ lj_mem_free(J2G(J), eo, eo->sz);
+ }
+}
+
+#endif
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_gdbjit.h b/libs/luajit-cmake/luajit/src/lj_gdbjit.h
new file mode 100644
index 0000000..f531be6
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_gdbjit.h
@@ -0,0 +1,22 @@
+/*
+** Client for the GDB JIT API.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_GDBJIT_H
+#define _LJ_GDBJIT_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT)
+
+LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T);
+LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T);
+
+#else
+#define lj_gdbjit_addtrace(J, T) UNUSED(T)
+#define lj_gdbjit_deltrace(J, T) UNUSED(T)
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ir.c b/libs/luajit-cmake/luajit/src/lj_ir.c
new file mode 100644
index 0000000..6590151
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ir.c
@@ -0,0 +1,500 @@
+/*
+** SSA IR (Intermediate Representation) emitter.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_ir_c
+#define LUA_CORE
+
+/* For pointers to libc/libm functions. */
+#include <stdio.h>
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lj_carith.h"
+#endif
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_serialize.h"
+#include "lj_strfmt.h"
+#include "lj_prng.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* -- IR tables ----------------------------------------------------------- */
+
+/* IR instruction modes. */
+LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
+IRDEF(IRMODE)
+ 0
+};
+
+/* IR type sizes. */
+LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = {
+#define IRTSIZE(name, size) size,
+IRTDEF(IRTSIZE)
+#undef IRTSIZE
+ 0
+};
+
+/* C call info for CALL* instructions. */
+LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
+#define IRCALLCI(cond, name, nargs, kind, type, flags) \
+ { (ASMFunction)IRCALLCOND_##cond(name), \
+ (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
+IRCALLDEF(IRCALLCI)
+#undef IRCALLCI
+ { NULL, 0 }
+};
+
+/* -- IR emitter ---------------------------------------------------------- */
+
+/* Grow IR buffer at the top. */
+void LJ_FASTCALL lj_ir_growtop(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ if (szins) {
+ baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
+ 2*szins*sizeof(IRIns));
+ J->irtoplim = J->irbotlim + 2*szins;
+ } else {
+ baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
+ J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
+ J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
+ }
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+}
+
+/* Grow IR buffer at the bottom or shift it up. */
+static void lj_ir_growbot(jit_State *J)
+{
+ IRIns *baseir = J->irbuf + J->irbotlim;
+ MSize szins = J->irtoplim - J->irbotlim;
+ lj_assertJ(szins != 0, "zero IR size");
+ lj_assertJ(J->cur.nk == J->irbotlim || J->cur.nk-1 == J->irbotlim,
+ "unexpected IR growth");
+ if (J->cur.nins + (szins >> 1) < J->irtoplim) {
+ /* More than half of the buffer is free on top: shift up by a quarter. */
+ MSize ofs = szins >> 2;
+ memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim -= ofs;
+ J->cur.ir = J->irbuf = baseir - J->irbotlim;
+ } else {
+ /* Double the buffer size, but split the growth amongst top/bottom. */
+ IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
+ MSize ofs = szins >= 256 ? 128 : (szins >> 1); /* Limit bottom growth. */
+ memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
+ lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
+ J->irbotlim -= ofs;
+ J->irtoplim = J->irbotlim + 2*szins;
+ J->cur.ir = J->irbuf = newbase - J->irbotlim;
+ }
+}
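+/*
+** Note (illustrative): J->irbuf/J->cur.ir are kept biased by irbotlim,
+** so J->cur.ir[ref] indexes directly with a biased IRRef. Growing or
+** shifting the buffer only re-biases the base pointer; existing IRRefs
+** remain valid, though IRIns pointers are invalidated.
+*/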
+
+/* Emit IR without any optimizations. */
+TRef LJ_FASTCALL lj_ir_emit(jit_State *J)
+{
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ IROp op = fins->o;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+ ir->o = op;
+ ir->op1 = fins->op1;
+ ir->op2 = fins->op2;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+}
+
+/* Emit call to a C function. */
+TRef lj_ir_call(jit_State *J, IRCallID id, ...)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[id];
+ uint32_t n = CCI_NARGS(ci);
+ TRef tr = TREF_NIL;
+ va_list argp;
+ va_start(argp, id);
+ if ((ci->flags & CCI_L)) n--;
+ if (n > 0)
+ tr = va_arg(argp, IRRef);
+ while (n-- > 1)
+ tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
+ va_end(argp);
+ if (CCI_OP(ci) == IR_CALLS)
+ J->needsnap = 1; /* Need snapshot after call with side effect. */
+ return emitir(CCI_OPTYPE(ci), tr, id);
+}
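+/*
+** Usage sketch (illustrative; straref/strbref are hypothetical TRefs
+** for two previously emitted string operands):
+**
+**   TRef res = lj_ir_call(J, IRCALL_lj_str_cmp, straref, strbref);
+**
+** The varargs are chained into CARG instructions and the final CALL*
+** opcode/type is taken from lj_ir_callinfo[id].
+*/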
+
+/* Load field of type t from GG_State + offset. Must be 32 bit aligned. */
+TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs)
+{
+ lj_assertJ((ofs & 3) == 0, "unaligned GG_State field offset");
+ ofs >>= 2;
+ lj_assertJ(ofs >= IRFL__MAX && ofs <= 0x3ff,
+ "GG_State field offset breaks 10 bit FOLD key limit");
+ lj_ir_set(J, IRT(IR_FLOAD, t), REF_NIL, ofs);
+ return lj_opt_fold(J);
+}
+
+/* -- Interning of constants ---------------------------------------------- */
+
+/*
+** IR instructions for constants are kept in the range J->cur.nk <= ref < REF_BIAS.
+** They are chained like all other instructions, but grow downwards.
+** They are interned (like strings in the VM) to facilitate reference
+** comparisons. The same constant must get the same reference.
+*/
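+/*
+** Example (illustrative): interning means emitting the same constant
+** twice yields the same reference, so equality of constants reduces to
+** equality of refs:
+**
+**   TRef a = lj_ir_kint(J, 42);
+**   TRef b = lj_ir_kint(J, 42);
+**   tref_ref(a) == tref_ref(b)  holds by construction.
+*/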
+
+/* Get ref of next IR constant and optionally grow IR.
+** Note: this may invalidate all IRIns *!
+*/
+static LJ_AINLINE IRRef ir_nextk(jit_State *J)
+{
+ IRRef ref = J->cur.nk;
+ if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J);
+ J->cur.nk = --ref;
+ return ref;
+}
+
+/* Get ref of next 64 bit IR constant and optionally grow IR.
+** Note: this may invalidate all IRIns *!
+*/
+static LJ_AINLINE IRRef ir_nextk64(jit_State *J)
+{
+ IRRef ref = J->cur.nk - 2;
+ lj_assertJ(J->state != LJ_TRACE_ASM, "bad JIT state");
+ if (LJ_UNLIKELY(ref < J->irbotlim)) lj_ir_growbot(J);
+ J->cur.nk = ref;
+ return ref;
+}
+
+#if LJ_GC64
+#define ir_nextkgc ir_nextk64
+#else
+#define ir_nextkgc ir_nextk
+#endif
+
+/* Intern int32_t constant. */
+TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev)
+ if (cir[ref].i == k)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = k;
+ ir->t.irt = IRT_INT;
+ ir->o = IR_KINT;
+ ir->prev = J->chain[IR_KINT];
+ J->chain[IR_KINT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_INT);
+}
+
+/* Intern 64 bit constant, given by its 64 bit pattern. */
+TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (ir_k64(&cir[ref])->u64 == u64)
+ goto found;
+ ref = ir_nextk64(J);
+ ir = IR(ref);
+ ir[1].tv.u64 = u64;
+ ir->t.irt = t;
+ ir->o = op;
+ ir->op12 = 0;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern FP constant, given by its 64 bit pattern. */
+TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KNUM, u64);
+}
+
+/* Intern 64 bit integer constant. */
+TRef lj_ir_kint64(jit_State *J, uint64_t u64)
+{
+ return lj_ir_k64(J, IR_KINT64, u64);
+}
+
+/* Check whether a number is int and return it. -0 is NOT considered an int. */
+static int numistrueint(lua_Number n, int32_t *kp)
+{
+ int32_t k = lj_num2int(n);
+ if (n == (lua_Number)k) {
+ if (kp) *kp = k;
+ if (k == 0) { /* Special check for -0. */
+ TValue tv;
+ setnumV(&tv, n);
+ if (tv.u32.hi != 0)
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Intern number as int32_t constant if possible, otherwise as FP constant. */
+TRef lj_ir_knumint(jit_State *J, lua_Number n)
+{
+ int32_t k;
+ if (numistrueint(n, &k))
+ return lj_ir_kint(J, k);
+ else
+ return lj_ir_knum(J, n);
+}
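+/*
+** Example (illustrative): lj_ir_knumint(J, 3.0) interns KINT 3, while
+** lj_ir_knumint(J, 3.5) and lj_ir_knumint(J, -0.0) intern KNUM
+** constants, since -0 must not be narrowed to integer 0.
+*/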
+
+/* Intern GC object "constant". */
+TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ lj_assertJ(!isdead(J2G(J), o), "interning of dead GC object");
+ for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
+ if (ir_kgc(&cir[ref]) == o)
+ goto found;
+ ref = ir_nextkgc(J);
+ ir = IR(ref);
+ /* NOBARRIER: Current trace is a GC root. */
+ ir->op12 = 0;
+ setgcref(ir[LJ_GC64].gcr, o);
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KGC;
+ ir->prev = J->chain[IR_KGC];
+ J->chain[IR_KGC] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Allocate GCtrace constant placeholder (no interning). */
+TRef lj_ir_ktrace(jit_State *J)
+{
+ IRRef ref = ir_nextkgc(J);
+ IRIns *ir = IR(ref);
+ lj_assertJ(irt_toitype_(IRT_P64) == LJ_TTRACE, "mismatched type mapping");
+ ir->t.irt = IRT_P64;
+ ir->o = LJ_GC64 ? IR_KNUM : IR_KNULL; /* Not IR_KGC yet, but same size. */
+ ir->op12 = 0;
+ ir->prev = 0;
+ return TREF(ref, IRT_P64);
+}
+
+/* Intern pointer constant. */
+TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+#if LJ_64 && !LJ_GC64
+ lj_assertJ((void *)(uintptr_t)u32ptr(ptr) == ptr, "out-of-range GC pointer");
+#endif
+ for (ref = J->chain[op]; ref; ref = cir[ref].prev)
+ if (ir_kptr(&cir[ref]) == ptr)
+ goto found;
+#if LJ_GC64
+ ref = ir_nextk64(J);
+#else
+ ref = ir_nextk(J);
+#endif
+ ir = IR(ref);
+ ir->op12 = 0;
+ setmref(ir[LJ_GC64].ptr, ptr);
+ ir->t.irt = IRT_PGC;
+ ir->o = op;
+ ir->prev = J->chain[op];
+ J->chain[op] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_PGC);
+}
+
+/* Intern typed NULL constant. */
+TRef lj_ir_knull(jit_State *J, IRType t)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef ref;
+ for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev)
+ if (irt_t(cir[ref].t) == t)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)t;
+ ir->o = IR_KNULL;
+ ir->prev = J->chain[IR_KNULL];
+ J->chain[IR_KNULL] = (IRRef1)ref;
+found:
+ return TREF(ref, t);
+}
+
+/* Intern key slot. */
+TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
+{
+ IRIns *ir, *cir = J->cur.ir;
+ IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);
+ IRRef ref;
+ /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
+ lj_assertJ(tref_isk(key) && slot == (IRRef)(IRRef1)slot,
+ "out-of-range key/slot");
+ for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
+ if (cir[ref].op12 == op12)
+ goto found;
+ ref = ir_nextk(J);
+ ir = IR(ref);
+ ir->op12 = op12;
+ ir->t.irt = IRT_P32;
+ ir->o = IR_KSLOT;
+ ir->prev = J->chain[IR_KSLOT];
+ J->chain[IR_KSLOT] = (IRRef1)ref;
+found:
+ return TREF(ref, IRT_P32);
+}
+
+/* -- Access to IR constants ---------------------------------------------- */
+
+/* Copy value of IR constant. */
+void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
+{
+ UNUSED(L);
+ lj_assertL(ir->o != IR_KSLOT, "unexpected KSLOT"); /* Common mistake. */
+ switch (ir->o) {
+ case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break;
+ case IR_KINT: setintV(tv, ir->i); break;
+ case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
+ case IR_KPTR: case IR_KKPTR:
+ setnumV(tv, (lua_Number)(uintptr_t)ir_kptr(ir));
+ break;
+ case IR_KNULL: setintV(tv, 0); break;
+ case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
+#if LJ_HASFFI
+ case IR_KINT64: {
+ GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
+ *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
+ setcdataV(L, tv, cd);
+ break;
+ }
+#endif
+ default: lj_assertL(0, "bad IR constant op %d", ir->o); break;
+ }
+}
+
+/* -- Convert IR operand types -------------------------------------------- */
+
+/* Convert from string to number. */
+TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr)
+{
+ if (!tref_isnumber(tr)) {
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or string to number. */
+TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr)
+{
+ if (!tref_isnum(tr)) {
+ if (tref_isinteger(tr))
+ tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ else if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ else
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ }
+ return tr;
+}
+
+/* Convert from integer or number to string. */
+TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
+{
+ if (!tref_isstr(tr)) {
+ if (!tref_isnumber(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ tr = emitir(IRT(IR_TOSTR, IRT_STR), tr,
+ tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT);
+ }
+ return tr;
+}
+
+/* -- Miscellaneous IR ops ------------------------------------------------ */
+
+/* Evaluate numeric comparison. */
+int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
+{
+ switch (op) {
+ case IR_EQ: return (a == b);
+ case IR_NE: return (a != b);
+ case IR_LT: return (a < b);
+ case IR_GE: return (a >= b);
+ case IR_LE: return (a <= b);
+ case IR_GT: return (a > b);
+ case IR_ULT: return !(a >= b);
+ case IR_UGE: return !(a < b);
+ case IR_ULE: return !(a > b);
+ case IR_UGT: return !(a <= b);
+ default: lj_assertX(0, "bad IR op %d", op); return 0;
+ }
+}
+
+/* Evaluate string comparison. */
+int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
+{
+ int res = lj_str_cmp(a, b);
+ switch (op) {
+ case IR_LT: return (res < 0);
+ case IR_GE: return (res >= 0);
+ case IR_LE: return (res <= 0);
+ case IR_GT: return (res > 0);
+ default: lj_assertX(0, "bad IR op %d", op); return 0;
+ }
+}
+
+/* Rollback IR to previous state. */
+void lj_ir_rollback(jit_State *J, IRRef ref)
+{
+ IRRef nins = J->cur.nins;
+ while (nins > ref) {
+ IRIns *ir;
+ nins--;
+ ir = IR(nins);
+ J->chain[ir->o] = ir->prev;
+ }
+ J->cur.nins = nins;
+}
+
+#undef IR
+#undef fins
+#undef emitir
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ir.h b/libs/luajit-cmake/luajit/src/lj_ir.h
new file mode 100644
index 0000000..ed492e9
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ir.h
@@ -0,0 +1,614 @@
+/*
+** SSA IR (Intermediate Representation) format.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IR_H
+#define _LJ_IR_H
+
+#include "lj_obj.h"
+
+/* -- IR instructions ----------------------------------------------------- */
+
+/* IR instruction definition. Order matters, see below. ORDER IR */
+#define IRDEF(_) \
+ /* Guarded assertions. */ \
+ /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
+ _(LT, N , ref, ref) \
+ _(GE, N , ref, ref) \
+ _(LE, N , ref, ref) \
+ _(GT, N , ref, ref) \
+ \
+ _(ULT, N , ref, ref) \
+ _(UGE, N , ref, ref) \
+ _(ULE, N , ref, ref) \
+ _(UGT, N , ref, ref) \
+ \
+ _(EQ, C , ref, ref) \
+ _(NE, C , ref, ref) \
+ \
+ _(ABC, N , ref, ref) \
+ _(RETF, S , ref, ref) \
+ \
+ /* Miscellaneous ops. */ \
+ _(NOP, N , ___, ___) \
+ _(BASE, N , lit, lit) \
+ _(PVAL, N , lit, ___) \
+ _(GCSTEP, S , ___, ___) \
+ _(HIOP, S , ref, ref) \
+ _(LOOP, S , ___, ___) \
+ _(USE, S , ref, ___) \
+ _(PHI, S , ref, ref) \
+ _(RENAME, S , ref, lit) \
+ _(PROF, S , ___, ___) \
+ \
+ /* Constants. */ \
+ _(KPRI, N , ___, ___) \
+ _(KINT, N , cst, ___) \
+ _(KGC, N , cst, ___) \
+ _(KPTR, N , cst, ___) \
+ _(KKPTR, N , cst, ___) \
+ _(KNULL, N , cst, ___) \
+ _(KNUM, N , cst, ___) \
+ _(KINT64, N , cst, ___) \
+ _(KSLOT, N , ref, lit) \
+ \
+ /* Bit ops. */ \
+ _(BNOT, N , ref, ___) \
+ _(BSWAP, N , ref, ___) \
+ _(BAND, C , ref, ref) \
+ _(BOR, C , ref, ref) \
+ _(BXOR, C , ref, ref) \
+ _(BSHL, N , ref, ref) \
+ _(BSHR, N , ref, ref) \
+ _(BSAR, N , ref, ref) \
+ _(BROL, N , ref, ref) \
+ _(BROR, N , ref, ref) \
+ \
+ /* Arithmetic ops. ORDER ARITH */ \
+ _(ADD, C , ref, ref) \
+ _(SUB, N , ref, ref) \
+ _(MUL, C , ref, ref) \
+ _(DIV, N , ref, ref) \
+ _(MOD, N , ref, ref) \
+ _(POW, N , ref, ref) \
+ _(NEG, N , ref, ref) \
+ \
+ _(ABS, N , ref, ref) \
+ _(LDEXP, N , ref, ref) \
+ _(MIN, C , ref, ref) \
+ _(MAX, C , ref, ref) \
+ _(FPMATH, N , ref, lit) \
+ \
+ /* Overflow-checking arithmetic ops. */ \
+ _(ADDOV, CW, ref, ref) \
+ _(SUBOV, NW, ref, ref) \
+ _(MULOV, CW, ref, ref) \
+ \
+ /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
+ \
+ /* Memory references. */ \
+ _(AREF, R , ref, ref) \
+ _(HREFK, R , ref, ref) \
+ _(HREF, L , ref, ref) \
+ _(NEWREF, S , ref, ref) \
+ _(UREFO, LW, ref, lit) \
+ _(UREFC, LW, ref, lit) \
+ _(FREF, R , ref, lit) \
+ _(TMPREF, S , ref, lit) \
+ _(STRREF, N , ref, ref) \
+ _(LREF, L , ___, ___) \
+ \
+ /* Loads and Stores. These must be in the same order. */ \
+ _(ALOAD, L , ref, ___) \
+ _(HLOAD, L , ref, ___) \
+ _(ULOAD, L , ref, ___) \
+ _(FLOAD, L , ref, lit) \
+ _(XLOAD, L , ref, lit) \
+ _(SLOAD, L , lit, lit) \
+ _(VLOAD, L , ref, lit) \
+ _(ALEN, L , ref, ref) \
+ \
+ _(ASTORE, S , ref, ref) \
+ _(HSTORE, S , ref, ref) \
+ _(USTORE, S , ref, ref) \
+ _(FSTORE, S , ref, ref) \
+ _(XSTORE, S , ref, ref) \
+ \
+ /* Allocations. */ \
+ _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
+ _(XSNEW, A , ref, ref) \
+ _(TNEW, AW, lit, lit) \
+ _(TDUP, AW, ref, ___) \
+ _(CNEW, AW, ref, ref) \
+ _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
+ \
+ /* Buffer operations. */ \
+ _(BUFHDR, L , ref, lit) \
+ _(BUFPUT, LW, ref, ref) \
+ _(BUFSTR, AW, ref, ref) \
+ \
+ /* Barriers. */ \
+ _(TBAR, S , ref, ___) \
+ _(OBAR, S , ref, ref) \
+ _(XBAR, S , ___, ___) \
+ \
+ /* Type conversions. */ \
+ _(CONV, N , ref, lit) \
+ _(TOBIT, N , ref, ref) \
+ _(TOSTR, N , ref, lit) \
+ _(STRTO, N , ref, ___) \
+ \
+ /* Calls. */ \
+ _(CALLN, NW, ref, lit) \
+ _(CALLA, AW, ref, lit) \
+ _(CALLL, LW, ref, lit) \
+ _(CALLS, S , ref, lit) \
+ _(CALLXS, S , ref, ref) \
+ _(CARG, N , ref, ref) \
+ \
+ /* End of list. */
+
+/* IR opcodes (max. 256). */
+typedef enum {
+#define IRENUM(name, m, m1, m2) IR_##name,
+IRDEF(IRENUM)
+#undef IRENUM
+ IR__MAX
+} IROp;
+
+/* Stored opcode. */
+typedef uint8_t IROp1;
+
+LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
+LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
+LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
+LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
+
+/* Delta between xLOAD and xSTORE. */
+#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
+
+LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
+LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
+LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
+LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
+
+/* -- Named IR literals --------------------------------------------------- */
+
+/* FPMATH sub-functions. ORDER FPM. */
+#define IRFPMDEF(_) \
+ _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
+ _(SQRT) _(LOG) _(LOG2) \
+ _(OTHER)
+
+typedef enum {
+#define FPMENUM(name) IRFPM_##name,
+IRFPMDEF(FPMENUM)
+#undef FPMENUM
+ IRFPM__MAX
+} IRFPMathOp;
+
+/* FLOAD fields. */
+#define IRFLDEF(_) \
+ _(STR_LEN, offsetof(GCstr, len)) \
+ _(FUNC_ENV, offsetof(GCfunc, l.env)) \
+ _(FUNC_PC, offsetof(GCfunc, l.pc)) \
+ _(FUNC_FFID, offsetof(GCfunc, l.ffid)) \
+ _(THREAD_ENV, offsetof(lua_State, env)) \
+ _(TAB_META, offsetof(GCtab, metatable)) \
+ _(TAB_ARRAY, offsetof(GCtab, array)) \
+ _(TAB_NODE, offsetof(GCtab, node)) \
+ _(TAB_ASIZE, offsetof(GCtab, asize)) \
+ _(TAB_HMASK, offsetof(GCtab, hmask)) \
+ _(TAB_NOMM, offsetof(GCtab, nomm)) \
+ _(UDATA_META, offsetof(GCudata, metatable)) \
+ _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
+ _(UDATA_FILE, sizeof(GCudata)) \
+ _(SBUF_W, sizeof(GCudata) + offsetof(SBufExt, w)) \
+ _(SBUF_E, sizeof(GCudata) + offsetof(SBufExt, e)) \
+ _(SBUF_B, sizeof(GCudata) + offsetof(SBufExt, b)) \
+ _(SBUF_L, sizeof(GCudata) + offsetof(SBufExt, L)) \
+ _(SBUF_REF, sizeof(GCudata) + offsetof(SBufExt, cowref)) \
+ _(SBUF_R, sizeof(GCudata) + offsetof(SBufExt, r)) \
+ _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \
+ _(CDATA_PTR, sizeof(GCcdata)) \
+ _(CDATA_INT, sizeof(GCcdata)) \
+ _(CDATA_INT64, sizeof(GCcdata)) \
+ _(CDATA_INT64_4, sizeof(GCcdata) + 4)
+
+typedef enum {
+#define FLENUM(name, ofs) IRFL_##name,
+IRFLDEF(FLENUM)
+#undef FLENUM
+ IRFL__MAX
+} IRFieldID;
+
+/* TMPREF mode bits, stored in op2. */
+#define IRTMPREF_IN1 0x01 /* First input value. */
+#define IRTMPREF_OUT1 0x02 /* First output value. */
+#define IRTMPREF_OUT2 0x04 /* Second output value. */
+
+/* SLOAD mode bits, stored in op2. */
+#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
+#define IRSLOAD_FRAME 0x02 /* Load 32 bits of ftsz. */
+#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
+#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
+#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
+#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
+#define IRSLOAD_KEYINDEX 0x40 /* Table traversal key index. */
+
+/* XLOAD mode bits, stored in op2. */
+#define IRXLOAD_READONLY 0x01 /* Load from read-only data. */
+#define IRXLOAD_VOLATILE 0x02 /* Load from volatile data. */
+#define IRXLOAD_UNALIGNED 0x04 /* Unaligned load. */
+
+/* BUFHDR mode, stored in op2. */
+#define IRBUFHDR_RESET 0 /* Reset buffer. */
+#define IRBUFHDR_APPEND 1 /* Append to buffer. */
+#define IRBUFHDR_WRITE 2 /* Write to string buffer. */
+
+/* CONV mode, stored in op2. */
+#define IRCONV_SRCMASK 0x001f /* Source IRType. */
+#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
+#define IRCONV_DSH 5
+#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
+#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
+#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
+#define IRCONV_MODEMASK 0x0fff
+#define IRCONV_CONVMASK 0xf000
+#define IRCONV_CSH 12
+/* Number to integer conversion mode. Ordered by strength of the checks. */
+#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
+#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
+#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
+#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
+#define IRCONV_NONE IRCONV_ANY /* INT|*64 no conv, but change type. */
+
+/* TOSTR mode, stored in op2. */
+#define IRTOSTR_INT 0 /* Convert integer to string. */
+#define IRTOSTR_NUM 1 /* Convert number to string. */
+#define IRTOSTR_CHAR 2 /* Convert char value to string. */
+
+/* -- IR operands --------------------------------------------------------- */
+
+/* IR operand mode (2 bit). */
+typedef enum {
+ IRMref, /* IR reference. */
+ IRMlit, /* 16 bit unsigned literal. */
+ IRMcst, /* Constant literal: i, gcr or ptr. */
+ IRMnone /* Unused operand. */
+} IRMode;
+#define IRM___ IRMnone
+
+/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */
+#define IRM_C 0x10
+
+#define IRM_N 0x00
+#define IRM_R IRM_N
+#define IRM_A 0x20
+#define IRM_L 0x40
+#define IRM_S 0x60
+
+#define IRM_W 0x80
+
+#define IRM_NW (IRM_N|IRM_W)
+#define IRM_CW (IRM_C|IRM_W)
+#define IRM_AW (IRM_A|IRM_W)
+#define IRM_LW (IRM_L|IRM_W)
+
+#define irm_op1(m) ((IRMode)((m)&3))
+#define irm_op2(m) ((IRMode)(((m)>>2)&3))
+#define irm_iscomm(m) ((m) & IRM_C)
+#define irm_kind(m) ((m) & IRM_S)
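+/*
+** Example (illustrative): decoding the mode byte of IR_ADD, which is
+** defined as _(ADD, C, ref, ref):
+**
+**   uint8_t m = lj_ir_mode[IR_ADD];
+**   irm_op1(m)     => IRMref
+**   irm_op2(m)     => IRMref
+**   irm_iscomm(m)  => nonzero (ADD is commutative)
+*/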
+
+#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
+
+LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
+
+/* -- IR instruction types ------------------------------------------------ */
+
+#define IRTSIZE_PGC (LJ_GC64 ? 8 : 4)
+
+/* Map of itypes to non-negative numbers and their sizes. ORDER LJ_T.
+** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
+** IRT_P32 and IRT_P64, which never escape the IR.
+** The various integers are only used in the IR and can only escape to
+** a TValue after implicit or explicit conversion. Their types must be
+** contiguous and next to IRT_NUM (see the typerange macros below).
+*/
+#define IRTDEF(_) \
+ _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) \
+ _(STR, IRTSIZE_PGC) _(P32, 4) _(THREAD, IRTSIZE_PGC) _(PROTO, IRTSIZE_PGC) \
+ _(FUNC, IRTSIZE_PGC) _(P64, 8) _(CDATA, IRTSIZE_PGC) _(TAB, IRTSIZE_PGC) \
+ _(UDATA, IRTSIZE_PGC) \
+ _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
+ _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
+ _(SOFTFP, 4) /* There is room for 8 more types. */
+
+/* IR result type and flags (8 bit). */
+typedef enum {
+#define IRTENUM(name, size) IRT_##name,
+IRTDEF(IRTENUM)
+#undef IRTENUM
+ IRT__MAX,
+
+ /* Native pointer type and the corresponding integer type. */
+ IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
+ IRT_PGC = LJ_GC64 ? IRT_P64 : IRT_P32,
+ IRT_IGC = LJ_GC64 ? IRT_I64 : IRT_INT,
+ IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
+ IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
+
+ /* Additional flags. */
+ IRT_MARK = 0x20, /* Marker for misc. purposes. */
+ IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
+ IRT_GUARD = 0x80, /* Instruction is a guard. */
+
+ /* Masks. */
+ IRT_TYPE = 0x1f,
+ IRT_T = 0xff
+} IRType;
+
+#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
+
+/* Stored IRType. */
+typedef struct IRType1 { uint8_t irt; } IRType1;
+
+#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
+#define IRTI(o) (IRT((o), IRT_INT))
+#define IRTN(o) (IRT((o), IRT_NUM))
+#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
+#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
+
+#define irt_t(t) ((IRType)(t).irt)
+#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
+#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
+#define irt_typerange(t, first, last) \
+ ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
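+/*
+** Note (illustrative): irt_typerange() uses the unsigned-subtract range
+** trick: (x - first) <= (last - first) tests first <= x <= last in one
+** comparison, since values below `first` wrap to large unsigned numbers.
+*/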
+
+#define irt_isnil(t) (irt_type(t) == IRT_NIL)
+#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
+#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
+#define irt_isstr(t) (irt_type(t) == IRT_STR)
+#define irt_istab(t) (irt_type(t) == IRT_TAB)
+#define irt_iscdata(t) (irt_type(t) == IRT_CDATA)
+#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
+#define irt_isnum(t) (irt_type(t) == IRT_NUM)
+#define irt_isint(t) (irt_type(t) == IRT_INT)
+#define irt_isi8(t) (irt_type(t) == IRT_I8)
+#define irt_isu8(t) (irt_type(t) == IRT_U8)
+#define irt_isi16(t) (irt_type(t) == IRT_I16)
+#define irt_isu16(t) (irt_type(t) == IRT_U16)
+#define irt_isu32(t) (irt_type(t) == IRT_U32)
+#define irt_isi64(t) (irt_type(t) == IRT_I64)
+#define irt_isu64(t) (irt_type(t) == IRT_U64)
+
+#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
+#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
+#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
+#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
+#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
+
+#if LJ_GC64
+/* Include IRT_NIL, so IR(ASMREF_L) (aka REF_NIL) is considered 64 bit. */
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|\
+ (1u<<IRT_LIGHTUD)|(1u<<IRT_STR)|(1u<<IRT_THREAD)|(1u<<IRT_PROTO)|\
+ (1u<<IRT_FUNC)|(1u<<IRT_CDATA)|(1u<<IRT_TAB)|(1u<<IRT_UDATA)|\
+ (1u<<IRT_NIL))
+#elif LJ_64
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
+#else
+#define IRT_IS64 \
+ ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64))
+#endif
+
+#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
+#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
+
+#define irt_size(t) (lj_ir_type_size[irt_t((t))])
+
+LJ_DATA const uint8_t lj_ir_type_size[];
+
+static LJ_AINLINE IRType itype2irt(const TValue *tv)
+{
+ if (tvisint(tv))
+ return IRT_INT;
+ else if (tvisnum(tv))
+ return IRT_NUM;
+#if LJ_64 && !LJ_GC64
+ else if (tvislightud(tv))
+ return IRT_LIGHTUD;
+#endif
+ else
+ return (IRType)~itype(tv);
+}
+
+static LJ_AINLINE uint32_t irt_toitype_(IRType t)
+{
+ lj_assertX(!LJ_64 || LJ_GC64 || t != IRT_LIGHTUD,
+ "no plain type tag for lightuserdata");
+ if (LJ_DUALNUM && t > IRT_NUM) {
+ return LJ_TISNUM;
+ } else {
+ lj_assertX(t <= IRT_NUM, "no plain type tag for IR type %d", t);
+ return ~(uint32_t)t;
+ }
+}
+
+#define irt_toitype(t) irt_toitype_(irt_type((t)))
+
+#define irt_isguard(t) ((t).irt & IRT_GUARD)
+#define irt_ismarked(t) ((t).irt & IRT_MARK)
+#define irt_setmark(t) ((t).irt |= IRT_MARK)
+#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
+#define irt_isphi(t) ((t).irt & IRT_ISPHI)
+#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
+#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
+
+/* Stored combined IR opcode and type. */
+typedef uint16_t IROpT;
+
+/* -- IR references ------------------------------------------------------- */
+
+/* IR references. */
+typedef uint16_t IRRef1; /* One stored reference. */
+typedef uint32_t IRRef2; /* Two stored references. */
+typedef uint32_t IRRef; /* Used to pass around references. */
+
+/* Fixed references. */
+enum {
+ REF_BIAS = 0x8000,
+ REF_TRUE = REF_BIAS-3,
+ REF_FALSE = REF_BIAS-2,
+ REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
+ REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
+ REF_FIRST = REF_BIAS+1,
+ REF_DROP = 0xffff
+};
+
+/* Note: IRMlit operands must be < REF_BIAS, too!
+** This allows for fast and uniform manipulation of all operands
+** without looking up the operand mode in lj_ir_mode:
+** - CSE calculates the maximum reference of two operands.
+** This must work with mixed reference/literal operands, too.
+** - DCE marking only checks for operand >= REF_BIAS.
+** - LOOP needs to substitute reference operands.
+** Constant references and literals must not be modified.
+*/
+
+#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
+
+#define irref_isk(ref) ((ref) < REF_BIAS)
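+/*
+** Example (illustrative): constants grow downwards from just below
+** REF_BIAS (0x8000), instructions upwards from REF_FIRST (0x8001).
+** Thus irref_isk() classifies any operand with a single unsigned
+** compare, without consulting lj_ir_mode.
+*/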
+
+/* Tagged IR references (32 bit).
+**
+** +-------+-------+---------------+
+** | irt | flags | ref |
+** +-------+-------+---------------+
+**
+** The tag holds a copy of the IRType and speeds up IR type checks.
+*/
+typedef uint32_t TRef;
+
+#define TREF_REFMASK 0x0000ffff
+#define TREF_FRAME 0x00010000
+#define TREF_CONT 0x00020000
+#define TREF_KEYINDEX 0x00100000
+
+#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
+
+#define tref_ref(tr) ((IRRef1)(tr))
+#define tref_t(tr) ((IRType)((tr)>>24))
+#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
+#define tref_typerange(tr, first, last) \
+ ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
+
+#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
+#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
+#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
+#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
+#define tref_islightud(tr) (tref_istype((tr), IRT_LIGHTUD))
+#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
+#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
+#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
+#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
+#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
+#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
+#define tref_isint(tr) (tref_istype((tr), IRT_INT))
+
+#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
+#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
+#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
+#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
+#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
+#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
+#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
+
+#define tref_isk(tr) (irref_isk(tref_ref((tr))))
+#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
+
+#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
+#define TREF_NIL (TREF_PRI(IRT_NIL))
+#define TREF_FALSE (TREF_PRI(IRT_FALSE))
+#define TREF_TRUE (TREF_PRI(IRT_TRUE))
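+/*
+** Example (illustrative): TREF() packs the 16 bit ref into the low half
+** and the IRType into the top byte:
+**
+**   TRef tr = TREF(REF_FIRST, IRT_INT);
+**   tref_ref(tr)   => 0x8001
+**   tref_type(tr)  => IRT_INT
+*/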
+
+/* -- IR format ----------------------------------------------------------- */
+
+/* IR instruction format (64 bit).
+**
+** 16 16 8 8 8 8
+** +-------+-------+---+---+---+---+
+** | op1 | op2 | t | o | r | s |
+** +-------+-------+---+---+---+---+
+** | op12/i/gco32 | ot | prev | (alternative fields in union)
+** +-------+-------+---+---+---+---+
+** | TValue/gco64 | (2nd IR slot for 64 bit constants)
+** +---------------+-------+-------+
+** 32 16 16
+**
+** prev is only valid prior to register allocation and then reused for r + s.
+*/
+
+typedef union IRIns {
+ struct {
+ LJ_ENDIAN_LOHI(
+ IRRef1 op1; /* IR operand 1. */
+ , IRRef1 op2; /* IR operand 2. */
+ )
+ IROpT ot; /* IR opcode and type (overlaps t and o). */
+ IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
+ };
+ struct {
+ IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
+ LJ_ENDIAN_LOHI(
+ IRType1 t; /* IR type. */
+ , IROp1 o; /* IR opcode. */
+ )
+ LJ_ENDIAN_LOHI(
+ uint8_t r; /* Register allocation (overlaps prev). */
+ , uint8_t s; /* Spill slot allocation (overlaps prev). */
+ )
+ };
+ int32_t i; /* 32 bit signed integer literal (overlaps op12). */
+ GCRef gcr; /* GCobj constant (overlaps op12 or entire slot). */
+ MRef ptr; /* Pointer constant (overlaps op12 or entire slot). */
+ TValue tv; /* TValue constant (overlaps entire slot). */
+} IRIns;
+
+#define ir_isk64(ir) \
+ ((ir)->o == IR_KNUM || (ir)->o == IR_KINT64 || \
+ (LJ_GC64 && \
+ ((ir)->o == IR_KGC || (ir)->o == IR_KPTR || (ir)->o == IR_KKPTR)))
+
+#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)[LJ_GC64].gcr))
+#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
+#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
+#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
+#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
+#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, &(ir)[1].tv)
+#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, &(ir)[1].tv)
+#define ir_k64(ir) check_exp(ir_isk64(ir), &(ir)[1].tv)
+#define ir_kptr(ir) \
+ check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, \
+ mref((ir)[LJ_GC64].ptr, void))
+
+/* A store or any other op with a non-weak guard has a side-effect. */
+static LJ_AINLINE int ir_sideeff(IRIns *ir)
+{
+ return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
+}
+
+LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
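+/*
+** Note (illustrative): ir_sideeff() relies on the assert above. IRMODE
+** stores mode bytes with IRM_W inverted, so non-weak ops have the bit
+** set. (t.irt | ~IRT_GUARD) keeps every mode bit except W and lets W
+** through only for guarded instructions: stores (kind IRM_S) or
+** non-weak guards thus compare >= IRM_S.
+*/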
+
+/* Replace IR instruction with NOP. */
+static LJ_AINLINE void lj_ir_nop(IRIns *ir)
+{
+ ir->ot = IRT(IR_NOP, IRT_NIL);
+ ir->op1 = ir->op2 = 0;
+ ir->prev = 0;
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_ircall.h b/libs/luajit-cmake/luajit/src/lj_ircall.h
new file mode 100644
index 0000000..67fb58a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_ircall.h
@@ -0,0 +1,383 @@
+/*
+** IR CALL* instruction definitions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IRCALL_H
+#define _LJ_IRCALL_H
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+
+/* C call info for CALL* instructions. */
+typedef struct CCallInfo {
+ ASMFunction func; /* Function pointer. */
+ uint32_t flags; /* Number of arguments and flags. */
+} CCallInfo;
+
+#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* # of args. */
+#define CCI_NARGS_MAX 32 /* Max. # of args. */
+
+#define CCI_OTSHIFT 16
+#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
+#define CCI_TYPE(ci) (((ci)->flags>>CCI_OTSHIFT) & IRT_TYPE)
+#define CCI_OPSHIFT 24
+#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
+
+#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
+#define CCI_CALL_A (IR_CALLA << CCI_OPSHIFT)
+#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
+#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
+#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
+#define CCI_CALL_FA (CCI_CALL_A|CCI_CC_FASTCALL)
+#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
+#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
+
+/* C call info flags. */
+#define CCI_T (IRT_GUARD << CCI_OTSHIFT) /* May throw. */
+#define CCI_L 0x0100 /* Implicit L arg. */
+#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
+#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
+#define CCI_VARARG 0x0800 /* Vararg function. */
+
+#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
+#define CCI_CC_SHIFT 12
+/* ORDER CC */
+#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
+#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
+#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
+#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
+
+/* Extra args for SOFTFP, SPLIT 64 bit. */
+#define CCI_XARGS_SHIFT 14
+#define CCI_XARGS(ci) (((ci)->flags >> CCI_XARGS_SHIFT) & 3)
+#define CCI_XA (1u << CCI_XARGS_SHIFT)
+
+#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
+#define CCI_XNARGS(ci) (CCI_NARGS((ci)) + CCI_XARGS((ci)))
+#else
+#define CCI_XNARGS(ci) CCI_NARGS((ci))
+#endif
+
+/* Helpers for conditional function definitions. */
+#define IRCALLCOND_ANY(x) x
+
+#if LJ_TARGET_X86ORX64
+#define IRCALLCOND_FPMATH(x) NULL
+#else
+#define IRCALLCOND_FPMATH(x) x
+#endif
+
+#if LJ_SOFTFP
+#define IRCALLCOND_SOFTFP(x) x
+#if LJ_HASFFI
+#define IRCALLCOND_SOFTFP_FFI(x) x
+#else
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+#else
+#define IRCALLCOND_SOFTFP(x) NULL
+#define IRCALLCOND_SOFTFP_FFI(x) NULL
+#endif
+
+#if LJ_SOFTFP && LJ_TARGET_MIPS
+#define IRCALLCOND_SOFTFP_MIPS(x) x
+#else
+#define IRCALLCOND_SOFTFP_MIPS(x) NULL
+#endif
+
+#if LJ_SOFTFP && LJ_TARGET_MIPS64
+#define IRCALLCOND_SOFTFP_MIPS64(x) x
+#else
+#define IRCALLCOND_SOFTFP_MIPS64(x) NULL
+#endif
+
+#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+#define IRCALLCOND_FP64_FFI(x) x
+#else
+#define IRCALLCOND_FP64_FFI(x) NULL
+#endif
+
+#if LJ_HASFFI
+#define IRCALLCOND_FFI(x) x
+#if LJ_32
+#define IRCALLCOND_FFI32(x) x
+#else
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+#else
+#define IRCALLCOND_FFI(x) NULL
+#define IRCALLCOND_FFI32(x) NULL
+#endif
+
+#if LJ_HASBUFFER
+#define IRCALLCOND_BUFFER(x) x
+#else
+#define IRCALLCOND_BUFFER(x) NULL
+#endif
+
+#if LJ_HASBUFFER && LJ_HASFFI
+#define IRCALLCOND_BUFFFI(x) x
+#else
+#define IRCALLCOND_BUFFFI(x) NULL
+#endif
+
+#if LJ_SOFTFP
+#define XA_FP CCI_XA
+#define XA2_FP (CCI_XA+CCI_XA)
+#else
+#define XA_FP 0
+#define XA2_FP 0
+#endif
+
+#if LJ_SOFTFP32
+#define XA_FP32 CCI_XA
+#define XA2_FP32 (CCI_XA+CCI_XA)
+#else
+#define XA_FP32 0
+#define XA2_FP32 0
+#endif
+
+#if LJ_32
+#define XA_64 CCI_XA
+#define XA2_64 (CCI_XA+CCI_XA)
+#else
+#define XA_64 0
+#define XA2_64 0
+#endif
+
+/* Function definitions for CALL* instructions. */
+#define IRCALLDEF(_) \
+ _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
+ _(ANY, lj_str_find, 4, N, PGC, 0) \
+ _(ANY, lj_str_new, 3, S, STR, CCI_L|CCI_T) \
+ _(ANY, lj_strscan_num, 2, FN, INT, 0) \
+ _(ANY, lj_strfmt_int, 2, FN, STR, CCI_L|CCI_T) \
+ _(ANY, lj_strfmt_num, 2, FN, STR, CCI_L|CCI_T) \
+ _(ANY, lj_strfmt_char, 2, FN, STR, CCI_L|CCI_T) \
+ _(ANY, lj_strfmt_putint, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_strfmt_putnum, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_strfmt_putquoted, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_strfmt_putfxint, 3, L, PGC, XA_64|CCI_T) \
+ _(ANY, lj_strfmt_putfnum_int, 3, L, PGC, XA_FP|CCI_T) \
+ _(ANY, lj_strfmt_putfnum_uint, 3, L, PGC, XA_FP|CCI_T) \
+ _(ANY, lj_strfmt_putfnum, 3, L, PGC, XA_FP|CCI_T) \
+ _(ANY, lj_strfmt_putfstr, 3, L, PGC, CCI_T) \
+ _(ANY, lj_strfmt_putfchar, 3, L, PGC, CCI_T) \
+ _(ANY, lj_buf_putmem, 3, S, PGC, CCI_T) \
+ _(ANY, lj_buf_putstr, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_buf_putchar, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_buf_putstr_reverse, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_buf_putstr_lower, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_buf_putstr_upper, 2, FL, PGC, CCI_T) \
+ _(ANY, lj_buf_putstr_rep, 3, L, PGC, CCI_T) \
+ _(ANY, lj_buf_puttab, 5, L, PGC, CCI_T) \
+ _(BUFFER, lj_bufx_set, 4, S, NIL, 0) \
+ _(BUFFFI, lj_bufx_more, 2, FS, INT, CCI_T) \
+ _(BUFFER, lj_serialize_put, 2, FS, PGC, CCI_T) \
+ _(BUFFER, lj_serialize_get, 2, FS, PTR, CCI_T) \
+ _(BUFFER, lj_serialize_encode, 2, FA, STR, CCI_L|CCI_T) \
+ _(BUFFER, lj_serialize_decode, 3, A, INT, CCI_L|CCI_T) \
+ _(ANY, lj_buf_tostr, 1, FL, STR, CCI_T) \
+ _(ANY, lj_tab_new_ah, 3, A, TAB, CCI_L|CCI_T) \
+ _(ANY, lj_tab_new1, 2, FA, TAB, CCI_L|CCI_T) \
+ _(ANY, lj_tab_dup, 2, FA, TAB, CCI_L|CCI_T) \
+ _(ANY, lj_tab_clear, 1, FS, NIL, 0) \
+ _(ANY, lj_tab_newkey, 3, S, PGC, CCI_L|CCI_T) \
+ _(ANY, lj_tab_keyindex, 2, FL, INT, 0) \
+ _(ANY, lj_vm_next, 2, FL, PTR, 0) \
+ _(ANY, lj_tab_len, 1, FL, INT, 0) \
+ _(ANY, lj_tab_len_hint, 2, FL, INT, 0) \
+ _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
+ _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
+ _(ANY, lj_mem_newgco, 2, FA, PGC, CCI_L|CCI_T) \
+ _(ANY, lj_prng_u64d, 1, FS, NUM, CCI_CASTU64) \
+ _(ANY, lj_vm_modi, 2, FN, INT, 0) \
+ _(ANY, log10, 1, N, NUM, XA_FP) \
+ _(ANY, exp, 1, N, NUM, XA_FP) \
+ _(ANY, sin, 1, N, NUM, XA_FP) \
+ _(ANY, cos, 1, N, NUM, XA_FP) \
+ _(ANY, tan, 1, N, NUM, XA_FP) \
+ _(ANY, asin, 1, N, NUM, XA_FP) \
+ _(ANY, acos, 1, N, NUM, XA_FP) \
+ _(ANY, atan, 1, N, NUM, XA_FP) \
+ _(ANY, sinh, 1, N, NUM, XA_FP) \
+ _(ANY, cosh, 1, N, NUM, XA_FP) \
+ _(ANY, tanh, 1, N, NUM, XA_FP) \
+ _(ANY, fputc, 2, S, INT, 0) \
+ _(ANY, fwrite, 4, S, INT, 0) \
+ _(ANY, fflush, 1, S, INT, 0) \
+ /* ORDER FPM */ \
+ _(FPMATH, lj_vm_floor, 1, N, NUM, XA_FP) \
+ _(FPMATH, lj_vm_ceil, 1, N, NUM, XA_FP) \
+ _(FPMATH, lj_vm_trunc, 1, N, NUM, XA_FP) \
+ _(FPMATH, sqrt, 1, N, NUM, XA_FP) \
+ _(ANY, log, 1, N, NUM, XA_FP) \
+ _(ANY, lj_vm_log2, 1, N, NUM, XA_FP) \
+ _(ANY, pow, 2, N, NUM, XA2_FP) \
+ _(ANY, atan2, 2, N, NUM, XA2_FP) \
+ _(ANY, ldexp, 2, N, NUM, XA_FP) \
+ _(SOFTFP, lj_vm_tobit, 1, N, INT, XA_FP32) \
+ _(SOFTFP, softfp_add, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP, softfp_sub, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP, softfp_mul, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP, softfp_div, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP, softfp_cmp, 2, N, NIL, XA2_FP32) \
+ _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
+ _(SOFTFP, softfp_d2i, 1, N, INT, XA_FP32) \
+ _(SOFTFP_MIPS, lj_vm_sfmin, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP_MIPS, lj_vm_sfmax, 2, N, NUM, XA2_FP32) \
+ _(SOFTFP_MIPS64, lj_vm_tointg, 1, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
+ _(SOFTFP_FFI, softfp_d2ui, 1, N, INT, XA_FP32) \
+ _(SOFTFP_FFI, softfp_d2f, 1, N, FLOAT, XA_FP32) \
+ _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
+ _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
+ _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
+ _(FP64_FFI, fp64_l2d, 1, N, NUM, XA_64) \
+ _(FP64_FFI, fp64_ul2d, 1, N, NUM, XA_64) \
+ _(FP64_FFI, fp64_l2f, 1, N, FLOAT, XA_64) \
+ _(FP64_FFI, fp64_ul2f, 1, N, FLOAT, XA_64) \
+ _(FP64_FFI, fp64_d2l, 1, N, I64, XA_FP) \
+ _(FP64_FFI, fp64_d2ul, 1, N, U64, XA_FP) \
+ _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
+ _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
+ _(FFI, lj_carith_divi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_divu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_modu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powi64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_carith_powu64, 2, N, U64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI, lj_cdata_newv, 4, S, CDATA, CCI_L) \
+ _(FFI, lj_cdata_setfin, 4, S, NIL, CCI_L) \
+ _(FFI, strlen, 1, L, INTP, 0) \
+ _(FFI, memcpy, 3, S, PTR, 0) \
+ _(FFI, memset, 3, S, PTR, 0) \
+ _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_mul64, 2, N, I64, XA2_64|CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_shl64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_shr64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_sar64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_rol64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
+ _(FFI32, lj_carith_ror64, 2, N, U64, XA_64|CCI_NOFPRCLOBBER) \
+ \
+ /* End of list. */
+
+typedef enum {
+#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
+IRCALLDEF(IRCALLENUM)
+#undef IRCALLENUM
+ IRCALL__MAX
+} IRCallID;
+
+LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
+
+LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
+
+/* Soft-float declarations. */
+#if LJ_SOFTFP
+#if LJ_TARGET_ARM
+#define softfp_add __aeabi_dadd
+#define softfp_sub __aeabi_dsub
+#define softfp_mul __aeabi_dmul
+#define softfp_div __aeabi_ddiv
+#define softfp_cmp __aeabi_cdcmple
+#define softfp_i2d __aeabi_i2d
+#define softfp_d2i __aeabi_d2iz
+#define softfp_ui2d __aeabi_ui2d
+#define softfp_f2d __aeabi_f2d
+#define softfp_d2ui __aeabi_d2uiz
+#define softfp_d2f __aeabi_d2f
+#define softfp_i2f __aeabi_i2f
+#define softfp_ui2f __aeabi_ui2f
+#define softfp_f2i __aeabi_f2iz
+#define softfp_f2ui __aeabi_f2uiz
+#define fp64_l2d __aeabi_l2d
+#define fp64_ul2d __aeabi_ul2d
+#define fp64_l2f __aeabi_l2f
+#define fp64_ul2f __aeabi_ul2f
+#if LJ_TARGET_IOS
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#define fp64_d2l __aeabi_d2lz
+#define fp64_d2ul __aeabi_d2ulz
+#define fp64_f2l __aeabi_f2lz
+#define fp64_f2ul __aeabi_f2ulz
+#endif
+#elif LJ_TARGET_MIPS || LJ_TARGET_PPC
+#define softfp_add __adddf3
+#define softfp_sub __subdf3
+#define softfp_mul __muldf3
+#define softfp_div __divdf3
+#define softfp_cmp __ledf2
+#define softfp_i2d __floatsidf
+#define softfp_d2i __fixdfsi
+#define softfp_ui2d __floatunsidf
+#define softfp_f2d __extendsfdf2
+#define softfp_d2ui __fixunsdfsi
+#define softfp_d2f __truncdfsf2
+#define softfp_i2f __floatsisf
+#define softfp_ui2f __floatunsisf
+#define softfp_f2i __fixsfsi
+#define softfp_f2ui __fixunssfsi
+#else
+#error "Missing soft-float definitions for target architecture"
+#endif
+extern double softfp_add(double a, double b);
+extern double softfp_sub(double a, double b);
+extern double softfp_mul(double a, double b);
+extern double softfp_div(double a, double b);
+extern void softfp_cmp(double a, double b);
+extern double softfp_i2d(int32_t a);
+extern int32_t softfp_d2i(double a);
+#if LJ_HASFFI
+extern double softfp_ui2d(uint32_t a);
+extern double softfp_f2d(float a);
+extern uint32_t softfp_d2ui(double a);
+extern float softfp_d2f(double a);
+extern float softfp_i2f(int32_t a);
+extern float softfp_ui2f(uint32_t a);
+extern int32_t softfp_f2i(float a);
+extern uint32_t softfp_f2ui(float a);
+#endif
+#if LJ_TARGET_MIPS
+extern double lj_vm_sfmin(double a, double b);
+extern double lj_vm_sfmax(double a, double b);
+#endif
+#endif
+
+#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
+#if defined(__GNUC__) || defined(__clang__)
+#define fp64_l2d __floatdidf
+#define fp64_ul2d __floatundidf
+#define fp64_l2f __floatdisf
+#define fp64_ul2f __floatundisf
+#define fp64_d2l __fixdfdi
+#define fp64_d2ul __fixunsdfdi
+#define fp64_f2l __fixsfdi
+#define fp64_f2ul __fixunssfdi
+#else
+#error "Missing fp64 helper definitions for this compiler"
+#endif
+#endif
+
+#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
+extern double fp64_l2d(int64_t a);
+extern double fp64_ul2d(uint64_t a);
+extern float fp64_l2f(int64_t a);
+extern float fp64_ul2f(uint64_t a);
+extern int64_t fp64_d2l(double a);
+extern uint64_t fp64_d2ul(double a);
+extern int64_t fp64_f2l(float a);
+extern uint64_t fp64_f2ul(float a);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_iropt.h b/libs/luajit-cmake/luajit/src/lj_iropt.h
new file mode 100644
index 0000000..d239f17
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_iropt.h
@@ -0,0 +1,162 @@
+/*
+** Common header for IR emitter and optimizations.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_IROPT_H
+#define _LJ_IROPT_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* IR emitter. */
+LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
+
+/* Save current IR in J->fold.ins, but do not emit it (yet). */
+static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
+{
+ J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
+}
+
+#define lj_ir_set(J, ot, a, b) \
+ lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
+
+/* Get ref of next IR instruction and optionally grow IR.
+** Note: this may invalidate all IRIns*!
+*/
+static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
+{
+ IRRef ref = J->cur.nins;
+ if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
+ J->cur.nins = ref + 1;
+ return ref;
+}
+
+LJ_FUNC TRef lj_ir_ggfload(jit_State *J, IRType t, uintptr_t ofs);
+
+/* Interning of constants. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
+LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, uint64_t u64);
+LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
+LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
+LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
+LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
+LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
+LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
+LJ_FUNC TRef lj_ir_ktrace(jit_State *J);
+
+#if LJ_64
+#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
+#else
+#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
+#endif
+
+static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
+{
+ TValue tv;
+ tv.n = n;
+ return lj_ir_knum_u64(J, tv.u64);
+}
+
+#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
+#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
+#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
+#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
+#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
+
+/* Special FP constants. */
+#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
+#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
+#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
+
+/* Special 128 bit SIMD constants. */
+#define lj_ir_ksimd(J, idx) \
+ lj_ir_ggfload(J, IRT_NUM, (uintptr_t)LJ_KSIMD(J, idx) - (uintptr_t)J2GG(J))
+
+/* Access to constants. */
+LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
+
+/* Convert IR operand types. */
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
+
+/* Miscellaneous IR ops. */
+LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
+LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
+LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
+
+/* Emit IR instructions with on-the-fly optimizations. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
+
+/* Special return values for the fold functions. */
+enum {
+ NEXTFOLD, /* Couldn't fold, pass on. */
+ RETRYFOLD, /* Retry fold with modified fins. */
+ KINTFOLD, /* Return ref for int constant in fins->i. */
+ FAILFOLD, /* Guard would always fail. */
+ DROPFOLD, /* Guard eliminated. */
+ MAX_FOLD
+};
+
+#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
+#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
+#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
+#define LEFTFOLD (J->fold.ins.op1)
+#define RIGHTFOLD (J->fold.ins.op2)
+#define CSEFOLD (lj_opt_cse(J))
+#define EMITFOLD (lj_ir_emit(J))
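+/*
+** Note (illustrative): fold rules return either a regular TRef or one
+** of the small enum values above, distinguished by magnitude in
+** lj_opt_fold(): RETRYFOLD re-runs folding on the modified fins,
+** DROPFOLD elides a guard entirely, FAILFOLD aborts the trace because
+** the guard would always fail.
+*/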
+
+/* Load/store forwarding. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
+LJ_FUNC int LJ_FASTCALL lj_opt_fwd_sbuf(jit_State *J, IRRef lim);
+LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
+
+/* Dead-store elimination. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
+
+/* Narrowing. */
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
+#if LJ_HASFFI
+LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
+#endif
+LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op);
+LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
+LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc);
+LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
+
+/* Optimization passes. */
+LJ_FUNC void lj_opt_dce(jit_State *J);
+LJ_FUNC int lj_opt_loop(jit_State *J);
+#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
+LJ_FUNC void lj_opt_split(jit_State *J);
+#else
+#define lj_opt_split(J) UNUSED(J)
+#endif
+LJ_FUNC void lj_opt_sink(jit_State *J);
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_jit.h b/libs/luajit-cmake/luajit/src/lj_jit.h
new file mode 100644
index 0000000..32b3861
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_jit.h
@@ -0,0 +1,528 @@
+/*
+** Common definitions for the JIT compiler.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_JIT_H
+#define _LJ_JIT_H
+
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_ir.h"
+
+/* -- JIT engine flags ---------------------------------------------------- */
+
+/* General JIT engine flags. 4 bits. */
+#define JIT_F_ON 0x00000001
+
+/* CPU-specific JIT engine flags. 12 bits. Flags and strings must match. */
+#define JIT_F_CPU 0x00000010
+
+#if LJ_TARGET_X86ORX64
+
+#define JIT_F_SSE3 (JIT_F_CPU << 0)
+#define JIT_F_SSE4_1 (JIT_F_CPU << 1)
+#define JIT_F_BMI2 (JIT_F_CPU << 2)
+
+#define JIT_F_CPUSTRING "\4SSE3\6SSE4.1\4BMI2"
+
+#elif LJ_TARGET_ARM
+
+#define JIT_F_ARMV6_ (JIT_F_CPU << 0)
+#define JIT_F_ARMV6T2_ (JIT_F_CPU << 1)
+#define JIT_F_ARMV7 (JIT_F_CPU << 2)
+#define JIT_F_ARMV8 (JIT_F_CPU << 3)
+#define JIT_F_VFPV2 (JIT_F_CPU << 4)
+#define JIT_F_VFPV3 (JIT_F_CPU << 5)
+
+#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
+#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
+#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
+
+#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5ARMv8\5VFPv2\5VFPv3"
+
+#elif LJ_TARGET_PPC
+
+#define JIT_F_SQRT (JIT_F_CPU << 0)
+#define JIT_F_ROUND (JIT_F_CPU << 1)
+
+#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
+
+#elif LJ_TARGET_MIPS
+
+#define JIT_F_MIPSXXR2 (JIT_F_CPU << 0)
+
+#if LJ_TARGET_MIPS32
+#if LJ_TARGET_MIPSR6
+#define JIT_F_CPUSTRING "\010MIPS32R6"
+#else
+#define JIT_F_CPUSTRING "\010MIPS32R2"
+#endif
+#else
+#if LJ_TARGET_MIPSR6
+#define JIT_F_CPUSTRING "\010MIPS64R6"
+#else
+#define JIT_F_CPUSTRING "\010MIPS64R2"
+#endif
+#endif
+
+#else
+
+#define JIT_F_CPUSTRING ""
+
+#endif
+
+/* Optimization flags. 12 bits. */
+#define JIT_F_OPT 0x00010000
+#define JIT_F_OPT_MASK 0x0fff0000
+
+#define JIT_F_OPT_FOLD (JIT_F_OPT << 0)
+#define JIT_F_OPT_CSE (JIT_F_OPT << 1)
+#define JIT_F_OPT_DCE (JIT_F_OPT << 2)
+#define JIT_F_OPT_FWD (JIT_F_OPT << 3)
+#define JIT_F_OPT_DSE (JIT_F_OPT << 4)
+#define JIT_F_OPT_NARROW (JIT_F_OPT << 5)
+#define JIT_F_OPT_LOOP (JIT_F_OPT << 6)
+#define JIT_F_OPT_ABC (JIT_F_OPT << 7)
+#define JIT_F_OPT_SINK (JIT_F_OPT << 8)
+#define JIT_F_OPT_FUSE (JIT_F_OPT << 9)
+
+/* Optimizations names for -O. Must match the order above. */
+#define JIT_F_OPTSTRING \
+ "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
+
+/* Optimization levels set a fixed combination of flags. */
+#define JIT_F_OPT_0 0
+#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
+#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
+#define JIT_F_OPT_3 (JIT_F_OPT_2|\
+ JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
+#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
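+/*
+** Example (illustrative): -O1 enables fold/cse/dce, -O2 additionally
+** narrow/loop, and -O3 (the default) adds fwd/dse/abc/sink/fuse on top.
+*/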
+
+/* -- JIT engine parameters ----------------------------------------------- */
+
+#if LJ_TARGET_WINDOWS || LJ_64
+/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
+#define JIT_P_sizemcode_DEFAULT 64
+#else
+/* Could go as low as 4K, but the mmap() overhead would be rather high. */
+#define JIT_P_sizemcode_DEFAULT 32
+#endif
+
+/* Optimization parameters and their defaults. Length is a char in octal! */
+#define JIT_PARAMDEF(_) \
+ _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
+ _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
+ _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
+ _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
+ _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
+ _(\011, minstitch, 0) /* Min. # of IR ins for a stitched trace. */ \
+ \
+ _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
+ _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
+ _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
+ \
+  _(\012, instunroll,	4)	/* Max. unroll for unstable loops. */ \
+ _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
+ _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
+ _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
+ \
+ /* Size of each machine code area (in KBytes). */ \
+ _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
+ /* Max. total size of all machine code areas (in KBytes). */ \
+ _(\010, maxmcode, 512) \
+ /* End of list. */
+
+enum {
+#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
+JIT_PARAMDEF(JIT_PARAMENUM)
+#undef JIT_PARAMENUM
+ JIT_P__MAX
+};
+
+#define JIT_PARAMSTR(len, name, value) #len #name
+#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
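+/*
+** Note (illustrative): these strings are length-prefixed, one entry per
+** name: "\010maxtrace" is the 8-character (octal \010) name "maxtrace".
+** JIT_F_CPUSTRING and JIT_F_OPTSTRING use the same encoding.
+*/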
+
+/* -- JIT engine data structures ------------------------------------------ */
+
+/* Trace compiler state. */
+typedef enum {
+ LJ_TRACE_IDLE, /* Trace compiler idle. */
+ LJ_TRACE_ACTIVE = 0x10,
+ LJ_TRACE_RECORD, /* Bytecode recording active. */
+ LJ_TRACE_RECORD_1ST, /* Record 1st instruction, too. */
+ LJ_TRACE_START, /* New trace started. */
+ LJ_TRACE_END, /* End of trace. */
+ LJ_TRACE_ASM, /* Assemble trace. */
+ LJ_TRACE_ERR /* Trace aborted with error. */
+} TraceState;
+
+/* Post-processing action. */
+typedef enum {
+ LJ_POST_NONE, /* No action. */
+ LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
+ LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
+ LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
+ LJ_POST_FIXBOOL, /* Fixup boolean result. */
+ LJ_POST_FIXCONST, /* Fixup constant results. */
+ LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
+} PostProc;
+
+/* Machine code type. */
+#if LJ_TARGET_X86ORX64
+typedef uint8_t MCode;
+#else
+typedef uint32_t MCode;
+#endif
+
+/* Linked list of MCode areas. */
+typedef struct MCLink {
+ MCode *next; /* Next area. */
+ size_t size; /* Size of current area. */
+} MCLink;
+
+/* Stack snapshot header. */
+typedef struct SnapShot {
+ uint32_t mapofs; /* Offset into snapshot map. */
+ IRRef1 ref; /* First IR ref for this snapshot. */
+ uint16_t mcofs; /* Offset into machine code in MCode units. */
+ uint8_t nslots; /* Number of valid slots. */
+ uint8_t topslot; /* Maximum frame extent. */
+ uint8_t nent; /* Number of compressed entries. */
+ uint8_t count; /* Count of taken exits for this snapshot. */
+} SnapShot;
+
+#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
+
+/* Compressed snapshot entry. */
+typedef uint32_t SnapEntry;
+
+#define SNAP_FRAME 0x010000 /* Frame slot. */
+#define SNAP_CONT 0x020000 /* Continuation slot. */
+#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
+#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
+#define SNAP_KEYINDEX 0x100000 /* Traversal key index. */
+LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
+LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
+LJ_STATIC_ASSERT(SNAP_KEYINDEX == TREF_KEYINDEX);
+
+#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
+#define SNAP_TR(slot, tr) \
+ (((SnapEntry)(slot) << 24) + \
+ ((tr) & (TREF_KEYINDEX|TREF_CONT|TREF_FRAME|TREF_REFMASK)))
+#if !LJ_FR2
+#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
+#endif
+#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
+#define snap_ref(sn) ((sn) & 0xffff)
+#define snap_slot(sn) ((BCReg)((sn) >> 24))
+#define snap_isframe(sn) ((sn) & SNAP_FRAME)
+#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
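+
+/* Bit layout of a SnapEntry, high to low: slot in bits 31-24, flags in
+** bits 23-16, IR reference in bits 15-0. Example (assuming REF_BIAS is
+** 0x8000): SNAP(5, SNAP_FRAME, 0x8123) == 0x05018123.
+*/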
+
+static LJ_AINLINE const BCIns *snap_pc(SnapEntry *sn)
+{
+#if LJ_FR2
+ uint64_t pcbase;
+ memcpy(&pcbase, sn, sizeof(uint64_t));
+ return (const BCIns *)(pcbase >> 8);
+#else
+ return (const BCIns *)(uintptr_t)*sn;
+#endif
+}
+
+/* Snapshot and exit numbers. */
+typedef uint32_t SnapNo;
+typedef uint32_t ExitNo;
+
+/* Trace number. */
+typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
+typedef uint16_t TraceNo1; /* Stored trace number. */
+
+/* Type of link. ORDER LJ_TRLINK */
+typedef enum {
+ LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
+ LJ_TRLINK_ROOT, /* Link to other root trace. */
+ LJ_TRLINK_LOOP, /* Loop to same trace. */
+ LJ_TRLINK_TAILREC, /* Tail-recursion. */
+ LJ_TRLINK_UPREC, /* Up-recursion. */
+ LJ_TRLINK_DOWNREC, /* Down-recursion. */
+ LJ_TRLINK_INTERP, /* Fallback to interpreter. */
+ LJ_TRLINK_RETURN, /* Return to interpreter. */
+ LJ_TRLINK_STITCH /* Trace stitching. */
+} TraceLink;
+
+/* Trace object. */
+typedef struct GCtrace {
+ GCHeader;
+ uint16_t nsnap; /* Number of snapshots. */
+ IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
+#if LJ_GC64
+ uint32_t unused_gc64;
+#endif
+ GCRef gclist;
+ IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
+ IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
+ uint32_t nsnapmap; /* Number of snapshot map elements. */
+ SnapShot *snap; /* Snapshot array. */
+ SnapEntry *snapmap; /* Snapshot map. */
+ GCRef startpt; /* Starting prototype. */
+ MRef startpc; /* Bytecode PC of starting instruction. */
+ BCIns startins; /* Original bytecode of starting instruction. */
+ MSize szmcode; /* Size of machine code. */
+ MCode *mcode; /* Start of machine code. */
+ MSize mcloop; /* Offset of loop start in machine code. */
+ uint16_t nchild; /* Number of child traces (root trace only). */
+ uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
+ TraceNo1 traceno; /* Trace number. */
+ TraceNo1 link; /* Linked trace (or self for loops). */
+ TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
+ TraceNo1 nextroot; /* Next root trace for same prototype. */
+ TraceNo1 nextside; /* Next side trace of same root trace. */
+ uint8_t sinktags; /* Trace has SINK tags. */
+ uint8_t topslot; /* Top stack slot already checked to be allocated. */
+ uint8_t linktype; /* Type of link. */
+ uint8_t unused1;
+#ifdef LUAJIT_USE_GDBJIT
+ void *gdbjit_entry; /* GDB JIT entry. */
+#endif
+} GCtrace;
+
+#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
+#define traceref(J, n) \
+ check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
+
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
+
+static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
+{
+ if (snap+1 == &T->snap[T->nsnap])
+ return T->nsnapmap;
+ else
+ return (snap+1)->mapofs;
+}
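+
+/* Usage sketch: the entries of a snapshot occupy the half-open range
+** [snap->mapofs, snap_nextofs(T, snap)) in T->snapmap, so a walker does:
+**
+**   MSize n, nend = snap_nextofs(T, snap);
+**   for (n = snap->mapofs; n < nend; n++)
+**     decode T->snapmap[n] via snap_slot(), snap_ref(), snap_isframe().
+*/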
+
+/* Round-robin penalty cache for bytecodes leading to aborted traces. */
+typedef struct HotPenalty {
+ MRef pc; /* Starting bytecode PC. */
+ uint16_t val; /* Penalty value, i.e. hotcount start. */
+ uint16_t reason; /* Abort reason (really TraceErr). */
+} HotPenalty;
+
+#define PENALTY_SLOTS 64 /* Penalty cache slots. Must be a power of 2. */
+#define PENALTY_MIN (36*2) /* Minimum penalty value. */
+#define PENALTY_MAX 60000 /* Maximum penalty value. */
+#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
+
+/* Round-robin backpropagation cache for narrowing conversions. */
+typedef struct BPropEntry {
+ IRRef1 key; /* Key: original reference. */
+ IRRef1 val; /* Value: reference after conversion. */
+ IRRef mode; /* Mode for this entry (currently IRCONV_*). */
+} BPropEntry;
+
+/* Number of slots for the backpropagation cache. Must be a power of 2. */
+#define BPROP_SLOTS 16
+
+/* Scalar evolution analysis cache. */
+typedef struct ScEvEntry {
+ MRef pc; /* Bytecode PC of FORI. */
+ IRRef1 idx; /* Index reference. */
+ IRRef1 start; /* Constant start reference. */
+ IRRef1 stop; /* Constant stop reference. */
+ IRRef1 step; /* Constant step reference. */
+ IRType1 t; /* Scalar type. */
+ uint8_t dir; /* Direction. 1: +, 0: -. */
+} ScEvEntry;
+
+/* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */
+typedef struct RBCHashEntry {
+ MRef pc; /* Bytecode PC. */
+ GCRef pt; /* Prototype. */
+ IRRef ref; /* IR reference. */
+} RBCHashEntry;
+
+/* Number of slots in the reverse bytecode hash table. Must be a power of 2. */
+#define RBCHASH_SLOTS 8
+
+/* 128 bit SIMD constants. */
+enum {
+ LJ_KSIMD_ABS,
+ LJ_KSIMD_NEG,
+ LJ_KSIMD__MAX
+};
+
+enum {
+#if LJ_TARGET_X86ORX64
+ LJ_K64_TOBIT, /* 2^52 + 2^51 */
+ LJ_K64_2P64, /* 2^64 */
+ LJ_K64_M2P64, /* -2^64 */
+#if LJ_32
+ LJ_K64_M2P64_31, /* -2^64 or -2^31 */
+#else
+ LJ_K64_M2P64_31 = LJ_K64_M2P64,
+#endif
+#endif
+#if LJ_TARGET_MIPS
+ LJ_K64_2P31, /* 2^31 */
+#if LJ_64
+ LJ_K64_2P63, /* 2^63 */
+ LJ_K64_M2P64, /* -2^64 */
+#endif
+#endif
+ LJ_K64__MAX,
+};
+#define LJ_K64__USED (LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS)
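+
+/* Background note on LJ_K64_TOBIT: adding 2^52+2^51 to a double whose
+** value fits in an int32_t shifts that integer into the low 32 bits of
+** the IEEE-754 mantissa, so the backend can fetch it with a plain 32 bit
+** load. This is the usual bias trick behind the fast number-to-bit path.
+*/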
+
+enum {
+#if LJ_TARGET_X86ORX64
+ LJ_K32_M2P64_31, /* -2^64 or -2^31 */
+#endif
+#if LJ_TARGET_PPC
+ LJ_K32_2P52_2P31, /* 2^52 + 2^31 */
+ LJ_K32_2P52, /* 2^52 */
+#endif
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+ LJ_K32_2P31, /* 2^31 */
+#endif
+#if LJ_TARGET_MIPS64
+ LJ_K32_2P63, /* 2^63 */
+ LJ_K32_M2P64, /* -2^64 */
+#endif
+ LJ_K32__MAX
+};
+#define LJ_K32__USED (LJ_TARGET_X86ORX64 || LJ_TARGET_PPC || LJ_TARGET_MIPS)
+
+/* Get 16 byte aligned pointer to SIMD constant. */
+#define LJ_KSIMD(J, n) \
+ ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
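+
+/* Alignment sketch: each SIMD constant needs 16 bytes (2 TValues). The
+** ksimd[] array reserves 2 slots per constant plus 1 spare slot, so
+** rounding &ksimd[2*n] up via (+15 & ~15) never leaves the array.
+*/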
+
+/* Set/reset flag to activate the SPLIT pass for the current trace. */
+#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
+#define lj_needsplit(J) (J->needsplit = 1)
+#define lj_resetsplit(J) (J->needsplit = 0)
+#else
+#define lj_needsplit(J) UNUSED(J)
+#define lj_resetsplit(J) UNUSED(J)
+#endif
+
+/* Fold state is used to fold instructions on-the-fly. */
+typedef struct FoldState {
+ IRIns ins; /* Currently emitted instruction. */
+ IRIns left[2]; /* Instruction referenced by left operand. */
+ IRIns right[2]; /* Instruction referenced by right operand. */
+} FoldState;
+
+/* JIT compiler state. */
+typedef struct jit_State {
+ GCtrace cur; /* Current trace. */
+ GCtrace *curfinal; /* Final address of current trace (set during asm). */
+
+ lua_State *L; /* Current Lua state. */
+ const BCIns *pc; /* Current PC. */
+ GCfunc *fn; /* Current function. */
+ GCproto *pt; /* Current prototype. */
+ TRef *base; /* Current frame base, points into J->slots. */
+
+ uint32_t flags; /* JIT engine flags. */
+ BCReg maxslot; /* Relative to baseslot. */
+ BCReg baseslot; /* Current frame base, offset into J->slots. */
+
+ uint8_t mergesnap; /* Allowed to merge with next snapshot. */
+ uint8_t needsnap; /* Need snapshot before recording next bytecode. */
+ IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
+ uint8_t bcskip; /* Number of bytecode instructions to skip. */
+
+ FoldState fold; /* Fold state. */
+
+ const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
+ MSize bc_extent; /* Extent of the range. */
+
+ TraceState state; /* Trace compiler state. */
+
+ int32_t instunroll; /* Unroll counter for unstable loops. */
+ int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
+ int32_t tailcalled; /* Number of successive tailcalls. */
+ int32_t framedepth; /* Current frame depth. */
+ int32_t retdepth; /* Return frame depth (count of RETF). */
+
+#if LJ_K32__USED
+ uint32_t k32[LJ_K32__MAX]; /* Common 4 byte constants used by backends. */
+#endif
+ TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
+#if LJ_K64__USED
+ TValue k64[LJ_K64__MAX]; /* Common 8 byte constants. */
+#endif
+
+ IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
+ IRRef irtoplim; /* Upper limit of instruction buffer (biased). */
+ IRRef irbotlim; /* Lower limit of instruction buffer (biased). */
+ IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
+
+ MSize sizesnap; /* Size of temp. snapshot buffer. */
+ SnapShot *snapbuf; /* Temp. snapshot buffer. */
+ SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
+ MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
+
+ PostProc postproc; /* Required post-processing after execution. */
+#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
+ uint8_t needsplit; /* Need SPLIT pass. */
+#endif
+ uint8_t retryrec; /* Retry recording. */
+
+ GCRef *trace; /* Array of traces. */
+ TraceNo freetrace; /* Start of scan for next free trace. */
+ MSize sizetrace; /* Size of trace array. */
+ IRRef1 ktrace; /* Reference to KGC with GCtrace. */
+
+ IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
+ TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
+
+ int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
+
+ MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
+
+ HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
+ uint32_t penaltyslot; /* Round-robin index into penalty slots. */
+
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ RBCHashEntry rbchash[RBCHASH_SLOTS]; /* Reverse bytecode map. */
+#endif
+
+ BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
+ uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
+
+ ScEvEntry scev; /* Scalar evolution analysis cache. */
+
+ const BCIns *startpc; /* Bytecode PC of starting instruction. */
+ TraceNo parent; /* Parent of current side trace (0 for root traces). */
+ ExitNo exitno; /* Exit number in parent of current side trace. */
+ int exitcode; /* Exit code from unwound trace. */
+
+ BCIns *patchpc; /* PC for pending re-patch. */
+ BCIns patchins; /* Instruction for pending re-patch. */
+
+ int mcprot; /* Protection of current mcode area. */
+ MCode *mcarea; /* Base of current mcode area. */
+ MCode *mctop; /* Top of current mcode area. */
+ MCode *mcbot; /* Bottom of current mcode area. */
+ size_t szmcarea; /* Size of current mcode area. */
+ size_t szallmcarea; /* Total size of all allocated mcode areas. */
+
+ TValue errinfo; /* Additional info element for trace errors. */
+
+#if LJ_HASPROFILE
+ GCproto *prev_pt; /* Previous prototype. */
+ BCLine prev_line; /* Previous line. */
+ int prof_mode; /* Profiling mode: 0, 'f', 'l'. */
+#endif
+} jit_State;
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertJ(c, ...) lj_assertG_(J2G(J), (c), __VA_ARGS__)
+#else
+#define lj_assertJ(c, ...) ((void)J)
+#endif
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_lex.c b/libs/luajit-cmake/luajit/src/lj_lex.c
new file mode 100644
index 0000000..463a87c
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_lex.c
@@ -0,0 +1,514 @@
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_lex_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#if LJ_HASFFI
+#include "lj_tab.h"
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#include "lualib.h"
+#endif
+#include "lj_state.h"
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+
+/* Lua lexer token names. */
+static const char *const tokennames[] = {
+#define TKSTR1(name) #name,
+#define TKSTR2(name, sym) #sym,
+TKDEF(TKSTR1, TKSTR2)
+#undef TKSTR1
+#undef TKSTR2
+ NULL
+};
+
+/* -- Buffer handling ----------------------------------------------------- */
+
+#define LEX_EOF (-1)
+#define lex_iseol(ls) (ls->c == '\n' || ls->c == '\r')
+
+/* Get more input from reader. */
+static LJ_NOINLINE LexChar lex_more(LexState *ls)
+{
+ size_t sz;
+ const char *p = ls->rfunc(ls->L, ls->rdata, &sz);
+ if (p == NULL || sz == 0) return LEX_EOF;
+ if (sz >= LJ_MAX_BUF) {
+ if (sz != ~(size_t)0) lj_err_mem(ls->L);
+ sz = ~(uintptr_t)0 - (uintptr_t)p;
+ if (sz >= LJ_MAX_BUF) sz = LJ_MAX_BUF-1;
+ ls->endmark = 1;
+ }
+ ls->pe = p + sz;
+ ls->p = p + 1;
+ return (LexChar)(uint8_t)p[0];
+}
+
+/* Get next character. */
+static LJ_AINLINE LexChar lex_next(LexState *ls)
+{
+ return (ls->c = ls->p < ls->pe ? (LexChar)(uint8_t)*ls->p++ : lex_more(ls));
+}
+
+/* Save character. */
+static LJ_AINLINE void lex_save(LexState *ls, LexChar c)
+{
+ lj_buf_putb(&ls->sb, c);
+}
+
+/* Save previous character and get next character. */
+static LJ_AINLINE LexChar lex_savenext(LexState *ls)
+{
+ lex_save(ls, ls->c);
+ return lex_next(ls);
+}
+
+/* Skip line break. Handles "\n", "\r", "\r\n" or "\n\r". */
+static void lex_newline(LexState *ls)
+{
+ LexChar old = ls->c;
+ lj_assertLS(lex_iseol(ls), "bad usage");
+ lex_next(ls); /* Skip "\n" or "\r". */
+ if (lex_iseol(ls) && ls->c != old) lex_next(ls); /* Skip "\n\r" or "\r\n". */
+ if (++ls->linenumber >= LJ_MAX_LINE)
+ lj_lex_error(ls, ls->tok, LJ_ERR_XLINES);
+}
+
+/* -- Scanner for terminals ----------------------------------------------- */
+
+/* Parse a number literal. */
+static void lex_number(LexState *ls, TValue *tv)
+{
+ StrScanFmt fmt;
+ LexChar c, xp = 'e';
+ lj_assertLS(lj_char_isdigit(ls->c), "bad usage");
+ if ((c = ls->c) == '0' && (lex_savenext(ls) | 0x20) == 'x')
+ xp = 'p';
+ while (lj_char_isident(ls->c) || ls->c == '.' ||
+ ((ls->c == '-' || ls->c == '+') && (c | 0x20) == xp)) {
+ c = ls->c;
+ lex_savenext(ls);
+ }
+ lex_save(ls, '\0');
+ fmt = lj_strscan_scan((const uint8_t *)ls->sb.b, sbuflen(&ls->sb)-1, tv,
+ (LJ_DUALNUM ? STRSCAN_OPT_TOINT : STRSCAN_OPT_TONUM) |
+ (LJ_HASFFI ? (STRSCAN_OPT_LL|STRSCAN_OPT_IMAG) : 0));
+ if (LJ_DUALNUM && fmt == STRSCAN_INT) {
+ setitype(tv, LJ_TISNUM);
+ } else if (fmt == STRSCAN_NUM) {
+ /* Already in correct format. */
+#if LJ_HASFFI
+ } else if (fmt != STRSCAN_ERROR) {
+ lua_State *L = ls->L;
+ GCcdata *cd;
+ lj_assertLS(fmt == STRSCAN_I64 || fmt == STRSCAN_U64 || fmt == STRSCAN_IMAG,
+ "unexpected number format %d", fmt);
+ ctype_loadffi(L);
+ if (fmt == STRSCAN_IMAG) {
+ cd = lj_cdata_new_(L, CTID_COMPLEX_DOUBLE, 2*sizeof(double));
+ ((double *)cdataptr(cd))[0] = 0;
+ ((double *)cdataptr(cd))[1] = numV(tv);
+ } else {
+ cd = lj_cdata_new_(L, fmt==STRSCAN_I64 ? CTID_INT64 : CTID_UINT64, 8);
+ *(uint64_t *)cdataptr(cd) = tv->u64;
+ }
+ lj_parse_keepcdata(ls, tv, cd);
+#endif
+ } else {
+ lj_assertLS(fmt == STRSCAN_ERROR,
+ "unexpected number format %d", fmt);
+ lj_lex_error(ls, TK_number, LJ_ERR_XNUMBER);
+ }
+}
+
+/* Skip equal signs for "[=...=[" and "]=...=]" and return their count. */
+static int lex_skipeq(LexState *ls)
+{
+ int count = 0;
+ LexChar s = ls->c;
+ lj_assertLS(s == '[' || s == ']', "bad usage");
+ while (lex_savenext(ls) == '=' && count < 0x20000000)
+ count++;
+ return (ls->c == s) ? count : (-count) - 1;
+}
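+
+/* Return value examples: for "[==[" the count is 2 and ls->c is '[' again,
+** so 2 is returned (a level-2 long bracket). For "[=a" the '=' run ends on
+** a mismatch, giving (-1)-1 == -2; a plain '[' with no '=' yields -1.
+*/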
+
+/* Parse a long string or long comment (tv set to NULL). */
+static void lex_longstring(LexState *ls, TValue *tv, int sep)
+{
+ lex_savenext(ls); /* Skip second '['. */
+ if (lex_iseol(ls)) /* Skip initial newline. */
+ lex_newline(ls);
+ for (;;) {
+ switch (ls->c) {
+ case LEX_EOF:
+ lj_lex_error(ls, TK_eof, tv ? LJ_ERR_XLSTR : LJ_ERR_XLCOM);
+ break;
+ case ']':
+ if (lex_skipeq(ls) == sep) {
+ lex_savenext(ls); /* Skip second ']'. */
+ goto endloop;
+ }
+ break;
+ case '\n':
+ case '\r':
+ lex_save(ls, '\n');
+ lex_newline(ls);
+ if (!tv) lj_buf_reset(&ls->sb); /* Don't waste space for comments. */
+ break;
+ default:
+ lex_savenext(ls);
+ break;
+ }
+ } endloop:
+ if (tv) {
+ GCstr *str = lj_parse_keepstr(ls, ls->sb.b + (2 + (MSize)sep),
+ sbuflen(&ls->sb) - 2*(2 + (MSize)sep));
+ setstrV(ls->L, tv, str);
+ }
+}
+
+/* Parse a string. */
+static void lex_string(LexState *ls, TValue *tv)
+{
+ LexChar delim = ls->c; /* Delimiter is '\'' or '"'. */
+ lex_savenext(ls);
+ while (ls->c != delim) {
+ switch (ls->c) {
+ case LEX_EOF:
+ lj_lex_error(ls, TK_eof, LJ_ERR_XSTR);
+ continue;
+ case '\n':
+ case '\r':
+ lj_lex_error(ls, TK_string, LJ_ERR_XSTR);
+ continue;
+ case '\\': {
+ LexChar c = lex_next(ls); /* Skip the '\\'. */
+ switch (c) {
+ case 'a': c = '\a'; break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+ case 'x': /* Hexadecimal escape '\xXX'. */
+ c = (lex_next(ls) & 15u) << 4;
+ if (!lj_char_isdigit(ls->c)) {
+ if (!lj_char_isxdigit(ls->c)) goto err_xesc;
+ c += 9 << 4;
+ }
+ c += (lex_next(ls) & 15u);
+ if (!lj_char_isdigit(ls->c)) {
+ if (!lj_char_isxdigit(ls->c)) goto err_xesc;
+ c += 9;
+ }
+ break;
+ case 'u': /* Unicode escape '\u{XX...}'. */
+ if (lex_next(ls) != '{') goto err_xesc;
+ lex_next(ls);
+ c = 0;
+ do {
+ c = (c << 4) | (ls->c & 15u);
+ if (!lj_char_isdigit(ls->c)) {
+ if (!lj_char_isxdigit(ls->c)) goto err_xesc;
+ c += 9;
+ }
+ if (c >= 0x110000) goto err_xesc; /* Out of Unicode range. */
+ } while (lex_next(ls) != '}');
+ if (c < 0x800) {
+ if (c < 0x80) break;
+ lex_save(ls, 0xc0 | (c >> 6));
+ } else {
+ if (c >= 0x10000) {
+ lex_save(ls, 0xf0 | (c >> 18));
+ lex_save(ls, 0x80 | ((c >> 12) & 0x3f));
+ } else {
+ if (c >= 0xd800 && c < 0xe000) goto err_xesc; /* No surrogates. */
+ lex_save(ls, 0xe0 | (c >> 12));
+ }
+ lex_save(ls, 0x80 | ((c >> 6) & 0x3f));
+ }
+ c = 0x80 | (c & 0x3f);
+ break;
+ case 'z': /* Skip whitespace. */
+ lex_next(ls);
+ while (lj_char_isspace(ls->c))
+ if (lex_iseol(ls)) lex_newline(ls); else lex_next(ls);
+ continue;
+ case '\n': case '\r': lex_save(ls, '\n'); lex_newline(ls); continue;
+ case '\\': case '\"': case '\'': break;
+ case LEX_EOF: continue;
+ default:
+ if (!lj_char_isdigit(c))
+ goto err_xesc;
+ c -= '0'; /* Decimal escape '\ddd'. */
+ if (lj_char_isdigit(lex_next(ls))) {
+ c = c*10 + (ls->c - '0');
+ if (lj_char_isdigit(lex_next(ls))) {
+ c = c*10 + (ls->c - '0');
+ if (c > 255) {
+ err_xesc:
+ lj_lex_error(ls, TK_string, LJ_ERR_XESC);
+ }
+ lex_next(ls);
+ }
+ }
+ lex_save(ls, c);
+ continue;
+ }
+ lex_save(ls, c);
+ lex_next(ls);
+ continue;
+ }
+ default:
+ lex_savenext(ls);
+ break;
+ }
+ }
+ lex_savenext(ls); /* Skip trailing delimiter. */
+ setstrV(ls->L, tv,
+ lj_parse_keepstr(ls, ls->sb.b+1, sbuflen(&ls->sb)-2));
+}
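+
+/* Escape examples: "\x41" and "\65" both decode to 'A'. "\u{20AC}" is
+** saved as the three UTF-8 bytes 0xe2 0x82 0xac. "\z" skips any following
+** whitespace, newlines included, without saving anything.
+*/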
+
+/* -- Main lexical scanner ------------------------------------------------ */
+
+/* Get next lexical token. */
+static LexToken lex_scan(LexState *ls, TValue *tv)
+{
+ lj_buf_reset(&ls->sb);
+ for (;;) {
+ if (lj_char_isident(ls->c)) {
+ GCstr *s;
+ if (lj_char_isdigit(ls->c)) { /* Numeric literal. */
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ /* Identifier or reserved word. */
+ do {
+ lex_savenext(ls);
+ } while (lj_char_isident(ls->c));
+ s = lj_parse_keepstr(ls, ls->sb.b, sbuflen(&ls->sb));
+ setstrV(ls->L, tv, s);
+ if (s->reserved > 0) /* Reserved word? */
+ return TK_OFS + s->reserved;
+ return TK_name;
+ }
+ switch (ls->c) {
+ case '\n':
+ case '\r':
+ lex_newline(ls);
+ continue;
+ case ' ':
+ case '\t':
+ case '\v':
+ case '\f':
+ lex_next(ls);
+ continue;
+ case '-':
+ lex_next(ls);
+ if (ls->c != '-') return '-';
+ lex_next(ls);
+ if (ls->c == '[') { /* Long comment "--[=*[...]=*]". */
+ int sep = lex_skipeq(ls);
+ lj_buf_reset(&ls->sb); /* lex_skipeq() may dirty the buffer. */
+ if (sep >= 0) {
+ lex_longstring(ls, NULL, sep);
+ lj_buf_reset(&ls->sb);
+ continue;
+ }
+ }
+ /* Short comment "--.*\n". */
+ while (!lex_iseol(ls) && ls->c != LEX_EOF)
+ lex_next(ls);
+ continue;
+ case '[': {
+ int sep = lex_skipeq(ls);
+ if (sep >= 0) {
+ lex_longstring(ls, tv, sep);
+ return TK_string;
+ } else if (sep == -1) {
+ return '[';
+ } else {
+ lj_lex_error(ls, TK_string, LJ_ERR_XLDELIM);
+ continue;
+ }
+ }
+ case '=':
+ lex_next(ls);
+ if (ls->c != '=') return '='; else { lex_next(ls); return TK_eq; }
+ case '<':
+ lex_next(ls);
+ if (ls->c != '=') return '<'; else { lex_next(ls); return TK_le; }
+ case '>':
+ lex_next(ls);
+ if (ls->c != '=') return '>'; else { lex_next(ls); return TK_ge; }
+ case '~':
+ lex_next(ls);
+ if (ls->c != '=') return '~'; else { lex_next(ls); return TK_ne; }
+ case ':':
+ lex_next(ls);
+ if (ls->c != ':') return ':'; else { lex_next(ls); return TK_label; }
+ case '"':
+ case '\'':
+ lex_string(ls, tv);
+ return TK_string;
+ case '.':
+ if (lex_savenext(ls) == '.') {
+ lex_next(ls);
+ if (ls->c == '.') {
+ lex_next(ls);
+ return TK_dots; /* ... */
+ }
+ return TK_concat; /* .. */
+ } else if (!lj_char_isdigit(ls->c)) {
+ return '.';
+ } else {
+ lex_number(ls, tv);
+ return TK_number;
+ }
+ case LEX_EOF:
+ return TK_eof;
+ default: {
+ LexChar c = ls->c;
+ lex_next(ls);
+ return c; /* Single-char tokens (+ - / ...). */
+ }
+ }
+ }
+}
+
+/* -- Lexer API ----------------------------------------------------------- */
+
+/* Set up lexer state. */
+int lj_lex_setup(lua_State *L, LexState *ls)
+{
+ int header = 0;
+ ls->L = L;
+ ls->fs = NULL;
+ ls->pe = ls->p = NULL;
+ ls->vstack = NULL;
+ ls->sizevstack = 0;
+ ls->vtop = 0;
+ ls->bcstack = NULL;
+ ls->sizebcstack = 0;
+ ls->tok = 0;
+ ls->lookahead = TK_eof; /* No look-ahead token. */
+ ls->linenumber = 1;
+ ls->lastline = 1;
+ ls->endmark = 0;
+ lex_next(ls); /* Read-ahead first char. */
+ if (ls->c == 0xef && ls->p + 2 <= ls->pe && (uint8_t)ls->p[0] == 0xbb &&
+ (uint8_t)ls->p[1] == 0xbf) { /* Skip UTF-8 BOM (if buffered). */
+ ls->p += 2;
+ lex_next(ls);
+ header = 1;
+ }
+ if (ls->c == '#') { /* Skip POSIX #! header line. */
+ do {
+ lex_next(ls);
+ if (ls->c == LEX_EOF) return 0;
+ } while (!lex_iseol(ls));
+ lex_newline(ls);
+ header = 1;
+ }
+ if (ls->c == LUA_SIGNATURE[0]) { /* Bytecode dump. */
+ if (header) {
+ /*
+ ** Loading bytecode with an extra header is disabled for security
+ ** reasons. This may circumvent the usual check for bytecode vs.
+ ** Lua code by looking at the first char. Since this is a potential
+ ** security violation no attempt is made to echo the chunkname either.
+ */
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_BCBAD));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Cleanup lexer state. */
+void lj_lex_cleanup(lua_State *L, LexState *ls)
+{
+ global_State *g = G(L);
+ lj_mem_freevec(g, ls->bcstack, ls->sizebcstack, BCInsLine);
+ lj_mem_freevec(g, ls->vstack, ls->sizevstack, VarInfo);
+ lj_buf_free(g, &ls->sb);
+}
+
+/* Return next lexical token. */
+void lj_lex_next(LexState *ls)
+{
+ ls->lastline = ls->linenumber;
+ if (LJ_LIKELY(ls->lookahead == TK_eof)) { /* No lookahead token? */
+ ls->tok = lex_scan(ls, &ls->tokval); /* Get next token. */
+ } else { /* Otherwise return lookahead token. */
+ ls->tok = ls->lookahead;
+ ls->lookahead = TK_eof;
+ ls->tokval = ls->lookaheadval;
+ }
+}
+
+/* Look ahead for the next token. */
+LexToken lj_lex_lookahead(LexState *ls)
+{
+ lj_assertLS(ls->lookahead == TK_eof, "double lookahead");
+ ls->lookahead = lex_scan(ls, &ls->lookaheadval);
+ return ls->lookahead;
+}
+
+/* Convert token to string. */
+const char *lj_lex_token2str(LexState *ls, LexToken tok)
+{
+ if (tok > TK_OFS)
+ return tokennames[tok-TK_OFS-1];
+ else if (!lj_char_iscntrl(tok))
+ return lj_strfmt_pushf(ls->L, "%c", tok);
+ else
+ return lj_strfmt_pushf(ls->L, "char(%d)", tok);
+}
+
+/* Lexer error. */
+void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...)
+{
+ const char *tokstr;
+ va_list argp;
+ if (tok == 0) {
+ tokstr = NULL;
+ } else if (tok == TK_name || tok == TK_string || tok == TK_number) {
+ lex_save(ls, '\0');
+ tokstr = ls->sb.b;
+ } else {
+ tokstr = lj_lex_token2str(ls, tok);
+ }
+ va_start(argp, em);
+ lj_err_lex(ls->L, ls->chunkname, tokstr, ls->linenumber, em, argp);
+ va_end(argp);
+}
+
+/* Initialize strings for reserved words. */
+void lj_lex_init(lua_State *L)
+{
+ uint32_t i;
+ for (i = 0; i < TK_RESERVED; i++) {
+ GCstr *s = lj_str_newz(L, tokennames[i]);
+ fixstring(s); /* Reserved words are never collected. */
+ s->reserved = (uint8_t)(i+1);
+ }
+}
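+
+/* This makes keyword detection a single byte test: lex_scan() interns
+** every identifier, and a non-zero s->reserved turns it into the token
+** TK_OFS + s->reserved, e.g. "and" (reserved == 1) becomes TK_and == 257.
+*/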
+
diff --git a/libs/luajit-cmake/luajit/src/lj_lex.h b/libs/luajit-cmake/luajit/src/lj_lex.h
new file mode 100644
index 0000000..cb5b576
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_lex.h
@@ -0,0 +1,93 @@
+/*
+** Lexical analyzer.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LEX_H
+#define _LJ_LEX_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+#include "lj_err.h"
+
+/* Lua lexer tokens. */
+#define TKDEF(_, __) \
+ _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
+ _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \
+ _(repeat) _(return) _(then) _(true) _(until) _(while) \
+ __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
+ __(label, ::) __(number, <number>) __(name, <name>) __(string, <string>) \
+ __(eof, <eof>)
+
+enum {
+ TK_OFS = 256,
+#define TKENUM1(name) TK_##name,
+#define TKENUM2(name, sym) TK_##name,
+TKDEF(TKENUM1, TKENUM2)
+#undef TKENUM1
+#undef TKENUM2
+ TK_RESERVED = TK_while - TK_OFS
+};
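+
+/* Numbering sketch: TK_OFS is 256, so single-char tokens keep their ASCII
+** value, the 22 keywords occupy TK_and == 257 .. TK_while == 278, and the
+** symbolic tokens (TK_concat etc.) follow after the keywords.
+*/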
+
+typedef int LexChar; /* Lexical character. Unsigned ext. from char. */
+typedef int LexToken; /* Lexical token. */
+
+/* Combined bytecode ins/line. Only used during bytecode generation. */
+typedef struct BCInsLine {
+ BCIns ins; /* Bytecode instruction. */
+ BCLine line; /* Line number for this bytecode. */
+} BCInsLine;
+
+/* Info for local variables. Only used during bytecode generation. */
+typedef struct VarInfo {
+ GCRef name; /* Local variable name or goto/label name. */
+ BCPos startpc; /* First point where the local variable is active. */
+ BCPos endpc; /* First point where the local variable is dead. */
+ uint8_t slot; /* Variable slot. */
+ uint8_t info; /* Variable/goto/label info. */
+} VarInfo;
+
+/* Lua lexer state. */
+typedef struct LexState {
+ struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
+ struct lua_State *L; /* Lua state. */
+ TValue tokval; /* Current token value. */
+ TValue lookaheadval; /* Lookahead token value. */
+ const char *p; /* Current position in input buffer. */
+ const char *pe; /* End of input buffer. */
+ LexChar c; /* Current character. */
+ LexToken tok; /* Current token. */
+ LexToken lookahead; /* Lookahead token. */
+ SBuf sb; /* String buffer for tokens. */
+ lua_Reader rfunc; /* Reader callback. */
+ void *rdata; /* Reader callback data. */
+ BCLine linenumber; /* Input line counter. */
+ BCLine lastline; /* Line of last token. */
+ GCstr *chunkname; /* Current chunk name (interned string). */
+ const char *chunkarg; /* Chunk name argument. */
+ const char *mode; /* Allow loading bytecode (b) and/or source text (t). */
+ VarInfo *vstack; /* Stack for names and extents of local variables. */
+ MSize sizevstack; /* Size of variable stack. */
+ MSize vtop; /* Top of variable stack. */
+ BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
+ MSize sizebcstack; /* Size of bytecode stack. */
+ uint32_t level; /* Syntactical nesting level. */
+ int endmark; /* Trust bytecode end marker, even if not at EOF. */
+} LexState;
+
+LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
+LJ_FUNC void lj_lex_next(LexState *ls);
+LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
+LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken tok);
+LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken tok, ErrMsg em, ...);
+LJ_FUNC void lj_lex_init(lua_State *L);
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertLS(c, ...) (lj_assertG_(G(ls->L), (c), __VA_ARGS__))
+#else
+#define lj_assertLS(c, ...) ((void)ls)
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_lib.c b/libs/luajit-cmake/luajit/src/lj_lib.c
new file mode 100644
index 0000000..82a9e25
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_lib.c
@@ -0,0 +1,359 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_lib_c
+#define LUA_CORE
+
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_bc.h"
+#include "lj_dispatch.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_lib.h"
+
+/* -- Library initialization ---------------------------------------------- */
+
+static GCtab *lib_create_table(lua_State *L, const char *libname, int hsize)
+{
+ if (libname) {
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 16);
+ lua_getfield(L, -1, libname);
+ if (!tvistab(L->top-1)) {
+ L->top--;
+ if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, hsize) != NULL)
+ lj_err_callerv(L, LJ_ERR_BADMODN, libname);
+ settabV(L, L->top, tabV(L->top-1));
+ L->top++;
+ lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */
+ }
+ L->top--;
+ settabV(L, L->top-1, tabV(L->top));
+ } else {
+ lua_createtable(L, 0, hsize);
+ }
+ return tabV(L->top-1);
+}
+
+static const uint8_t *lib_read_lfunc(lua_State *L, const uint8_t *p, GCtab *tab)
+{
+ int len = *p++;
+ GCstr *name = lj_str_new(L, (const char *)p, len);
+ LexState ls;
+ GCproto *pt;
+ GCfunc *fn;
+ memset(&ls, 0, sizeof(ls));
+ ls.L = L;
+ ls.p = (const char *)(p+len);
+ ls.pe = (const char *)~(uintptr_t)0;
+ ls.c = -1;
+ ls.level = (BCDUMP_F_STRIP|(LJ_BE*BCDUMP_F_BE));
+ ls.chunkname = name;
+ pt = lj_bcread_proto(&ls);
+ pt->firstline = ~(BCLine)0;
+ fn = lj_func_newL_empty(L, pt, tabref(L->env));
+ /* NOBARRIER: See below for common barrier. */
+ setfuncV(L, lj_tab_setstr(L, tab, name), fn);
+ return (const uint8_t *)ls.p;
+}
+
+void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *p, const lua_CFunction *cf)
+{
+ GCtab *env = tabref(L->env);
+ GCfunc *ofn = NULL;
+ int ffid = *p++;
+ BCIns *bcff = &L2GG(L)->bcff[*p++];
+ GCtab *tab = lib_create_table(L, libname, *p++);
+ ptrdiff_t tpos = L->top - L->base;
+
+ /* Avoid barriers further down. */
+ lj_gc_anybarriert(L, tab);
+ tab->nomm = 0;
+
+ for (;;) {
+ uint32_t tag = *p++;
+ MSize len = tag & LIBINIT_LENMASK;
+ tag &= LIBINIT_TAGMASK;
+ if (tag != LIBINIT_STRING) {
+ const char *name;
+ MSize nuv = (MSize)(L->top - L->base - tpos);
+ GCfunc *fn = lj_func_newC(L, nuv, env);
+ if (nuv) {
+ L->top = L->base + tpos;
+ memcpy(fn->c.upvalue, L->top, sizeof(TValue)*nuv);
+ }
+ fn->c.ffid = (uint8_t)(ffid++);
+ name = (const char *)p;
+ p += len;
+ if (tag == LIBINIT_CF)
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+ else
+ setmref(fn->c.pc, bcff++);
+ if (tag == LIBINIT_ASM_)
+ fn->c.f = ofn->c.f; /* Copy handler from previous function. */
+ else
+ fn->c.f = *cf++; /* Get cf or handler from C function table. */
+ if (len) {
+ /* NOBARRIER: See above for common barrier. */
+ setfuncV(L, lj_tab_setstr(L, tab, lj_str_new(L, name, len)), fn);
+ }
+ ofn = fn;
+ } else {
+ switch (tag | len) {
+ case LIBINIT_LUA:
+ p = lib_read_lfunc(L, p, tab);
+ break;
+ case LIBINIT_SET:
+ L->top -= 2;
+ if (tvisstr(L->top+1) && strV(L->top+1)->len == 0)
+ env = tabV(L->top);
+ else /* NOBARRIER: See above for common barrier. */
+ copyTV(L, lj_tab_set(L, tab, L->top+1), L->top);
+ break;
+ case LIBINIT_NUMBER:
+ memcpy(&L->top->n, p, sizeof(double));
+ L->top++;
+ p += sizeof(double);
+ break;
+ case LIBINIT_COPY:
+ copyTV(L, L->top, L->top - *p++);
+ L->top++;
+ break;
+ case LIBINIT_LASTCL:
+ setfuncV(L, L->top++, ofn);
+ break;
+ case LIBINIT_FFID:
+ ffid++;
+ break;
+ case LIBINIT_END:
+ return;
+ default:
+ setstrV(L, L->top++, lj_str_new(L, (const char *)p, len));
+ p += len;
+ break;
+ }
+ }
+ }
+}
+
+/* Push internal function on the stack. */
+GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n)
+{
+ GCfunc *fn;
+ lua_pushcclosure(L, f, n);
+ fn = funcV(L->top-1);
+ fn->c.ffid = (uint8_t)id;
+ setmref(fn->c.pc, &G(L)->bc_cfunc_int);
+ return fn;
+}
+
+void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f, GCtab *env)
+{
+ luaL_findtable(L, LUA_REGISTRYINDEX, "_PRELOAD", 4);
+ lua_pushcfunction(L, f);
+ /* NOBARRIER: The function is new (marked white). */
+ setgcref(funcV(L->top-1)->c.env, obj2gco(env));
+ lua_setfield(L, -2, name);
+ L->top--;
+}
+
+int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id, const char *name)
+{
+ GCfunc *fn = lj_lib_pushcf(L, cf, id);
+ GCtab *t = tabref(curr_func(L)->c.env); /* Reference to parent table. */
+ setfuncV(L, lj_tab_setstr(L, t, lj_str_newz(L, name)), fn);
+ lj_gc_anybarriert(L, t);
+ setfuncV(L, L->top++, fn);
+ return 1;
+}
+
+/* -- Type checks --------------------------------------------------------- */
+
+TValue *lj_lib_checkany(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o >= L->top)
+ lj_err_arg(L, narg, LJ_ERR_NOVAL);
+ return o;
+}
+
+GCstr *lj_lib_checkstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (LJ_LIKELY(tvisstr(o))) {
+ return strV(o);
+ } else if (tvisnumber(o)) {
+ GCstr *s = lj_strfmt_number(L, o);
+ setstrV(L, o, s);
+ return s;
+ }
+ }
+ lj_err_argt(L, narg, LUA_TSTRING);
+ return NULL; /* unreachable */
+}
+
+GCstr *lj_lib_optstr(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkstr(L, narg) : NULL;
+}
+
+#if LJ_DUALNUM
+void lj_lib_checknumber(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+}
+#endif
+
+lua_Number lj_lib_checknum(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top &&
+ (tvisnumber(o) || (tvisstr(o) && lj_strscan_num(strV(o), o)))))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_UNLIKELY(tvisint(o))) {
+ lua_Number n = (lua_Number)intV(o);
+ setnumV(o, n);
+ return n;
+ } else {
+ return numV(o);
+ }
+}
+
+int32_t lj_lib_checkint(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && lj_strscan_numberobj(o)))
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ if (LJ_LIKELY(tvisint(o))) {
+ return intV(o);
+ } else {
+ int32_t i = lj_num2int(numV(o));
+ if (LJ_DUALNUM) setintV(o, i);
+ return i;
+ }
+}
+
+int32_t lj_lib_optint(lua_State *L, int narg, int32_t def)
+{
+ TValue *o = L->base + narg-1;
+ return (o < L->top && !tvisnil(o)) ? lj_lib_checkint(L, narg) : def;
+}
+
+GCfunc *lj_lib_checkfunc(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvisfunc(o)))
+ lj_err_argt(L, narg, LUA_TFUNCTION);
+ return funcV(o);
+}
+
+GCtab *lj_lib_checktab(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvistab(o)))
+ lj_err_argt(L, narg, LUA_TTABLE);
+ return tabV(o);
+}
+
+GCtab *lj_lib_checktabornil(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (o < L->top) {
+ if (tvistab(o))
+ return tabV(o);
+ else if (tvisnil(o))
+ return NULL;
+ }
+ lj_err_arg(L, narg, LJ_ERR_NOTABN);
+ return NULL; /* unreachable */
+}
+
+int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst)
+{
+ GCstr *s = def >= 0 ? lj_lib_optstr(L, narg) : lj_lib_checkstr(L, narg);
+ if (s) {
+ const char *opt = strdata(s);
+ MSize len = s->len;
+ int i;
+ for (i = 0; *(const uint8_t *)lst; i++) {
+ if (*(const uint8_t *)lst == len && memcmp(opt, lst+1, len) == 0)
+ return i;
+ lst += 1+*(const uint8_t *)lst;
+ }
+ lj_err_argv(L, narg, LJ_ERR_INVOPTM, opt);
+ }
+ return def;
+}
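+
+/* The option list is a sequence of length-prefixed strings terminated by
+** a zero byte. Usage sketch (hypothetical option names):
+**
+**   int i = lj_lib_checkopt(L, 2, -1, "\4wrap\5clamp");
+**
+** returns 0 for "wrap", 1 for "clamp"; a def >= 0 makes the arg optional.
+*/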
+
+/* -- Strict type checks -------------------------------------------------- */
+
+/* The following type checks do not coerce between strings and numbers.
+** And they handle plain int64_t/uint64_t FFI numbers, too.
+*/
+
+#if LJ_HASBUFFER
+GCstr *lj_lib_checkstrx(lua_State *L, int narg)
+{
+ TValue *o = L->base + narg-1;
+ if (!(o < L->top && tvisstr(o))) lj_err_argt(L, narg, LUA_TSTRING);
+ return strV(o);
+}
+
+int32_t lj_lib_checkintrange(lua_State *L, int narg, int32_t a, int32_t b)
+{
+ TValue *o = L->base + narg-1;
+ lj_assertL(b >= 0, "expected range must be non-negative");
+ if (o < L->top) {
+ if (LJ_LIKELY(tvisint(o))) {
+ int32_t i = intV(o);
+ if (i >= a && i <= b) return i;
+ } else if (LJ_LIKELY(tvisnum(o))) {
+ /* For performance reasons, this doesn't check for integerness or
+ ** integer overflow. Overflow detection still works, since all FPUs
+ ** return either MININT or MAXINT, which is then out of range.
+ */
+ int32_t i = (int32_t)numV(o);
+ if (i >= a && i <= b) return i;
+#if LJ_HASFFI
+ } else if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ if (cd->ctypeid == CTID_INT64) {
+ int64_t i = *(int64_t *)cdataptr(cd);
+ if (i >= (int64_t)a && i <= (int64_t)b) return (int32_t)i;
+ } else if (cd->ctypeid == CTID_UINT64) {
+ uint64_t i = *(uint64_t *)cdataptr(cd);
+ if ((a < 0 || i >= (uint64_t)a) && i <= (uint64_t)b) return (int32_t)i;
+ } else {
+ goto badtype;
+ }
+#endif
+ } else {
+ goto badtype;
+ }
+ lj_err_arg(L, narg, LJ_ERR_NUMRNG);
+ }
+badtype:
+ lj_err_argt(L, narg, LUA_TNUMBER);
+ return 0; /* unreachable */
+}
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lj_lib.h b/libs/luajit-cmake/luajit/src/lj_lib.h
new file mode 100644
index 0000000..a18f52b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_lib.h
@@ -0,0 +1,116 @@
+/*
+** Library function support.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_LIB_H
+#define _LJ_LIB_H
+
+#include "lj_obj.h"
+
+/*
+** A fallback handler is called by the assembler VM if the fast path fails:
+**
+** - too few arguments: unrecoverable.
+** - wrong argument type: recoverable, if coercion succeeds.
+** - bad argument value: unrecoverable.
+** - stack overflow: recoverable, if stack reallocation succeeds.
+** - extra handling: recoverable.
+**
+** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
+** lj_err_caller() or lj_err_callermsg().
+** The recoverable cases return 0 or the number of results + 1.
+** The assembler VM retries the fast path only if 0 is returned.
+** This time the fallback must not be called again or it gets stuck in a loop.
+*/
+
+/* Return values from fallback handler. */
+#define FFH_RETRY 0
+#define FFH_UNREACHABLE FFH_RETRY
+#define FFH_RES(n) ((n)+1)
+#define FFH_TAILCALL (-1)
+
+LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
+LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
+#if LJ_DUALNUM
+LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
+#else
+#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
+#endif
+LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
+LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
+LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
+LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
+
+#if LJ_HASBUFFER
+LJ_FUNC GCstr *lj_lib_checkstrx(lua_State *L, int narg);
+LJ_FUNC int32_t lj_lib_checkintrange(lua_State *L, int narg,
+ int32_t a, int32_t b);
+#endif
+
+/* Avoid including lj_frame.h. */
+#if LJ_GC64
+#define lj_lib_upvalue(L, n) \
+ (&gcval(L->base-2)->fn.c.upvalue[(n)-1])
+#elif LJ_FR2
+#define lj_lib_upvalue(L, n) \
+ (&gcref((L->base-2)->gcr)->fn.c.upvalue[(n)-1])
+#else
+#define lj_lib_upvalue(L, n) \
+ (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
+#endif
+
+#if LJ_TARGET_WINDOWS
+#define lj_lib_checkfpu(L) \
+ do { setnumV(L->top++, (lua_Number)1437217655); \
+ if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
+ L->top--; } while (0)
+#else
+#define lj_lib_checkfpu(L) UNUSED(L)
+#endif
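+
+/* Rationale (presumably): 1437217655 needs more than 24 mantissa bits, so
+** if some other Windows component dropped the x87 FPU to single precision,
+** the store/load round-trip alters the value and the check fails early
+** instead of producing subtly wrong numbers later.
+*/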
+
+LJ_FUNC GCfunc *lj_lib_pushcc(lua_State *L, lua_CFunction f, int id, int n);
+#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
+
+/* Library function declarations. Scanned by buildvm. */
+#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
+#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
+#define LJLIB_ASM_(name)
+#define LJLIB_LUA(name)
+#define LJLIB_SET(name)
+#define LJLIB_PUSH(arg)
+#define LJLIB_REC(handler)
+#define LJLIB_NOREGUV
+#define LJLIB_NOREG
+
+#define LJ_LIB_REG(L, regname, name) \
+ lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
+
+LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
+ const uint8_t *init, const lua_CFunction *cf);
+LJ_FUNC void lj_lib_prereg(lua_State *L, const char *name, lua_CFunction f,
+ GCtab *env);
+LJ_FUNC int lj_lib_postreg(lua_State *L, lua_CFunction cf, int id,
+ const char *name);
+
+/* Library init data tags. */
+#define LIBINIT_LENMASK 0x3f
+#define LIBINIT_TAGMASK 0xc0
+#define LIBINIT_CF 0x00
+#define LIBINIT_ASM 0x40
+#define LIBINIT_ASM_ 0x80
+#define LIBINIT_STRING 0xc0
+#define LIBINIT_MAXSTR 0x38
+#define LIBINIT_LUA 0xf9
+#define LIBINIT_SET 0xfa
+#define LIBINIT_NUMBER 0xfb
+#define LIBINIT_COPY 0xfc
+#define LIBINIT_LASTCL 0xfd
+#define LIBINIT_FFID 0xfe
+#define LIBINIT_END 0xff
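+
+/* Decoding sketch: the top two bits of each tag byte select the kind and
+** the low six bits hold a length. E.g. 0x43 'a' 'b' 'c' registers an ASM
+** fast function named "abc" (LIBINIT_ASM, length 3). String tags with a
+** length above LIBINIT_MAXSTR encode the special LIBINIT_LUA..LIBINIT_END
+** codes listed above.
+*/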
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_load.c b/libs/luajit-cmake/luajit/src/lj_load.c
new file mode 100644
index 0000000..0aab488
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_load.c
@@ -0,0 +1,168 @@
+/*
+** Load and dump code.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <errno.h>
+#include <stdio.h>
+
+#define lj_load_c
+#define LUA_CORE
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_func.h"
+#include "lj_frame.h"
+#include "lj_vm.h"
+#include "lj_lex.h"
+#include "lj_bcdump.h"
+#include "lj_parse.h"
+
+/* -- Load Lua source code and bytecode ----------------------------------- */
+
+static TValue *cpparser(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ LexState *ls = (LexState *)ud;
+ GCproto *pt;
+ GCfunc *fn;
+ int bc;
+ UNUSED(dummy);
+ cframe_errfunc(L->cframe) = -1; /* Inherit error function. */
+ bc = lj_lex_setup(L, ls);
+ if (ls->mode && !strchr(ls->mode, bc ? 'b' : 't')) {
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_XMODE));
+ lj_err_throw(L, LUA_ERRSYNTAX);
+ }
+ pt = bc ? lj_bcread(ls) : lj_parse(ls);
+ fn = lj_func_newL_empty(L, pt, tabref(L->env));
+ /* Don't combine above/below into one statement. */
+ setfuncV(L, L->top++, fn);
+ return NULL;
+}
+
+LUA_API int lua_loadx(lua_State *L, lua_Reader reader, void *data,
+ const char *chunkname, const char *mode)
+{
+ LexState ls;
+ int status;
+ ls.rfunc = reader;
+ ls.rdata = data;
+ ls.chunkarg = chunkname ? chunkname : "?";
+ ls.mode = mode;
+ lj_buf_init(L, &ls.sb);
+ status = lj_vm_cpcall(L, NULL, &ls, cpparser);
+ lj_lex_cleanup(L, &ls);
+ lj_gc_check(L);
+ return status;
+}
+
+LUA_API int lua_load(lua_State *L, lua_Reader reader, void *data,
+ const char *chunkname)
+{
+ return lua_loadx(L, reader, data, chunkname, NULL);
+}
+
+typedef struct FileReaderCtx {
+ FILE *fp;
+ char buf[LUAL_BUFFERSIZE];
+} FileReaderCtx;
+
+static const char *reader_file(lua_State *L, void *ud, size_t *size)
+{
+ FileReaderCtx *ctx = (FileReaderCtx *)ud;
+ UNUSED(L);
+ if (feof(ctx->fp)) return NULL;
+ *size = fread(ctx->buf, 1, sizeof(ctx->buf), ctx->fp);
+ return *size > 0 ? ctx->buf : NULL;
+}
+
+LUALIB_API int luaL_loadfilex(lua_State *L, const char *filename,
+ const char *mode)
+{
+ FileReaderCtx ctx;
+ int status;
+ const char *chunkname;
+ if (filename) {
+ ctx.fp = fopen(filename, "rb");
+ if (ctx.fp == NULL) {
+ lua_pushfstring(L, "cannot open %s: %s", filename, strerror(errno));
+ return LUA_ERRFILE;
+ }
+ chunkname = lua_pushfstring(L, "@%s", filename);
+ } else {
+ ctx.fp = stdin;
+ chunkname = "=stdin";
+ }
+ status = lua_loadx(L, reader_file, &ctx, chunkname, mode);
+ if (ferror(ctx.fp)) {
+ L->top -= filename ? 2 : 1;
+ lua_pushfstring(L, "cannot read %s: %s", chunkname+1, strerror(errno));
+ if (filename)
+ fclose(ctx.fp);
+ return LUA_ERRFILE;
+ }
+ if (filename) {
+ L->top--;
+ copyTV(L, L->top-1, L->top);
+ fclose(ctx.fp);
+ }
+ return status;
+}
+
+LUALIB_API int luaL_loadfile(lua_State *L, const char *filename)
+{
+ return luaL_loadfilex(L, filename, NULL);
+}
+
+typedef struct StringReaderCtx {
+ const char *str;
+ size_t size;
+} StringReaderCtx;
+
+static const char *reader_string(lua_State *L, void *ud, size_t *size)
+{
+ StringReaderCtx *ctx = (StringReaderCtx *)ud;
+ UNUSED(L);
+ if (ctx->size == 0) return NULL;
+ *size = ctx->size;
+ ctx->size = 0;
+ return ctx->str;
+}
+
+LUALIB_API int luaL_loadbufferx(lua_State *L, const char *buf, size_t size,
+ const char *name, const char *mode)
+{
+ StringReaderCtx ctx;
+ ctx.str = buf;
+ ctx.size = size;
+ return lua_loadx(L, reader_string, &ctx, name, mode);
+}
+
+LUALIB_API int luaL_loadbuffer(lua_State *L, const char *buf, size_t size,
+ const char *name)
+{
+ return luaL_loadbufferx(L, buf, size, name, NULL);
+}
+
+LUALIB_API int luaL_loadstring(lua_State *L, const char *s)
+{
+ return luaL_loadbuffer(L, s, strlen(s), s);
+}
+
+/* -- Dump bytecode ------------------------------------------------------- */
+
+LUA_API int lua_dump(lua_State *L, lua_Writer writer, void *data)
+{
+ cTValue *o = L->top-1;
+ lj_checkapi(L->top > L->base, "top slot empty");
+ if (tvisfunc(o) && isluafunc(funcV(o)))
+ return lj_bcwrite(L, funcproto(funcV(o)), writer, data, 0);
+ else
+ return 1;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_mcode.c b/libs/luajit-cmake/luajit/src/lj_mcode.c
new file mode 100644
index 0000000..163aada
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_mcode.c
@@ -0,0 +1,374 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_mcode_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#if LJ_HASJIT
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_jit.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_prng.h"
+#endif
+#if LJ_HASJIT || LJ_HASFFI
+#include "lj_vm.h"
+#endif
+
+/* -- OS-specific functions ----------------------------------------------- */
+
+#if LJ_HASJIT || LJ_HASFFI
+
+/* Define this if you want to run LuaJIT with Valgrind. */
+#ifdef LUAJIT_USE_VALGRIND
+#include <valgrind/valgrind.h>
+#endif
+
+#if LJ_TARGET_IOS
+void sys_icache_invalidate(void *start, size_t len);
+#endif
+
+/* Synchronize data/instruction cache. */
+void lj_mcode_sync(void *start, void *end)
+{
+#ifdef LUAJIT_USE_VALGRIND
+ VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
+#endif
+#if LJ_TARGET_X86ORX64
+ UNUSED(start); UNUSED(end);
+#elif LJ_TARGET_IOS
+ sys_icache_invalidate(start, (char *)end-(char *)start);
+#elif LJ_TARGET_PPC
+ lj_vm_cachesync(start, end);
+#elif defined(__GNUC__) || defined(__clang__)
+ __clear_cache(start, end);
+#else
+#error "Missing builtin to flush instruction cache"
+#endif
+}
+
+#endif
+
+#if LJ_HASJIT
+
+#if LJ_TARGET_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#define MCPROT_RW PAGE_READWRITE
+#define MCPROT_RX PAGE_EXECUTE_READ
+#define MCPROT_RWX PAGE_EXECUTE_READWRITE
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
+{
+ void *p = LJ_WIN_VALLOC((void *)hint, sz,
+ MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
+ if (!p && !hint)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J); UNUSED(sz);
+ VirtualFree(p, 0, MEM_RELEASE);
+}
+
+static int mcode_setprot(void *p, size_t sz, DWORD prot)
+{
+ DWORD oprot;
+ return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
+}
+
+#elif LJ_TARGET_POSIX
+
+#include <sys/mman.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#define MCPROT_RW (PROT_READ|PROT_WRITE)
+#define MCPROT_RX (PROT_READ|PROT_EXEC)
+#define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
+#ifdef PROT_MPROTECT
+#define MCPROT_CREATE (PROT_MPROTECT(MCPROT_RWX))
+#else
+#define MCPROT_CREATE 0
+#endif
+
+static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
+{
+ void *p = mmap((void *)hint, sz, prot|MCPROT_CREATE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
+ if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
+ p = NULL;
+ }
+ return p;
+}
+
+static void mcode_free(jit_State *J, void *p, size_t sz)
+{
+ UNUSED(J);
+ munmap(p, sz);
+}
+
+static int mcode_setprot(void *p, size_t sz, int prot)
+{
+ return mprotect(p, sz, prot);
+}
+
+#else
+
+#error "Missing OS support for explicit placement of executable memory"
+
+#endif
+
+/* -- MCode area protection ----------------------------------------------- */
+
+#if LUAJIT_SECURITY_MCODE == 0
+
+/* Define this ONLY if page protection twiddling becomes a bottleneck.
+**
+** It's generally considered to be a potential security risk to have
+** pages with simultaneous write *and* execute access in a process.
+**
+** Do not even think about using this mode for server processes or
+** apps handling untrusted external data.
+**
+** The security risk is not in LuaJIT itself -- but if an adversary finds
+** any *other* flaw in your C application logic, then any RWX memory pages
+** simplify writing an exploit considerably.
+*/
+#define MCPROT_GEN MCPROT_RWX
+#define MCPROT_RUN MCPROT_RWX
+
+static void mcode_protect(jit_State *J, int prot)
+{
+ UNUSED(J); UNUSED(prot); UNUSED(mcode_setprot);
+}
+
+#else
+
+/* This is the default behaviour and much safer:
+**
+** Most of the time the memory pages holding machine code are executable,
+** but NONE of them is writable.
+**
+** The current memory area is marked read-write (but NOT executable) only
+** during the short time window while the assembler generates machine code.
+*/
+#define MCPROT_GEN MCPROT_RW
+#define MCPROT_RUN MCPROT_RX
+
+/* Protection twiddling failed. Probably due to kernel security. */
+static LJ_NORET LJ_NOINLINE void mcode_protfail(jit_State *J)
+{
+ lua_CFunction panic = J2G(J)->panic;
+ if (panic) {
+ lua_State *L = J->L;
+ setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
+ panic(L);
+ }
+ exit(EXIT_FAILURE);
+}
+
+/* Change protection of MCode area. */
+static void mcode_protect(jit_State *J, int prot)
+{
+ if (J->mcprot != prot) {
+ if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
+ mcode_protfail(J);
+ J->mcprot = prot;
+ }
+}
+
+#endif
+
+/* -- MCode area allocation ----------------------------------------------- */
+
+#if LJ_64
+#define mcode_validptr(p) (p)
+#else
+#define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
+#endif
+
+#ifdef LJ_TARGET_JUMPRANGE
+
+/* Get memory within relative jump distance of our code in 64 bit mode. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+ /* Target an address in the static assembler code (64K aligned).
+ ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
+ ** Use half the jump range so every address in the range can reach any other.
+ */
+#if LJ_TARGET_MIPS
+ /* Use the middle of the 256MB-aligned region. */
+ uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
+ ~(uintptr_t)0x0fffffffu) + 0x08000000u;
+#else
+ uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
+#endif
+ const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
+ /* First try a contiguous area below the last one. */
+ uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
+ int i;
+ /* Limit probing iterations, depending on the available pool size. */
+ for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
+ if (mcode_validptr(hint)) {
+ void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
+
+ if (mcode_validptr(p) &&
+ ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
+ return p;
+ if (p) mcode_free(J, p, sz); /* Free badly placed area. */
+ }
+ /* Next try probing 64K-aligned pseudo-random addresses. */
+ do {
+ hint = lj_prng_u64(&J2G(J)->prng) & ((1u<<LJ_TARGET_JUMPRANGE)-0x10000);
+ } while (!(hint + sz < range+range));
+ hint = target + hint - range;
+ }
+ lj_trace_err(J, LJ_TRERR_MCODEAL); /* Give up. OS probably ignores hints? */
+ return NULL;
+}
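+
+/* Range arithmetic sketch, assuming LJ_TARGET_JUMPRANGE == 31 (x64):
+** range == 2^30 - 2MB, so every accepted area lies within about +-1GB of
+** the static code, which keeps any two addresses in the pool mutually
+** reachable with a signed 32 bit relative jump.
+*/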
+
+#else
+
+/* All memory addresses are reachable by relative jumps. */
+static void *mcode_alloc(jit_State *J, size_t sz)
+{
+#if defined(__OpenBSD__) || defined(__NetBSD__) || LJ_TARGET_UWP
+ /* Allow better executable memory allocation for OpenBSD W^X mode. */
+ void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
+ if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
+ mcode_free(J, p, sz);
+ return NULL;
+ }
+ return p;
+#else
+ return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
+#endif
+}
+
+#endif
+
+/* -- MCode area management ----------------------------------------------- */
+
+/* Allocate a new MCode area. */
+static void mcode_allocarea(jit_State *J)
+{
+ MCode *oldarea = J->mcarea;
+ size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ J->mcarea = (MCode *)mcode_alloc(J, sz);
+ J->szmcarea = sz;
+ J->mcprot = MCPROT_GEN;
+ J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
+ J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
+ ((MCLink *)J->mcarea)->next = oldarea;
+ ((MCLink *)J->mcarea)->size = sz;
+ J->szallmcarea += sz;
+ J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot);
+}
+
+/* Free all MCode areas. */
+void lj_mcode_free(jit_State *J)
+{
+ MCode *mc = J->mcarea;
+ J->mcarea = NULL;
+ J->szallmcarea = 0;
+ while (mc) {
+ MCode *next = ((MCLink *)mc)->next;
+ size_t sz = ((MCLink *)mc)->size;
+ lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink));
+ mcode_free(J, mc, sz);
+ mc = next;
+ }
+}
+
+/* -- MCode transactions -------------------------------------------------- */
+
+/* Reserve the remainder of the current MCode area. */
+MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
+{
+ if (!J->mcarea)
+ mcode_allocarea(J);
+ else
+ mcode_protect(J, MCPROT_GEN);
+ *lim = J->mcbot;
+ return J->mctop;
+}
+
+/* Commit the top part of the current MCode area. */
+void lj_mcode_commit(jit_State *J, MCode *top)
+{
+ J->mctop = top;
+ mcode_protect(J, MCPROT_RUN);
+}
+
+/* Abort the reservation. */
+void lj_mcode_abort(jit_State *J)
+{
+ if (J->mcarea)
+ mcode_protect(J, MCPROT_RUN);
+}
+
+/* Set/reset protection to allow patching of MCode areas. */
+MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
+{
+ if (finish) {
+#if LUAJIT_SECURITY_MCODE
+ if (J->mcarea == ptr)
+ mcode_protect(J, MCPROT_RUN);
+ else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
+ mcode_protfail(J);
+#endif
+ return NULL;
+ } else {
+ MCode *mc = J->mcarea;
+ /* Try current area first to use the protection cache. */
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
+#if LUAJIT_SECURITY_MCODE
+ mcode_protect(J, MCPROT_GEN);
+#endif
+ return mc;
+ }
+ /* Otherwise search through the list of MCode areas. */
+ for (;;) {
+ mc = ((MCLink *)mc)->next;
+ lj_assertJ(mc != NULL, "broken MCode area chain");
+ if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
+#if LUAJIT_SECURITY_MCODE
+ if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
+ mcode_protfail(J);
+#endif
+ return mc;
+ }
+ }
+ }
+}
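+
+/* Note on the fast path above (a sketch, based on the J->mcprot cache): only
+** the protection state of the *current* area is cached, so repeated
+** patch/finish cycles on it can skip redundant protection changes. Older
+** areas always pay the full mcode_setprot() round trip on entry and exit.
+*/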
+
+/* Limit of MCode reservation reached. */
+void lj_mcode_limiterr(jit_State *J, size_t need)
+{
+ size_t sizemcode, maxmcode;
+ lj_mcode_abort(J);
+ sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
+ sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
+ maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
+ if ((size_t)need > sizemcode)
+ lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */
+ if (J->szallmcarea + sizemcode > maxmcode)
+ lj_trace_err(J, LJ_TRERR_MCODEAL);
+ mcode_allocarea(J);
+ lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_mcode.h b/libs/luajit-cmake/luajit/src/lj_mcode.h
new file mode 100644
index 0000000..be35925
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_mcode.h
@@ -0,0 +1,30 @@
+/*
+** Machine code management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_MCODE_H
+#define _LJ_MCODE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT || LJ_HASFFI
+LJ_FUNC void lj_mcode_sync(void *start, void *end);
+#endif
+
+#if LJ_HASJIT
+
+#include "lj_jit.h"
+
+LJ_FUNC void lj_mcode_free(jit_State *J);
+LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
+LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
+LJ_FUNC void lj_mcode_abort(jit_State *J);
+LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
+LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
+
+#define lj_mcode_commitbot(J, m) (J->mcbot = (m))
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_meta.c b/libs/luajit-cmake/luajit/src/lj_meta.c
new file mode 100644
index 0000000..5324c66
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_meta.c
@@ -0,0 +1,482 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_meta_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+#include "lj_lib.h"
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* String interning of metamethod names for fast indexing. */
+void lj_meta_init(lua_State *L)
+{
+#define MMNAME(name) "__" #name
+ const char *metanames = MMDEF(MMNAME);
+#undef MMNAME
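+ /* Illustration: MMDEF(MMNAME) pastes one literal per metamethod, yielding
+ ** the single string "__index__newindex__gc..."; the loop below re-splits
+ ** it at each "__" boundary to intern the individual names.
+ */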
+ global_State *g = G(L);
+ const char *p, *q;
+ uint32_t mm;
+ for (mm = 0, p = metanames; *p; mm++, p = q) {
+ GCstr *s;
+ for (q = p+2; *q && *q != '_'; q++) ;
+ s = lj_str_new(L, p, (size_t)(q-p));
+ /* NOBARRIER: g->gcroot[] is a GC root. */
+ setgcref(g->gcroot[GCROOT_MMNAME+mm], obj2gco(s));
+ }
+}
+
+/* Negative caching of a few fast metamethods. See the lj_meta_fast() macro. */
+cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name)
+{
+ cTValue *mo = lj_tab_getstr(mt, name);
+ lj_assertX(mm <= MM_FAST, "bad metamethod %d", mm);
+ if (!mo || tvisnil(mo)) { /* No metamethod? */
+ mt->nomm |= (uint8_t)(1u<<mm); /* Set negative cache flag. */
+ return NULL;
+ }
+ return mo;
+}
+
+/* Lookup metamethod for object. */
+cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm)
+{
+ GCtab *mt;
+ if (tvistab(o))
+ mt = tabref(tabV(o)->metatable);
+ else if (tvisudata(o))
+ mt = tabref(udataV(o)->metatable);
+ else
+ mt = tabref(basemt_obj(G(L), o));
+ if (mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(G(L), mm));
+ if (mo)
+ return mo;
+ }
+ return niltv(L);
+}
+
+#if LJ_HASFFI
+/* Tailcall from C function. */
+int lj_meta_tailcall(lua_State *L, cTValue *tv)
+{
+ TValue *base = L->base;
+ TValue *top = L->top;
+ const BCIns *pc = frame_pc(base-1); /* Preserve old PC from frame. */
+ copyTV(L, base-1-LJ_FR2, tv); /* Replace frame with new object. */
+ if (LJ_FR2)
+ (top++)->u64 = LJ_CONT_TAILCALL;
+ else
+ top->u32.lo = LJ_CONT_TAILCALL;
+ setframe_pc(top++, pc);
+ setframe_gc(top, obj2gco(L), LJ_TTHREAD); /* Dummy frame object. */
+ if (LJ_FR2) top++;
+ setframe_ftsz(top, ((char *)(top+1) - (char *)base) + FRAME_CONT);
+ L->base = L->top = top+1;
+ /*
+ ** before: [old_mo|PC] [... ...]
+ ** ^base ^top
+ ** after: [new_mo|itype] [... ...] [NULL|PC] [dummy|delta]
+ ** ^base/top
+ ** tailcall: [new_mo|PC] [... ...]
+ ** ^base ^top
+ */
+ return 0;
+}
+#endif
+
+/* Setup call to metamethod to be run by Assembler VM. */
+static TValue *mmcall(lua_State *L, ASMFunction cont, cTValue *mo,
+ cTValue *a, cTValue *b)
+{
+ /*
+ ** |-- framesize -> top top+1 top+2 top+3
+ ** before: [func slots ...]
+ ** mm setup: [func slots ...] [cont|?] [mo|tmtype] [a] [b]
+ ** in asm: [func slots ...] [cont|PC] [mo|delta] [a] [b]
+ ** ^-- func base ^-- mm base
+ ** after mm: [func slots ...] [result]
+ ** ^-- copy to base[PC_RA] --/ for lj_cont_ra
+ ** istruecond + branch for lj_cont_cond*
+ ** ignore for lj_cont_nop
+ ** next PC: [func slots ...]
+ */
+ TValue *top = L->top;
+ if (curr_funcisL(L)) top = curr_topL(L);
+ setcont(top++, cont); /* Assembler VM stores PC in upper word or FR2. */
+ if (LJ_FR2) setnilV(top++);
+ copyTV(L, top++, mo); /* Store metamethod and two arguments. */
+ if (LJ_FR2) setnilV(top++);
+ copyTV(L, top, a);
+ copyTV(L, top+1, b);
+ return top; /* Return new base. */
+}
+
+/* -- C helpers for some instructions, called from assembler VM ----------- */
+
+/* Helper for TGET*. __index chain and metamethod. */
+cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k)
+{
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (!tvisnil(tv) ||
+ !(mo = lj_meta_fast(L, tabref(t->metatable), MM_index)))
+ return tv;
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_index))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_ra, mo, o, k);
+ return NULL; /* Trigger metamethod call. */
+ }
+ o = mo;
+ }
+ lj_err_msg(L, LJ_ERR_GETLOOP);
+ return NULL; /* unreachable */
+}
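+
+/* Roughly equivalent Lua pseudo-code for the table case above (a sketch;
+** the C code also bounds the chain by LJ_MAX_IDXCHAIN and handles
+** non-table objects):
+**
+**   local v = rawget(t, k)
+**   if v ~= nil then return v end
+**   local mo = getmetatable(t) and getmetatable(t).__index
+**   if mo == nil then return nil end
+**   if type(mo) == "function" then return mo(t, k) end
+**   return index(mo, k)  -- retry the whole lookup on the handler
+*/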
+
+/* Helper for TSET*. __newindex chain and metamethod. */
+TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k)
+{
+ TValue tmp;
+ int loop;
+ for (loop = 0; loop < LJ_MAX_IDXCHAIN; loop++) {
+ cTValue *mo;
+ if (LJ_LIKELY(tvistab(o))) {
+ GCtab *t = tabV(o);
+ cTValue *tv = lj_tab_get(L, t, k);
+ if (LJ_LIKELY(!tvisnil(tv))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ return (TValue *)tv;
+ } else if (!(mo = lj_meta_fast(L, tabref(t->metatable), MM_newindex))) {
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ lj_gc_anybarriert(L, t);
+ if (tv != niltv(L))
+ return (TValue *)tv;
+ if (tvisnil(k)) lj_err_msg(L, LJ_ERR_NILIDX);
+ else if (tvisint(k)) { setnumV(&tmp, (lua_Number)intV(k)); k = &tmp; }
+ else if (tvisnum(k) && tvisnan(k)) lj_err_msg(L, LJ_ERR_NANIDX);
+ return lj_tab_newkey(L, t, k);
+ }
+ } else if (tvisnil(mo = lj_meta_lookup(L, o, MM_newindex))) {
+ lj_err_optype(L, o, LJ_ERR_OPINDEX);
+ return NULL; /* unreachable */
+ }
+ if (tvisfunc(mo)) {
+ L->top = mmcall(L, lj_cont_nop, mo, o, k);
+ /* L->top+2 = v filled in by caller. */
+ return NULL; /* Trigger metamethod call. */
+ }
+ copyTV(L, &tmp, mo);
+ o = &tmp;
+ }
+ lj_err_msg(L, LJ_ERR_SETLOOP);
+ return NULL; /* unreachable */
+}
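+
+/* Matching store sketch for the table case (cf. Lua 5.1's settable_event):
+** if rawget(t, k) is non-nil or there is no __newindex handler, the raw
+** slot is (created and) returned for the VM to store into; a function
+** handler is called as mo(t, k, v); any other handler value restarts the
+** loop as the new store target.
+*/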
+
+static cTValue *str2num(cTValue *o, TValue *n)
+{
+ if (tvisnum(o))
+ return o;
+ else if (tvisint(o))
+ return (setnumV(n, (lua_Number)intV(o)), n);
+ else if (tvisstr(o) && lj_strscan_num(strV(o), n))
+ return n;
+ else
+ return NULL;
+}
+
+/* Helper for arithmetic instructions. Coercion, metamethod. */
+TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb, cTValue *rc,
+ BCReg op)
+{
+ MMS mm = bcmode_mm(op);
+ TValue tempb, tempc;
+ cTValue *b, *c;
+ if ((b = str2num(rb, &tempb)) != NULL &&
+ (c = str2num(rc, &tempc)) != NULL) { /* Try coercion first. */
+ setnumV(ra, lj_vm_foldarith(numV(b), numV(c), (int)mm-MM_add));
+ return NULL;
+ } else {
+ cTValue *mo = lj_meta_lookup(L, rb, mm);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, rc, mm);
+ if (tvisnil(mo)) {
+ if (str2num(rb, &tempb) == NULL) rc = rb;
+ lj_err_optype(L, rc, LJ_ERR_OPARITH);
+ return NULL; /* unreachable */
+ }
+ }
+ return mmcall(L, lj_cont_ra, mo, rb, rc);
+ }
+}
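+
+/* Example of the coercion-first rule above: "10"-2 folds directly to 8,
+** since both operands pass str2num(); no metamethod lookup happens. Only
+** when either side fails to coerce is the arithmetic metamethod looked up,
+** first on rb, then on rc.
+*/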
+
+/* Helper for CAT. Coercion, iterative concat, __concat metamethod. */
+TValue *lj_meta_cat(lua_State *L, TValue *top, int left)
+{
+ int fromc = 0;
+ if (left < 0) { left = -left; fromc = 1; }
+ do {
+ if (!(tvisstr(top) || tvisnumber(top) || tvisbuf(top)) ||
+ !(tvisstr(top-1) || tvisnumber(top-1) || tvisbuf(top-1))) {
+ cTValue *mo = lj_meta_lookup(L, top-1, MM_concat);
+ if (tvisnil(mo)) {
+ mo = lj_meta_lookup(L, top, MM_concat);
+ if (tvisnil(mo)) {
+ if (tvisstr(top-1) || tvisnumber(top-1)) top++;
+ lj_err_optype(L, top-1, LJ_ERR_OPCAT);
+ return NULL; /* unreachable */
+ }
+ }
+ /* One of the top two elements is not a string, call __concat metamethod:
+ **
+ ** before: [...][CAT stack .........................]
+ ** top-1 top top+1 top+2
+ ** pick two: [...][CAT stack ...] [o1] [o2]
+ ** setup mm: [...][CAT stack ...] [cont|?] [mo|tmtype] [o1] [o2]
+ ** in asm: [...][CAT stack ...] [cont|PC] [mo|delta] [o1] [o2]
+ ** ^-- func base ^-- mm base
+ ** after mm: [...][CAT stack ...] <--push-- [result]
+ ** next step: [...][CAT stack .............]
+ */
+ copyTV(L, top+2*LJ_FR2+2, top); /* Carefully ordered stack copies! */
+ copyTV(L, top+2*LJ_FR2+1, top-1);
+ copyTV(L, top+LJ_FR2, mo);
+ setcont(top-1, lj_cont_cat);
+ if (LJ_FR2) { setnilV(top); setnilV(top+2); top += 2; }
+ return top+1; /* Trigger metamethod call. */
+ } else {
+ /* Pick as many strings as possible from the top and concatenate them:
+ **
+ ** before: [...][CAT stack ...........................]
+ ** pick str: [...][CAT stack ...] [...... strings ......]
+ ** concat: [...][CAT stack ...] [result]
+ ** next step: [...][CAT stack ............]
+ */
+ TValue *e, *o = top;
+ uint64_t tlen = tvisstr(o) ? strV(o)->len :
+ tvisbuf(o) ? sbufxlen(bufV(o)) : STRFMT_MAXBUF_NUM;
+ SBuf *sb;
+ do {
+ o--; tlen += tvisstr(o) ? strV(o)->len :
+ tvisbuf(o) ? sbufxlen(bufV(o)) : STRFMT_MAXBUF_NUM;
+ } while (--left > 0 && (tvisstr(o-1) || tvisnumber(o-1)));
+ if (tlen >= LJ_MAX_STR) lj_err_msg(L, LJ_ERR_STROV);
+ sb = lj_buf_tmp_(L);
+ lj_buf_more(sb, (MSize)tlen);
+ for (e = top, top = o; o <= e; o++) {
+ if (tvisstr(o)) {
+ GCstr *s = strV(o);
+ MSize len = s->len;
+ lj_buf_putmem(sb, strdata(s), len);
+ } else if (tvisbuf(o)) {
+ SBufExt *sbx = bufV(o);
+ lj_buf_putmem(sb, sbx->r, sbufxlen(sbx));
+ } else if (tvisint(o)) {
+ lj_strfmt_putint(sb, intV(o));
+ } else {
+ lj_strfmt_putfnum(sb, STRFMT_G14, numV(o));
+ }
+ }
+ setstrV(L, top, lj_buf_str(L, sb));
+ }
+ } while (left >= 1);
+ if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) {
+ if (!fromc) L->top = curr_topL(L);
+ lj_gc_step(L);
+ }
+ return NULL;
+}
+
+/* Helper for LEN. __len metamethod. */
+TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o)
+{
+ cTValue *mo = lj_meta_lookup(L, o, MM_len);
+ if (tvisnil(mo)) {
+ if (LJ_52 && tvistab(o))
+ tabref(tabV(o)->metatable)->nomm |= (uint8_t)(1u<<MM_len);
+ else
+ lj_err_optype(L, o, LJ_ERR_OPLEN);
+ return NULL;
+ }
+ return mmcall(L, lj_cont_ra, mo, o, LJ_52 ? o : niltv(L));
+}
+
+/* Helper for equality comparisons. __eq metamethod. */
+TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne)
+{
+ /* Field metatable must be at same offset for GCtab and GCudata! */
+ cTValue *mo = lj_meta_fast(L, tabref(o1->gch.metatable), MM_eq);
+ if (mo) {
+ TValue *top;
+ uint32_t it;
+ if (tabref(o1->gch.metatable) != tabref(o2->gch.metatable)) {
+ cTValue *mo2 = lj_meta_fast(L, tabref(o2->gch.metatable), MM_eq);
+ if (mo2 == NULL || !lj_obj_equal(mo, mo2))
+ return (TValue *)(intptr_t)ne;
+ }
+ top = curr_top(L);
+ setcont(top++, ne ? lj_cont_condf : lj_cont_condt);
+ if (LJ_FR2) setnilV(top++);
+ copyTV(L, top++, mo);
+ if (LJ_FR2) setnilV(top++);
+ it = ~(uint32_t)o1->gch.gct;
+ setgcV(L, top, o1, it);
+ setgcV(L, top+1, o2, it);
+ return top; /* Trigger metamethod call. */
+ }
+ return (TValue *)(intptr_t)ne;
+}
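+
+/* Note on the guard above: __eq only runs when both operands resolve to the
+** same handler (same metatable, or different metatables whose __eq values
+** compare equal with lj_obj_equal()). Two tables with unrelated metatables
+** therefore compare purely by identity, matching Lua 5.1 semantics.
+*/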
+
+#if LJ_HASFFI
+TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins)
+{
+ ASMFunction cont = (bc_op(ins) & 1) ? lj_cont_condf : lj_cont_condt;
+ int op = (int)bc_op(ins) & ~1;
+ TValue tv;
+ cTValue *mo, *o2, *o1 = &L->base[bc_a(ins)];
+ cTValue *o1mm = o1;
+ if (op == BC_ISEQV) {
+ o2 = &L->base[bc_d(ins)];
+ if (!tviscdata(o1mm)) o1mm = o2;
+ } else if (op == BC_ISEQS) {
+ setstrV(L, &tv, gco2str(proto_kgc(curr_proto(L), ~(ptrdiff_t)bc_d(ins))));
+ o2 = &tv;
+ } else if (op == BC_ISEQN) {
+ o2 = &mref(curr_proto(L)->k, cTValue)[bc_d(ins)];
+ } else {
+ lj_assertL(op == BC_ISEQP, "bad bytecode op %d", op);
+ setpriV(&tv, ~bc_d(ins));
+ o2 = &tv;
+ }
+ mo = lj_meta_lookup(L, o1mm, MM_eq);
+ if (LJ_LIKELY(!tvisnil(mo)))
+ return mmcall(L, cont, mo, o1, o2);
+ else
+ return (TValue *)(intptr_t)(bc_op(ins) & 1);
+}
+#endif
+
+/* Helper for ordered comparisons. String compare, __lt/__le metamethods. */
+TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op)
+{
+ if (LJ_HASFFI && (tviscdata(o1) || tviscdata(o2))) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, tviscdata(o1) ? o1 : o2, mm);
+ if (LJ_UNLIKELY(tvisnil(mo))) goto err;
+ return mmcall(L, cont, mo, o1, o2);
+ } else if (LJ_52 || itype(o1) == itype(o2)) {
+ /* Never called with two numbers. */
+ if (tvisstr(o1) && tvisstr(o2)) {
+ int32_t res = lj_str_cmp(strV(o1), strV(o2));
+ return (TValue *)(intptr_t)(((op&2) ? res <= 0 : res < 0) ^ (op&1));
+ } else {
+ trymt:
+ while (1) {
+ ASMFunction cont = (op & 1) ? lj_cont_condf : lj_cont_condt;
+ MMS mm = (op & 2) ? MM_le : MM_lt;
+ cTValue *mo = lj_meta_lookup(L, o1, mm);
+#if LJ_52
+ if (tvisnil(mo) && tvisnil((mo = lj_meta_lookup(L, o2, mm))))
+#else
+ cTValue *mo2 = lj_meta_lookup(L, o2, mm);
+ if (tvisnil(mo) || !lj_obj_equal(mo, mo2))
+#endif
+ {
+ if (op & 2) { /* MM_le not found: retry with MM_lt. */
+ cTValue *ot = o1; o1 = o2; o2 = ot; /* Swap operands. */
+ op ^= 3; /* Use LT and flip condition. */
+ continue;
+ }
+ goto err;
+ }
+ return mmcall(L, cont, mo, o1, o2);
+ }
+ }
+ } else if (tvisbool(o1) && tvisbool(o2)) {
+ goto trymt;
+ } else {
+ err:
+ lj_err_comp(L, o1, o2);
+ return NULL;
+ }
+}
+
+/* Helper for ISTYPE and ISNUM. Implicit coercion or error. */
+void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp)
+{
+ L->top = curr_topL(L);
+ ra++; tp--;
+ lj_assertL(LJ_DUALNUM || tp != ~LJ_TNUMX, "bad type for ISTYPE");
+ if (LJ_DUALNUM && tp == ~LJ_TNUMX) lj_lib_checkint(L, ra);
+ else if (tp == ~LJ_TNUMX+1) lj_lib_checknum(L, ra);
+ else if (tp == ~LJ_TSTR) lj_lib_checkstr(L, ra);
+ else lj_err_argtype(L, ra, lj_obj_itypename[tp]);
+}
+
+/* Helper for calls. __call metamethod. */
+void lj_meta_call(lua_State *L, TValue *func, TValue *top)
+{
+ cTValue *mo = lj_meta_lookup(L, func, MM_call);
+ TValue *p;
+ if (!tvisfunc(mo))
+ lj_err_optype_call(L, func);
+ for (p = top; p > func+2*LJ_FR2; p--) copyTV(L, p, p-1);
+ if (LJ_FR2) copyTV(L, func+2, func);
+ copyTV(L, func, mo);
+}
+
+/* Helper for FORI. Coercion. */
+void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o)
+{
+ if (!lj_strscan_numberobj(o)) lj_err_msg(L, LJ_ERR_FORINIT);
+ if (!lj_strscan_numberobj(o+1)) lj_err_msg(L, LJ_ERR_FORLIM);
+ if (!lj_strscan_numberobj(o+2)) lj_err_msg(L, LJ_ERR_FORSTEP);
+ if (LJ_DUALNUM) {
+ /* Ensure all slots are integers or all slots are numbers. */
+ int32_t k[3];
+ int nint = 0;
+ ptrdiff_t i;
+ for (i = 0; i <= 2; i++) {
+ if (tvisint(o+i)) {
+ k[i] = intV(o+i); nint++;
+ } else {
+ k[i] = lj_num2int(numV(o+i)); nint += ((lua_Number)k[i] == numV(o+i));
+ }
+ }
+ if (nint == 3) { /* Narrow to integers. */
+ setintV(o, k[0]);
+ setintV(o+1, k[1]);
+ setintV(o+2, k[2]);
+ } else if (nint != 0) { /* Widen to numbers. */
+ if (tvisint(o)) setnumV(o, (lua_Number)intV(o));
+ if (tvisint(o+1)) setnumV(o+1, (lua_Number)intV(o+1));
+ if (tvisint(o+2)) setnumV(o+2, (lua_Number)intV(o+2));
+ }
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_meta.h b/libs/luajit-cmake/luajit/src/lj_meta.h
new file mode 100644
index 0000000..3a6eaac
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_meta.h
@@ -0,0 +1,38 @@
+/*
+** Metamethod handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_META_H
+#define _LJ_META_H
+
+#include "lj_obj.h"
+
+/* Metamethod handling */
+LJ_FUNC void lj_meta_init(lua_State *L);
+LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
+LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
+#if LJ_HASFFI
+LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
+#endif
+
+#define lj_meta_fastg(g, mt, mm) \
+ ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
+ lj_meta_cache(mt, mm, mmname_str(g, mm)))
+#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm)
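+
+/* How the negative cache plays out (sketch): the first failed lookup of,
+** say, __index sets bit (1u<<MM_index) in mt->nomm via lj_meta_cache(), so
+** later lj_meta_fast() calls short-circuit to NULL without a hash lookup.
+** Table stores reset nomm to 0 (see lj_meta_tset()), re-enabling the probe.
+*/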
+
+/* C helpers for some instructions, called from assembler VM. */
+LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
+LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
+ cTValue *rc, BCReg op);
+LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
+LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
+LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
+LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
+LJ_FUNCA void lj_meta_istype(lua_State *L, BCReg ra, BCReg tp);
+LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
+LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_obj.c b/libs/luajit-cmake/luajit/src/lj_obj.c
new file mode 100644
index 0000000..65cbe1a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_obj.c
@@ -0,0 +1,51 @@
+/*
+** Miscellaneous object handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_obj_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+/* Object type names. */
+LJ_DATADEF const char *const lj_obj_typename[] = { /* ORDER LUA_T */
+ "no value", "nil", "boolean", "userdata", "number", "string",
+ "table", "function", "userdata", "thread", "proto", "cdata"
+};
+
+LJ_DATADEF const char *const lj_obj_itypename[] = { /* ORDER LJ_T */
+ "nil", "boolean", "boolean", "userdata", "string", "upval", "thread",
+ "proto", "function", "trace", "cdata", "table", "userdata", "number"
+};
+
+/* Compare two objects without calling metamethods. */
+int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2)
+{
+ if (itype(o1) == itype(o2)) {
+ if (tvispri(o1))
+ return 1;
+ if (!tvisnum(o1))
+ return gcrefeq(o1->gcr, o2->gcr);
+ } else if (!tvisnumber(o1) || !tvisnumber(o2)) {
+ return 0;
+ }
+ return numberVnum(o1) == numberVnum(o2);
+}
+
+/* Return pointer to object or its object data. */
+const void * LJ_FASTCALL lj_obj_ptr(global_State *g, cTValue *o)
+{
+ UNUSED(g);
+ if (tvisudata(o))
+ return uddata(udataV(o));
+ else if (tvislightud(o))
+ return lightudV(g, o);
+ else if (LJ_HASFFI && tviscdata(o))
+ return cdataptr(cdataV(o));
+ else if (tvisgcv(o))
+ return gcV(o);
+ else
+ return NULL;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_obj.h b/libs/luajit-cmake/luajit/src/lj_obj.h
new file mode 100644
index 0000000..67e4118
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_obj.h
@@ -0,0 +1,1045 @@
+/*
+** LuaJIT VM tags, values and objects.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#ifndef _LJ_OBJ_H
+#define _LJ_OBJ_H
+
+#include "lua.h"
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Memory references --------------------------------------------------- */
+
+/* Memory and GC object sizes. */
+typedef uint32_t MSize;
+#if LJ_GC64
+typedef uint64_t GCSize;
+#else
+typedef uint32_t GCSize;
+#endif
+
+/* Memory reference */
+typedef struct MRef {
+#if LJ_GC64
+ uint64_t ptr64; /* True 64 bit pointer. */
+#else
+ uint32_t ptr32; /* Pseudo 32 bit pointer. */
+#endif
+} MRef;
+
+#if LJ_GC64
+#define mref(r, t) ((t *)(void *)(r).ptr64)
+#define mrefu(r) ((r).ptr64)
+
+#define setmref(r, p) ((r).ptr64 = (uint64_t)(void *)(p))
+#define setmrefu(r, u) ((r).ptr64 = (uint64_t)(u))
+#define setmrefr(r, v) ((r).ptr64 = (v).ptr64)
+#else
+#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
+#define mrefu(r) ((r).ptr32)
+
+#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
+#define setmrefu(r, u) ((r).ptr32 = (uint32_t)(u))
+#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
+#endif
+
+/* -- GC object references ------------------------------------------------ */
+
+/* GCobj reference */
+typedef struct GCRef {
+#if LJ_GC64
+ uint64_t gcptr64; /* True 64 bit pointer. */
+#else
+ uint32_t gcptr32; /* Pseudo 32 bit pointer. */
+#endif
+} GCRef;
+
+/* Common GC header for all collectable objects. */
+#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
+/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
+
+#if LJ_GC64
+#define gcref(r) ((GCobj *)(r).gcptr64)
+#define gcrefp(r, t) ((t *)(void *)(r).gcptr64)
+#define gcrefu(r) ((r).gcptr64)
+#define gcrefeq(r1, r2) ((r1).gcptr64 == (r2).gcptr64)
+
+#define setgcref(r, gc) ((r).gcptr64 = (uint64_t)&(gc)->gch)
+#define setgcreft(r, gc, it) \
+ (r).gcptr64 = (uint64_t)&(gc)->gch | (((uint64_t)(it)) << 47)
+#define setgcrefp(r, p) ((r).gcptr64 = (uint64_t)(p))
+#define setgcrefnull(r) ((r).gcptr64 = 0)
+#define setgcrefr(r, v) ((r).gcptr64 = (v).gcptr64)
+#else
+#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
+#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
+#define gcrefu(r) ((r).gcptr32)
+#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
+
+#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
+#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
+#define setgcrefnull(r) ((r).gcptr32 = 0)
+#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
+#endif
+
+#define gcnext(gc) (gcref((gc)->gch.nextgc))
+
+/* IMPORTANT NOTE:
+**
+** All uses of the setgcref* macros MUST be accompanied with a write barrier.
+**
+** This is to ensure the integrity of the incremental GC. The invariant
+** to preserve is that a black object never points to a white object.
+** I.e. never store a white object into a field of a black object.
+**
+** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
+** - The source is not a GC object (NULL).
+** - The target is a GC root. I.e. everything in global_State.
+** - The target is a lua_State field (threads are never black).
+** - The target is a stack slot, see setgcV et al.
+** - The target is an open upvalue, i.e. pointing to a stack slot.
+** - The target is a newly created object (i.e. marked white). But make
+** sure nothing invokes the GC in between.
+** - The target and the source are the same object (self-reference).
+** - The target already contains the object (e.g. moving elements around).
+**
+** The most common case is a store to a stack slot. All other cases where
+** a barrier has been omitted are annotated with a NOBARRIER comment.
+**
+** The same logic applies for stores to table slots (array part or hash
+** part). ALL uses of lj_tab_set* require a barrier for the stored value
+** *and* the stored key, based on the above rules. In practice this means
+** a barrier is needed if *either* of the key or value are a GC object.
+**
+** It's ok to LEAVE OUT the write barrier in the following special cases:
+** - The stored value is nil. The key doesn't matter because it's either
+** not resurrected or lj_tab_newkey() will take care of the key barrier.
+** - The key doesn't matter if the *previously* stored value is guaranteed
+** to be non-nil (because the key is kept alive in the table).
+** - The key doesn't matter if it's guaranteed not to be part of the table,
+** since lj_tab_newkey() takes care of the key barrier. This applies
+** trivially to new tables, but watch out for resurrected keys. Storing
+** a nil value leaves the key in the table!
+**
+** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
+** by the interpreter for all table stores.
+**
+** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
+** dead keys in tables. The reference is left in, but it's guaranteed to
+** be never dereferenced as long as the value is nil. It's ok if the key is
+** freed or if any object subsequently gets the same address.
+**
+** Not destroying dead keys helps to keep key hash slots stable. This avoids
+** specialization back-off for HREFK when a value flips between nil and
+** non-nil and the GC gets in the way. It also allows safely hoisting
+** HREF/HREFK across GC steps. Dead keys are only removed if a table is
+** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
+**
+** The trade-off is that a write barrier for tables must take the key into
+** account, too. Implicitly resurrecting the key by storing a non-nil value
+** may invalidate the incremental GC invariant.
+*/
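+
+/* A minimal usage sketch of the table-store rules above (hypothetical
+** C-level code; lj_tab_setstr() and lj_gc_anybarriert() are the interfaces
+** the core uses for this elsewhere):
+**
+**   TValue *slot = lj_tab_setstr(L, t, key);  (may add/resurrect the key)
+**   copyTV(L, slot, &v);
+**   lj_gc_anybarriert(L, t);                  (covers both key and value)
+*/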
+
+/* -- Common type definitions --------------------------------------------- */
+
+/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
+typedef uint32_t BCIns; /* Bytecode instruction. */
+typedef uint32_t BCPos; /* Bytecode position. */
+typedef uint32_t BCReg; /* Bytecode register. */
+typedef int32_t BCLine; /* Bytecode line number. */
+
+/* Internal assembler functions. Never call these directly from C. */
+typedef void (*ASMFunction)(void);
+
+/* Resizable string buffer. Need this here, details in lj_buf.h. */
+#define SBufHeader char *w, *e, *b; MRef L
+typedef struct SBuf {
+ SBufHeader;
+} SBuf;
+
+/* -- Tags and values ----------------------------------------------------- */
+
+/* Frame link. */
+typedef union {
+ int32_t ftsz; /* Frame type and size of previous frame. */
+ MRef pcr; /* Or PC for Lua frames. */
+} FrameLink;
+
+/* Tagged value. */
+typedef LJ_ALIGN(8) union TValue {
+ uint64_t u64; /* 64 bit pattern overlaps number. */
+ lua_Number n; /* Number object overlaps split tag/value object. */
+#if LJ_GC64
+ GCRef gcr; /* GCobj reference with tag. */
+ int64_t it64;
+ struct {
+ LJ_ENDIAN_LOHI(
+ int32_t i; /* Integer value. */
+ , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
+ )
+ };
+#else
+ struct {
+ LJ_ENDIAN_LOHI(
+ union {
+ GCRef gcr; /* GCobj reference (if any). */
+ int32_t i; /* Integer value. */
+ };
+ , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
+ )
+ };
+#endif
+#if LJ_FR2
+ int64_t ftsz; /* Frame type and size of previous frame, or PC. */
+#else
+ struct {
+ LJ_ENDIAN_LOHI(
+ GCRef func; /* Function for next frame (or dummy L). */
+ , FrameLink tp; /* Link to previous frame. */
+ )
+ } fr;
+#endif
+ struct {
+ LJ_ENDIAN_LOHI(
+ uint32_t lo; /* Lower 32 bits of number. */
+ , uint32_t hi; /* Upper 32 bits of number. */
+ )
+ } u32;
+} TValue;
+
+typedef const TValue cTValue;
+
+#define tvref(r) (mref(r, TValue))
+
+/* More external and GCobj tags for internal objects. */
+#define LAST_TT LUA_TTHREAD
+#define LUA_TPROTO (LAST_TT+1)
+#define LUA_TCDATA (LAST_TT+2)
+
+/* Internal object tags.
+**
+** Format for 32 bit GC references (!LJ_GC64):
+**
+** Internal tags overlap the MSW of a number object (must be a double).
+** Interpreted as a double these are special NaNs. The FPU only generates
+** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
+** for use as internal tags. Small negative numbers are used to shorten the
+** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
+**
+** ---MSW---.---LSW---
+** primitive types | itype | |
+** lightuserdata | itype | void * | (32 bit platforms)
+** lightuserdata |ffff|seg| ofs | (64 bit platforms)
+** GC objects | itype | GCRef |
+** int (LJ_DUALNUM)| itype | int |
+** number -------double------
+**
+** Format for 64 bit GC references (LJ_GC64):
+**
+** The upper 13 bits must be 1 (0xfff8...) for a special NaN. The next
+** 4 bits hold the internal tag. The lowest 47 bits either hold a pointer,
+** a zero-extended 32 bit integer or all bits set to 1 for primitive types.
+**
+** ------MSW------.------LSW------
+** primitive types |1..1|itype|1..................1|
+** GC objects |1..1|itype|-------GCRef--------|
+** lightuserdata |1..1|itype|seg|------ofs-------|
+** int (LJ_DUALNUM) |1..1|itype|0..0|-----int-------|
+** number ------------double-------------
+**
+** ORDER LJ_T
+** Primitive types nil/false/true must be first, lightuserdata next.
+** GC objects are at the end, table/userdata must be lowest.
+** Also check lj_ir.h for similar ordering constraints.
+*/
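+/* Worked example, derived from the macros below: storing a GCstr pointer p
+** with setgcreft(r, p, LJ_TSTR) computes p | ((uint64_t)~4u << 47). Only the
+** low 17 bits of ~4u survive the shift, giving upper bits 0xfffd8 and an
+** encoded value of U64x(fffd8000,00000000) | p; itype() recovers ~4u via an
+** arithmetic right-shift by 47. Nil is all ones: setpriV(o, LJ_TNIL) yields
+** it64 == -1, matching setnilV() and tvisnil().
+*/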
+#define LJ_TNIL (~0u)
+#define LJ_TFALSE (~1u)
+#define LJ_TTRUE (~2u)
+#define LJ_TLIGHTUD (~3u)
+#define LJ_TSTR (~4u)
+#define LJ_TUPVAL (~5u)
+#define LJ_TTHREAD (~6u)
+#define LJ_TPROTO (~7u)
+#define LJ_TFUNC (~8u)
+#define LJ_TTRACE (~9u)
+#define LJ_TCDATA (~10u)
+#define LJ_TTAB (~11u)
+#define LJ_TUDATA (~12u)
+/* This is just the canonical number type used in some places. */
+#define LJ_TNUMX (~13u)
+
+/* Integers have itype == LJ_TISNUM, doubles have itype < LJ_TISNUM. */
+#if LJ_64 && !LJ_GC64
+#define LJ_TISNUM 0xfffeffffu
+#else
+#define LJ_TISNUM LJ_TNUMX
+#endif
+#define LJ_TISTRUECOND LJ_TFALSE
+#define LJ_TISPRI LJ_TTRUE
+#define LJ_TISGCV (LJ_TSTR+1)
+#define LJ_TISTABUD LJ_TTAB
+
+/* Type marker for slot holding a traversal index. Must be lightuserdata. */
+#define LJ_KEYINDEX 0xfffe7fffu
+
+#if LJ_GC64
+#define LJ_GCVMASK (((uint64_t)1 << 47) - 1)
+#endif
+
+#if LJ_64
+/* To stay within 47 bits, lightuserdata is segmented. */
+#define LJ_LIGHTUD_BITS_SEG 8
+#define LJ_LIGHTUD_BITS_LO (47 - LJ_LIGHTUD_BITS_SEG)
+#endif
+
+/* -- String object ------------------------------------------------------- */
+
+typedef uint32_t StrHash; /* String hash value. */
+typedef uint32_t StrID; /* String ID. */
+
+/* String object header. String payload follows. */
+typedef struct GCstr {
+ GCHeader;
+ uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
+ uint8_t hashalg; /* Hash algorithm. */
+ StrID sid; /* Interned string ID. */
+ StrHash hash; /* Hash of string. */
+ MSize len; /* Size of string. */
+} GCstr;
+
+#define strref(r) (&gcref((r))->str)
+#define strdata(s) ((const char *)((s)+1))
+#define strdatawr(s) ((char *)((s)+1))
+#define strVdata(o) strdata(strV(o))
+
+/* -- Userdata object ----------------------------------------------------- */
+
+/* Userdata object. Payload follows. */
+typedef struct GCudata {
+ GCHeader;
+ uint8_t udtype; /* Userdata type. */
+ uint8_t unused2;
+ GCRef env; /* Should be at same offset in GCfunc. */
+ MSize len; /* Size of payload. */
+ GCRef metatable; /* Must be at same offset in GCtab. */
+ uint32_t align1; /* To force 8 byte alignment of the payload. */
+} GCudata;
+
+/* Userdata types. */
+enum {
+ UDTYPE_USERDATA, /* Regular userdata. */
+ UDTYPE_IO_FILE, /* I/O library FILE. */
+ UDTYPE_FFI_CLIB, /* FFI C library namespace. */
+ UDTYPE_BUFFER, /* String buffer. */
+ UDTYPE__MAX
+};
+
+#define uddata(u) ((void *)((u)+1))
+#define sizeudata(u) (sizeof(struct GCudata)+(u)->len)
+
+/* -- C data object ------------------------------------------------------- */
+
+/* C data object. Payload follows. */
+typedef struct GCcdata {
+ GCHeader;
+ uint16_t ctypeid; /* C type ID. */
+} GCcdata;
+
+/* Prepended to variable-sized or realigned C data objects. */
+typedef struct GCcdataVar {
+ uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */
+ uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */
+ MSize len; /* Size of payload. */
+} GCcdataVar;
+
+#define cdataptr(cd) ((void *)((cd)+1))
+#define cdataisv(cd) ((cd)->marked & 0x80)
+#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
+#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len)
+#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra)
+#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset))
+
+/* -- Prototype object ---------------------------------------------------- */
+
+#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef))
+#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
+
+typedef struct GCproto {
+ GCHeader;
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ MSize sizebc; /* Number of bytecode instructions. */
+#if LJ_GC64
+ uint32_t unused_gc64;
+#endif
+ GCRef gclist;
+ MRef k; /* Split constant array (points to the middle). */
+ MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
+ MSize sizekgc; /* Number of collectable constants. */
+ MSize sizekn; /* Number of lua_Number constants. */
+ MSize sizept; /* Total size including colocated arrays. */
+ uint8_t sizeuv; /* Number of upvalues. */
+ uint8_t flags; /* Miscellaneous flags (see below). */
+ uint16_t trace; /* Anchor for chain of root traces. */
+ /* ------ The following fields are for debugging/tracebacks only ------ */
+ GCRef chunkname; /* Name of the chunk this function was defined in. */
+ BCLine firstline; /* First line of the function definition. */
+ BCLine numline; /* Number of lines for the function definition. */
+ MRef lineinfo; /* Compressed map from bytecode ins. to source line. */
+ MRef uvinfo; /* Upvalue names. */
+ MRef varinfo; /* Names and compressed extents of local variables. */
+} GCproto;
+
+/* Flags for prototype. */
+#define PROTO_CHILD 0x01 /* Has child prototypes. */
+#define PROTO_VARARG 0x02 /* Vararg function. */
+#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */
+#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */
+#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */
+/* Only used during parsing. */
+#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */
+#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */
+/* Top bits used for counting created closures. */
+#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */
+#define PROTO_CLC_BITS 3
+#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */
+
+#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */
+#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */
+
+#define proto_kgc(pt, idx) \
+ check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
+ gcref(mref((pt)->k, GCRef)[(idx)]))
+#define proto_knumtv(pt, idx) \
+ check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
+#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto)))
+#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt)))
+#define proto_uv(pt) (mref((pt)->uv, uint16_t))
+
+#define proto_chunkname(pt) (strref((pt)->chunkname))
+#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt))))
+#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void))
+#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t))
+#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t))
+
+/* -- Upvalue object ------------------------------------------------------ */
+
+typedef struct GCupval {
+ GCHeader;
+ uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */
+ uint8_t immutable; /* Immutable value. */
+ union {
+ TValue tv; /* If closed: the value itself. */
+ struct { /* If open: double linked list, anchored at thread. */
+ GCRef prev;
+ GCRef next;
+ };
+ };
+ MRef v; /* Points to stack slot (open) or above (closed). */
+ uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */
+} GCupval;
+
+#define uvprev(uv_) (&gcref((uv_)->prev)->uv)
+#define uvnext(uv_) (&gcref((uv_)->next)->uv)
+#define uvval(uv_) (mref((uv_)->v, TValue))
+
+/* -- Function object (closures) ------------------------------------------ */
+
+/* Common header for functions. env should be at same offset in GCudata. */
+#define GCfuncHeader \
+ GCHeader; uint8_t ffid; uint8_t nupvalues; \
+ GCRef env; GCRef gclist; MRef pc
+
+typedef struct GCfuncC {
+ GCfuncHeader;
+ lua_CFunction f; /* C function to be called. */
+ TValue upvalue[1]; /* Array of upvalues (TValue). */
+} GCfuncC;
+
+typedef struct GCfuncL {
+ GCfuncHeader;
+ GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */
+} GCfuncL;
+
+typedef union GCfunc {
+ GCfuncC c;
+ GCfuncL l;
+} GCfunc;
+
+#define FF_LUA 0
+#define FF_C 1
+#define isluafunc(fn) ((fn)->c.ffid == FF_LUA)
+#define iscfunc(fn) ((fn)->c.ffid == FF_C)
+#define isffunc(fn) ((fn)->c.ffid > FF_C)
+#define funcproto(fn) \
+ check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
+#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
+#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
+
+/* -- Table object -------------------------------------------------------- */
+
+/* Hash node. */
+typedef struct Node {
+ TValue val; /* Value object. Must be first field. */
+ TValue key; /* Key object. */
+ MRef next; /* Hash chain. */
+#if !LJ_GC64
+ MRef freetop; /* Top of free elements (stored in t->node[0]). */
+#endif
+} Node;
+
+LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
+
+typedef struct GCtab {
+ GCHeader;
+ uint8_t nomm; /* Negative cache for fast metamethods. */
+ int8_t colo; /* Array colocation. */
+ MRef array; /* Array part. */
+ GCRef gclist;
+ GCRef metatable; /* Must be at same offset in GCudata. */
+ MRef node; /* Hash part. */
+ uint32_t asize; /* Size of array part (keys [0, asize-1]). */
+ uint32_t hmask; /* Hash part mask (size of hash part - 1). */
+#if LJ_GC64
+ MRef freetop; /* Top of free elements. */
+#endif
+} GCtab;
+
+#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
+#define tabref(r) ((GCtab *)gcref((r)))
+#define noderef(r) (mref((r), Node))
+#define nextnode(n) (mref((n)->next, Node))
+#if LJ_GC64
+#define getfreetop(t, n) (noderef((t)->freetop))
+#define setfreetop(t, n, v) (setmref((t)->freetop, (v)))
+#else
+#define getfreetop(t, n) (noderef((n)->freetop))
+#define setfreetop(t, n, v) (setmref((n)->freetop, (v)))
+#endif
+
+/* -- State objects ------------------------------------------------------- */
+
+/* VM states. */
+enum {
+ LJ_VMST_INTERP, /* Interpreter. */
+ LJ_VMST_C, /* C function. */
+ LJ_VMST_GC, /* Garbage collector. */
+ LJ_VMST_EXIT, /* Trace exit handler. */
+ LJ_VMST_RECORD, /* Trace recorder. */
+ LJ_VMST_OPT, /* Optimizer. */
+ LJ_VMST_ASM, /* Assembler. */
+ LJ_VMST__MAX
+};
+
+#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st)
+
+/* Metamethods. ORDER MM */
+#ifdef LJ_HASFFI
+#define MMDEF_FFI(_) _(new)
+#else
+#define MMDEF_FFI(_)
+#endif
+
+#if LJ_52 || LJ_HASFFI
+#define MMDEF_PAIRS(_) _(pairs) _(ipairs)
+#else
+#define MMDEF_PAIRS(_)
+#define MM_pairs 255
+#define MM_ipairs 255
+#endif
+
+#define MMDEF(_) \
+ _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
+ /* Only the above (fast) metamethods are negative cached (max. 8). */ \
+ _(lt) _(le) _(concat) _(call) \
+ /* The following must be in ORDER ARITH. */ \
+ _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
+ /* The following are used in the standard libraries. */ \
+ _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_)
+
+typedef enum {
+#define MMENUM(name) MM_##name,
+MMDEF(MMENUM)
+#undef MMENUM
+ MM__MAX,
+ MM____ = MM__MAX,
+ MM_FAST = MM_len
+} MMS;
+
+/* GC root IDs. */
+typedef enum {
+ GCROOT_MMNAME, /* Metamethod names. */
+ GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
+ GCROOT_BASEMT, /* Metatables for base types. */
+ GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
+ GCROOT_IO_INPUT, /* Userdata for default I/O input file. */
+ GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */
+ GCROOT_MAX
+} GCRootID;
+
+#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)])
+#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
+#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
+
+/* Garbage collector state. */
+typedef struct GCState {
+ GCSize total; /* Memory currently allocated. */
+ GCSize threshold; /* Memory threshold. */
+ uint8_t currentwhite; /* Current white color. */
+ uint8_t state; /* GC state. */
+ uint8_t nocdatafin; /* No cdata finalizer called. */
+#if LJ_64
+ uint8_t lightudnum; /* Number of lightuserdata segments - 1. */
+#else
+ uint8_t unused1;
+#endif
+ MSize sweepstr; /* Sweep position in string table. */
+ GCRef root; /* List of all collectable objects. */
+ MRef sweep; /* Sweep position in root list. */
+ GCRef gray; /* List of gray objects. */
+ GCRef grayagain; /* List of objects for atomic traversal. */
+ GCRef weak; /* List of weak tables (to be cleared). */
+ GCRef mmudata; /* List of userdata (to be finalized). */
+ GCSize debt; /* Debt (how much GC is behind schedule). */
+ GCSize estimate; /* Estimate of memory actually in use. */
+ MSize stepmul; /* Incremental GC step granularity. */
+ MSize pause; /* Pause between successive GC cycles. */
+#if LJ_64
+ MRef lightudseg; /* Upper bits of lightuserdata segments. */
+#endif
+} GCState;
+
+/* String interning state. */
+typedef struct StrInternState {
+ GCRef *tab; /* String hash table anchors. */
+ MSize mask; /* String hash mask (size of hash table - 1). */
+ MSize num; /* Number of strings in hash table. */
+ StrID id; /* Next string ID. */
+ uint8_t idreseed; /* String ID reseed counter. */
+ uint8_t second; /* String interning table uses secondary hashing. */
+ uint8_t unused1;
+ uint8_t unused2;
+ LJ_ALIGN(8) uint64_t seed; /* Random string seed. */
+} StrInternState;
+
+/* Global state, shared by all threads of a Lua universe. */
+typedef struct global_State {
+ lua_Alloc allocf; /* Memory allocator. */
+ void *allocd; /* Memory allocator data. */
+ GCState gc; /* Garbage collector. */
+ GCstr strempty; /* Empty string. */
+ uint8_t stremptyz; /* Zero terminator of empty string. */
+ uint8_t hookmask; /* Hook mask. */
+ uint8_t dispatchmode; /* Dispatch mode. */
+ uint8_t vmevmask; /* VM event mask. */
+ StrInternState str; /* String interning. */
+ volatile int32_t vmstate; /* VM state or current JIT code trace number. */
+ GCRef mainthref; /* Link to main thread. */
+ SBuf tmpbuf; /* Temporary string buffer. */
+ TValue tmptv, tmptv2; /* Temporary TValues. */
+ Node nilnode; /* Fallback 1-element hash part (nil key and value). */
+ TValue registrytv; /* Anchor for registry. */
+ GCupval uvhead; /* Head of double-linked list of all open upvalues. */
+ int32_t hookcount; /* Instruction hook countdown. */
+ int32_t hookcstart; /* Start count for instruction hook counter. */
+ lua_Hook hookf; /* Hook function. */
+ lua_CFunction wrapf; /* Wrapper for C function calls. */
+ lua_CFunction panic; /* Called as a last resort for errors. */
+ BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
+ BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
+ GCRef cur_L; /* Currently executing lua_State. */
+ MRef jit_base; /* Current JIT code L->base or NULL. */
+ MRef ctype_state; /* Pointer to C type state. */
+ PRNGState prng; /* Global PRNG state. */
+ GCRef gcroot[GCROOT_MAX]; /* GC roots. */
+} global_State;
+
+#define mainthread(g) (&gcref(g->mainthref)->th)
+#define niltv(L) \
+ check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
+#define niltvg(g) \
+ check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
+
+/* Hook management. Hook event masks are defined in lua.h. */
+#define HOOK_EVENTMASK 0x0f
+#define HOOK_ACTIVE 0x10
+#define HOOK_ACTIVE_SHIFT 4
+#define HOOK_VMEVENT 0x20
+#define HOOK_GC 0x40
+#define HOOK_PROFILE 0x80
+#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
+#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
+#define hook_entergc(g) \
+ ((g)->hookmask = ((g)->hookmask | (HOOK_ACTIVE|HOOK_GC)) & ~HOOK_PROFILE)
+#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
+#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
+#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
+#define hook_restore(g, h) \
+ ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
+
+/* Per-thread state object. */
+struct lua_State {
+ GCHeader;
+ uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */
+ uint8_t status; /* Thread status. */
+ MRef glref; /* Link to global state. */
+ GCRef gclist; /* GC chain. */
+ TValue *base; /* Base of currently executing function. */
+ TValue *top; /* First free slot in the stack. */
+ MRef maxstack; /* Last free slot in the stack. */
+ MRef stack; /* Stack base. */
+ GCRef openupval; /* List of open upvalues in the stack. */
+ GCRef env; /* Thread environment (table of globals). */
+ void *cframe; /* End of C stack frame chain. */
+ MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */
+};
+
+#define G(L) (mref(L->glref, global_State))
+#define registry(L) (&G(L)->registrytv)
+
+/* Macros to access the currently executing (Lua) function. */
+#if LJ_GC64
+#define curr_func(L) (&gcval(L->base-2)->fn)
+#elif LJ_FR2
+#define curr_func(L) (&gcref((L->base-2)->gcr)->fn)
+#else
+#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
+#endif
+#define curr_funcisL(L) (isluafunc(curr_func(L)))
+#define curr_proto(L) (funcproto(curr_func(L)))
+#define curr_topL(L) (L->base + curr_proto(L)->framesize)
+#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
+
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+LJ_FUNC_NORET void lj_assert_fail(global_State *g, const char *file, int line,
+ const char *func, const char *fmt, ...);
+#endif
+
+/* -- GC object definition and conversions -------------------------------- */
+
+/* GC header for generic access to common fields of GC objects. */
+typedef struct GChead {
+ GCHeader;
+ uint8_t unused1;
+ uint8_t unused2;
+ GCRef env;
+ GCRef gclist;
+ GCRef metatable;
+} GChead;
+
+/* The env field SHOULD be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
+LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
+
+/* The metatable field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
+LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
+
+/* The gclist field MUST be at the same offset for all GC objects. */
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
+LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
+
+typedef union GCobj {
+ GChead gch;
+ GCstr str;
+ GCupval uv;
+ lua_State th;
+ GCproto pt;
+ GCfunc fn;
+ GCcdata cd;
+ GCtab tab;
+ GCudata ud;
+} GCobj;
+
+/* Macros to convert a GCobj pointer into a specific value. */
+#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
+#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
+#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
+#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
+#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
+#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
+#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
+#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
+
+/* Macro to convert any collectable object into a GCobj pointer. */
+#define obj2gco(v) ((GCobj *)(v))
+
+/* -- TValue getters/setters ---------------------------------------------- */
+
+/* Macros to test types. */
+#if LJ_GC64
+#define itype(o) ((uint32_t)((o)->it64 >> 47))
+#define tvisnil(o) ((o)->it64 == -1)
+#else
+#define itype(o) ((o)->it)
+#define tvisnil(o) (itype(o) == LJ_TNIL)
+#endif
+#define tvisfalse(o) (itype(o) == LJ_TFALSE)
+#define tvistrue(o) (itype(o) == LJ_TTRUE)
+#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
+#if LJ_64 && !LJ_GC64
+#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
+#else
+#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
+#endif
+#define tvisstr(o) (itype(o) == LJ_TSTR)
+#define tvisfunc(o) (itype(o) == LJ_TFUNC)
+#define tvisthread(o) (itype(o) == LJ_TTHREAD)
+#define tvisproto(o) (itype(o) == LJ_TPROTO)
+#define tviscdata(o) (itype(o) == LJ_TCDATA)
+#define tvistab(o) (itype(o) == LJ_TTAB)
+#define tvisudata(o) (itype(o) == LJ_TUDATA)
+#define tvisnumber(o) (itype(o) <= LJ_TISNUM)
+#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM)
+#define tvisnum(o) (itype(o) < LJ_TISNUM)
+
+#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND)
+#define tvispri(o) (itype(o) >= LJ_TISPRI)
+#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */
+#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
+
+/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
+#define tvisnan(o) ((o)->n != (o)->n)
+#if LJ_64
+#define tviszero(o) (((o)->u64 << 1) == 0)
+#else
+#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
+#endif
+#define tvispzero(o) ((o)->u64 == 0)
+#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000))
+#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000))
+#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
+
+/* Macros to convert type ids. */
+#if LJ_64 && !LJ_GC64
+#define itypemap(o) \
+ (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
+#else
+#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
+#endif
+
+/* Macros to get tagged values. */
+#if LJ_GC64
+#define gcval(o) ((GCobj *)(gcrefu((o)->gcr) & LJ_GCVMASK))
+#else
+#define gcval(o) (gcref((o)->gcr))
+#endif
+#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - itype(o)))
+#if LJ_64
+#define lightudseg(u) \
+ (((u) >> LJ_LIGHTUD_BITS_LO) & ((1 << LJ_LIGHTUD_BITS_SEG)-1))
+#define lightudlo(u) \
+ ((u) & (((uint64_t)1 << LJ_LIGHTUD_BITS_LO) - 1))
+#define lightudup(p) \
+ ((uint32_t)(((p) >> LJ_LIGHTUD_BITS_LO) << (LJ_LIGHTUD_BITS_LO-32)))
+static LJ_AINLINE void *lightudV(global_State *g, cTValue *o)
+{
+ uint64_t u = o->u64;
+ uint64_t seg = lightudseg(u);
+ uint32_t *segmap = mref(g->gc.lightudseg, uint32_t);
+ lj_assertG(tvislightud(o), "lightuserdata expected");
+ if (seg == (1 << LJ_LIGHTUD_BITS_SEG)-1) return NULL;
+ lj_assertG(seg <= g->gc.lightudnum, "bad lightuserdata segment %d", seg);
+ return (void *)(((uint64_t)segmap[seg] << 32) | lightudlo(u));
+}
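+/* Reconstruction sketch for the segmented encoding above: segmap[seg] stores
+** (p >> 39) << 7 (cf. lightudup()), so with the default 8/39 bit split
+** (segmap[seg] << 32) | lightudlo(u) restores the original pointer p.
+*/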
+#else
+#define lightudV(g, o) check_exp(tvislightud(o), gcrefp((o)->gcr, void))
+#endif
+#define gcV(o) check_exp(tvisgcv(o), gcval(o))
+#define strV(o) check_exp(tvisstr(o), &gcval(o)->str)
+#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn)
+#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th)
+#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt)
+#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd)
+#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab)
+#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud)
+#define numV(o) check_exp(tvisnum(o), (o)->n)
+#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
+
+/* Macros to set tagged values. */
+#if LJ_GC64
+#define setitype(o, i) ((o)->it = ((i) << 15))
+#define setnilV(o) ((o)->it64 = -1)
+#define setpriV(o, x) ((o)->it64 = (int64_t)~((uint64_t)~(x)<<47))
+#define setboolV(o, x) ((o)->it64 = (int64_t)~((uint64_t)((x)+1)<<47))
+#else
+#define setitype(o, i) ((o)->it = (i))
+#define setnilV(o) ((o)->it = LJ_TNIL)
+#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
+#define setpriV(o, i) (setitype((o), (i)))
+#endif
+
+static LJ_AINLINE void setrawlightudV(TValue *o, void *p)
+{
+#if LJ_GC64
+ o->u64 = (uint64_t)p | (((uint64_t)LJ_TLIGHTUD) << 47);
+#elif LJ_64
+ o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
+#else
+ setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
+#endif
+}
+
+#if LJ_FR2 || LJ_32
+#define contptr(f) ((void *)(f))
+#define setcont(o, f) ((o)->u64 = (uint64_t)(uintptr_t)contptr(f))
+#else
+#define contptr(f) \
+ ((void *)(uintptr_t)(uint32_t)((intptr_t)(f) - (intptr_t)lj_vm_asm_begin))
+#define setcont(o, f) \
+ ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
+#endif
+
+static LJ_AINLINE void checklivetv(lua_State *L, TValue *o, const char *msg)
+{
+ UNUSED(L); UNUSED(o); UNUSED(msg);
+#if LUA_USE_ASSERT
+ if (tvisgcv(o)) {
+ lj_assertL(~itype(o) == gcval(o)->gch.gct,
+ "mismatch of TValue type %d vs GC type %d",
+ ~itype(o), gcval(o)->gch.gct);
+ /* Copy of isdead check from lj_gc.h to avoid circular include. */
+ lj_assertL(!(gcval(o)->gch.marked & (G(L)->gc.currentwhite ^ 3) & 3), msg);
+ }
+#endif
+}
+
+static LJ_AINLINE void setgcVraw(TValue *o, GCobj *v, uint32_t itype)
+{
+#if LJ_GC64
+ setgcreft(o->gcr, v, itype);
+#else
+ setgcref(o->gcr, v); setitype(o, itype);
+#endif
+}
+
+static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t it)
+{
+ setgcVraw(o, v, it);
+ checklivetv(L, o, "store to dead GC object");
+}
+
+#define define_setV(name, type, tag) \
+static LJ_AINLINE void name(lua_State *L, TValue *o, const type *v) \
+{ \
+ setgcV(L, o, obj2gco(v), tag); \
+}
+define_setV(setstrV, GCstr, LJ_TSTR)
+define_setV(setthreadV, lua_State, LJ_TTHREAD)
+define_setV(setprotoV, GCproto, LJ_TPROTO)
+define_setV(setfuncV, GCfunc, LJ_TFUNC)
+define_setV(setcdataV, GCcdata, LJ_TCDATA)
+define_setV(settabV, GCtab, LJ_TTAB)
+define_setV(setudataV, GCudata, LJ_TUDATA)
+
+#define setnumV(o, x) ((o)->n = (x))
+#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000))
+#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000))
+#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000))
+
+static LJ_AINLINE void setintV(TValue *o, int32_t i)
+{
+#if LJ_DUALNUM
+ o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
+#else
+ o->n = (lua_Number)i;
+#endif
+}
+
+static LJ_AINLINE void setint64V(TValue *o, int64_t i)
+{
+ if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
+ setintV(o, (int32_t)i);
+ else
+ setnumV(o, (lua_Number)i);
+}
+
+#if LJ_64
+#define setintptrV(o, i) setint64V((o), (i))
+#else
+#define setintptrV(o, i) setintV((o), (i))
+#endif
+
+/* Copy tagged values. */
+static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
+{
+ *o1 = *o2;
+ checklivetv(L, o1, "copy of dead GC object");
+}
+
+/* -- Number to integer conversion ---------------------------------------- */
+
+#if LJ_SOFTFP
+LJ_ASMF int32_t lj_vm_tobit(double x);
+#if LJ_TARGET_MIPS64
+LJ_ASMF int32_t lj_vm_tointg(double x);
+#endif
+#endif
+
+static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
+{
+#if LJ_SOFTFP
+ return lj_vm_tobit(n);
+#else
+ TValue o;
+ o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */
+ return (int32_t)o.u32.lo;
+#endif
+}
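+
+/* The magic addend above (2^52 + 2^51) forces a fixed exponent, so for
+** inputs in range the integer part of n lands in the low mantissa bits and
+** u32.lo reads it back modulo 2^32 -- the usual double-rounding trick used
+** for the bit operations.
+*/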
+
+#define lj_num2int(n) ((int32_t)(n))
+
+/*
+** This must match the JIT backend behavior. In particular for archs
+** that don't have a common hardware instruction for this conversion.
+** Note that signed FP to unsigned int conversions have an undefined
+** result and should never be relied upon in portable FFI code.
+** See also: C99 or C11 standard, 6.3.1.4, the footnote to paragraph (1).
+*/
+static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
+{
+#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS
+ int64_t i = (int64_t)n;
+ if (i < 0) i = (int64_t)(n - 18446744073709551616.0);
+ return (uint64_t)i;
+#else
+ return (uint64_t)n;
+#endif
+}
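+
+/* Worked example for the adjusted branch above: n = 2^63 makes the first
+** signed conversion overflow (i < 0), so it is redone as
+** (int64_t)(n - 2^64) == INT64_MIN, whose uint64_t reinterpretation is
+** again 2^63, exactly what a native unsigned conversion would produce.
+*/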
+
+static LJ_AINLINE int32_t numberVint(cTValue *o)
+{
+ if (LJ_LIKELY(tvisint(o)))
+ return intV(o);
+ else
+ return lj_num2int(numV(o));
+}
+
+static LJ_AINLINE lua_Number numberVnum(cTValue *o)
+{
+ if (LJ_UNLIKELY(tvisint(o)))
+ return (lua_Number)intV(o);
+ else
+ return numV(o);
+}
+
+/* -- Miscellaneous object handling --------------------------------------- */
+
+/* Names and maps for internal and external object tags. */
+LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
+LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
+
+#define lj_typename(o) (lj_obj_itypename[itypemap(o)])
+
+/* Compare two objects without calling metamethods. */
+LJ_FUNC int LJ_FASTCALL lj_obj_equal(cTValue *o1, cTValue *o2);
+LJ_FUNC const void * LJ_FASTCALL lj_obj_ptr(global_State *g, cTValue *o);
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_dce.c b/libs/luajit-cmake/luajit/src/lj_opt_dce.c
new file mode 100644
index 0000000..cff54a8
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_dce.c
@@ -0,0 +1,75 @@
+/*
+** DCE: Dead Code Elimination. Pre-LOOP only -- ASM already performs DCE.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_dce_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Scan through all snapshots and mark all referenced instructions. */
+static void dce_marksnap(jit_State *J)
+{
+ SnapNo i, nsnap = J->cur.nsnap;
+ for (i = 0; i < nsnap; i++) {
+ SnapShot *snap = &J->cur.snap[i];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (ref >= REF_FIRST)
+ irt_setmark(IR(ref)->t);
+ }
+ }
+}
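+/* Snapshot entries are the DCE roots: anything a side exit needs to
+** restore the interpreter state must stay live, even if the trace itself
+** never reads it again.
+*/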
+
+/* Backwards propagate marks. Replace unused instructions with NOPs. */
+static void dce_propagate(jit_State *J)
+{
+ IRRef1 *pchain[IR__MAX];
+ IRRef ins;
+ uint32_t i;
+ for (i = 0; i < IR__MAX; i++) pchain[i] = &J->chain[i];
+ for (ins = J->cur.nins-1; ins >= REF_FIRST; ins--) {
+ IRIns *ir = IR(ins);
+ if (irt_ismarked(ir->t)) {
+ irt_clearmark(ir->t);
+ pchain[ir->o] = &ir->prev;
+ } else if (!ir_sideeff(ir)) {
+ *pchain[ir->o] = ir->prev; /* Reroute original instruction chain. */
+ lj_ir_nop(ir);
+ continue;
+ }
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+}
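+/* A single reverse sweep suffices: operand refs always point backwards,
+** so every user of an instruction is visited (and can set its mark)
+** before the instruction itself is reached.
+*/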
+
+/* Dead Code Elimination.
+**
+** First backpropagate marks for all used instructions. Then replace
+** the unused ones with a NOP. Note that compressing the IR to eliminate
+** the NOPs does not pay off.
+*/
+void lj_opt_dce(jit_State *J)
+{
+ if ((J->flags & JIT_F_OPT_DCE)) {
+ dce_marksnap(J);
+ dce_propagate(J);
+ memset(J->bpropcache, 0, sizeof(J->bpropcache)); /* Invalidate cache. */
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_fold.c b/libs/luajit-cmake/luajit/src/lj_opt_fold.c
new file mode 100644
index 0000000..7ef09a1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_fold.c
@@ -0,0 +1,2602 @@
+/*
+** FOLD: Constant Folding, Algebraic Simplifications and Reassociation.
+** ABCelim: Array Bounds Check Elimination.
+** CSE: Common-Subexpression Elimination.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_fold_c
+#define LUA_CORE
+
+#include <math.h>
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_carith.h"
+#endif
+#include "lj_vm.h"
+#include "lj_strscan.h"
+#include "lj_strfmt.h"
+
+/* Here's a short description of how the FOLD engine processes instructions:
+**
+** The FOLD engine receives a single instruction stored in fins (J->fold.ins).
+** The instruction and its operands are used to select matching fold rules.
+** These are applied iteratively until a fixed point is reached.
+**
+** The 8 bit opcode of the instruction itself plus the opcodes of the
+** two instructions referenced by its operands form a 24 bit key
+** 'ins left right' (unused operands -> 0, literals -> lowest 8 bits).
+**
+** This key is used for partial matching against the fold rules. The
+** left/right operand fields of the key are successively masked with
+** the 'any' wildcard, from most specific to least specific:
+**
+** ins left right
+** ins any right
+** ins left any
+** ins any any
+**
+** The masked key is used to lookup a matching fold rule in a semi-perfect
+** hash table. If a matching rule is found, the related fold function is run.
+** Multiple rules can share the same fold function. A fold rule may return
+** one of several special values:
+**
+** - NEXTFOLD means no folding was applied, because an additional test
+** inside the fold function failed. Matching continues against less
+** specific fold rules. Finally the instruction is passed on to CSE.
+**
+** - RETRYFOLD means the instruction was modified in-place. Folding is
+** retried as if this instruction had just been received.
+**
+** All other return values are terminal actions -- no further folding is
+** applied:
+**
+** - INTFOLD(i) returns a reference to the integer constant i.
+**
+** - LEFTFOLD and RIGHTFOLD return the left/right operand reference
+** without emitting an instruction.
+**
+** - CSEFOLD and EMITFOLD pass the instruction directly to CSE or emit
+** it without passing through any further optimizations.
+**
+** - FAILFOLD, DROPFOLD and CONDFOLD only apply to instructions which have
+** no result (e.g. guarded assertions): FAILFOLD means the guard would
+** always fail, i.e. the current trace is pointless. DROPFOLD means
+** the guard is always true and has been eliminated. CONDFOLD is a
+** shortcut for FAILFOLD + cond (i.e. drop if true, otherwise fail).
+**
+** - Any other return value is interpreted as an IRRef or TRef. This
+** can be a reference to an existing or a newly created instruction.
+** Only the least-significant 16 bits (IRRef1) are used to form a TRef
+** which is finally returned to the caller.
+**
+** The FOLD engine receives instructions both from the trace recorder and
+** substituted instructions from LOOP unrolling. This means all types
+** of instructions may end up here, even though the recorder bypasses
+** FOLD in some cases. Thus all loads, stores and allocations must have
+** an any/any rule to avoid being passed on to CSE.
+**
+** Carefully read the following requirements before adding or modifying
+** any fold rules:
+**
+** Requirement #1: All fold rules must preserve their destination type.
+**
+** Consistently use INTFOLD() (KINT result) or lj_ir_knum() (KNUM result).
+** Never use lj_ir_knumint() which can have either a KINT or KNUM result.
+**
+** Requirement #2: Fold rules should not create *new* instructions which
+** reference operands *across* PHIs.
+**
+** E.g. a RETRYFOLD with 'fins->op1 = fleft->op1' is invalid if the
+** left operand is a PHI. Then fleft->op1 would point across the PHI
+** frontier to an invariant instruction. Adding a PHI for this instruction
+** would be counterproductive. The solution is to add a barrier which
+** prevents folding across PHIs, i.e. 'PHIBARRIER(fleft)' in this case.
+** The only exception is for recurrences with high latencies like
+** repeated int->num->int conversions.
+**
+** One could relax this condition a bit if the referenced instruction is
+** a PHI, too. But this often leads to worse code due to excessive
+** register shuffling.
+**
+** Note: returning *existing* instructions (e.g. LEFTFOLD) is ok, though.
+** Even returning fleft->op1 would be ok, because a new PHI will be added,
+** if needed. But again, this leads to excessive register shuffling and
+** should be avoided.
+**
+** Requirement #3: The set of all fold rules must be monotonic to guarantee
+** termination.
+**
+** The goal is optimization, so one primarily wants to add strength-reducing
+** rules. This means eliminating an instruction or replacing an instruction
+** with one or more simpler instructions. Don't add fold rules which point
+** into the other direction.
+**
+** Some rules (like commutativity) do not directly reduce the strength of
+** an instruction, but enable other fold rules (e.g. by moving constants
+** to the right operand). These rules must be made unidirectional to avoid
+** cycles.
+**
+** Rule of thumb: the trace recorder expands the IR and FOLD shrinks it.
+*/
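+/* Illustrative walk-through: for fins = ADD with both operands referencing
+** KINT constants 2 and 3, the first lookup key is ADD/KINT/KINT. It matches
+** LJFOLD(ADD KINT KINT) below, so kfold_intarith runs and returns
+** INTFOLD(5): a reference to the constant 5, with no ADD ever emitted.
+*/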
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fleft (J->fold.left)
+#define fright (J->fold.right)
+#define knumleft (ir_knum(fleft)->n)
+#define knumright (ir_knum(fright)->n)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Fold function type. Fastcall on x86 significantly reduces their size. */
+typedef IRRef (LJ_FASTCALL *FoldFunc)(jit_State *J);
+
+/* Macros for the fold specs, so buildvm can recognize them. */
+#define LJFOLD(x)
+#define LJFOLDX(x)
+#define LJFOLDF(name) static TRef LJ_FASTCALL fold_##name(jit_State *J)
+/* Note: They must be at the start of a line or buildvm ignores them! */
+
+/* Barrier to prevent using operands across PHIs. */
+#define PHIBARRIER(ir) if (irt_isphi((ir)->t)) return NEXTFOLD
+
+/* Barrier to prevent folding across a GC step.
+** GC steps can only happen at the head of a trace and at LOOP.
+** And the GC is only driven forward if there's at least one allocation.
+*/
+#define gcstep_barrier(J, ref) \
+ ((ref) < J->chain[IR_LOOP] && \
+ (J->chain[IR_SNEW] || J->chain[IR_XSNEW] || \
+ J->chain[IR_TNEW] || J->chain[IR_TDUP] || \
+ J->chain[IR_CNEW] || J->chain[IR_CNEWI] || \
+ J->chain[IR_BUFSTR] || J->chain[IR_TOSTR] || J->chain[IR_CALLA]))
+
+/* -- Constant folding for FP numbers ------------------------------------- */
+
+LJFOLD(ADD KNUM KNUM)
+LJFOLD(SUB KNUM KNUM)
+LJFOLD(MUL KNUM KNUM)
+LJFOLD(DIV KNUM KNUM)
+LJFOLD(LDEXP KNUM KNUM)
+LJFOLD(MIN KNUM KNUM)
+LJFOLD(MAX KNUM KNUM)
+LJFOLDF(kfold_numarith)
+{
+ lua_Number a = knumleft;
+ lua_Number b = knumright;
+ lua_Number y = lj_vm_foldarith(a, b, fins->o - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(NEG KNUM FLOAD)
+LJFOLD(ABS KNUM FLOAD)
+LJFOLDF(kfold_numabsneg)
+{
+ lua_Number a = knumleft;
+ lua_Number y = lj_vm_foldarith(a, a, fins->o - IR_ADD);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(LDEXP KNUM KINT)
+LJFOLDF(kfold_ldexp)
+{
+#if LJ_TARGET_X86ORX64
+ UNUSED(J);
+ return NEXTFOLD;
+#else
+ return lj_ir_knum(J, ldexp(knumleft, fright->i));
+#endif
+}
+
+LJFOLD(FPMATH KNUM any)
+LJFOLDF(kfold_fpmath)
+{
+ lua_Number a = knumleft;
+ lua_Number y = lj_vm_foldfpm(a, fins->op2);
+ return lj_ir_knum(J, y);
+}
+
+LJFOLD(CALLN KNUM any)
+LJFOLDF(kfold_fpcall1)
+{
+ const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
+ if (CCI_TYPE(ci) == IRT_NUM) {
+ double y = ((double (*)(double))ci->func)(knumleft);
+ return lj_ir_knum(J, y);
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CALLN CARG IRCALL_atan2)
+LJFOLDF(kfold_fpcall2)
+{
+ if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
+ const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
+ double a = ir_knum(IR(fleft->op1))->n;
+ double b = ir_knum(IR(fleft->op2))->n;
+ double y = ((double (*)(double, double))ci->func)(a, b);
+ return lj_ir_knum(J, y);
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(POW KNUM KNUM)
+LJFOLDF(kfold_numpow)
+{
+ return lj_ir_knum(J, lj_vm_foldarith(knumleft, knumright, IR_POW - IR_ADD));
+}
+
+/* Must not use kfold_kref for numbers: a KNUM constant may hold NaN,
+** which compares unequal even to the identical constant ref. */
+LJFOLD(EQ KNUM KNUM)
+LJFOLD(NE KNUM KNUM)
+LJFOLD(LT KNUM KNUM)
+LJFOLD(GE KNUM KNUM)
+LJFOLD(LE KNUM KNUM)
+LJFOLD(GT KNUM KNUM)
+LJFOLD(ULT KNUM KNUM)
+LJFOLD(UGE KNUM KNUM)
+LJFOLD(ULE KNUM KNUM)
+LJFOLD(UGT KNUM KNUM)
+LJFOLDF(kfold_numcomp)
+{
+ return CONDFOLD(lj_ir_numcmp(knumleft, knumright, (IROp)fins->o));
+}
+
+/* -- Constant folding for 32 bit integers -------------------------------- */
+
+static int32_t kfold_intop(int32_t k1, int32_t k2, IROp op)
+{
+ switch (op) {
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+ case IR_MUL: k1 *= k2; break;
+ case IR_MOD: k1 = lj_vm_modi(k1, k2); break;
+ case IR_NEG: k1 = -k1; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+ case IR_BSHL: k1 <<= (k2 & 31); break;
+ case IR_BSHR: k1 = (int32_t)((uint32_t)k1 >> (k2 & 31)); break;
+ case IR_BSAR: k1 >>= (k2 & 31); break;
+ case IR_BROL: k1 = (int32_t)lj_rol((uint32_t)k1, (k2 & 31)); break;
+ case IR_BROR: k1 = (int32_t)lj_ror((uint32_t)k1, (k2 & 31)); break;
+ case IR_MIN: k1 = k1 < k2 ? k1 : k2; break;
+ case IR_MAX: k1 = k1 > k2 ? k1 : k2; break;
+ default: lj_assertX(0, "bad IR op %d", op); break;
+ }
+ return k1;
+}
+
+LJFOLD(ADD KINT KINT)
+LJFOLD(SUB KINT KINT)
+LJFOLD(MUL KINT KINT)
+LJFOLD(MOD KINT KINT)
+LJFOLD(NEG KINT KINT)
+LJFOLD(BAND KINT KINT)
+LJFOLD(BOR KINT KINT)
+LJFOLD(BXOR KINT KINT)
+LJFOLD(BSHL KINT KINT)
+LJFOLD(BSHR KINT KINT)
+LJFOLD(BSAR KINT KINT)
+LJFOLD(BROL KINT KINT)
+LJFOLD(BROR KINT KINT)
+LJFOLD(MIN KINT KINT)
+LJFOLD(MAX KINT KINT)
+LJFOLDF(kfold_intarith)
+{
+ return INTFOLD(kfold_intop(fleft->i, fright->i, (IROp)fins->o));
+}
+
+LJFOLD(ADDOV KINT KINT)
+LJFOLD(SUBOV KINT KINT)
+LJFOLD(MULOV KINT KINT)
+LJFOLDF(kfold_intovarith)
+{
+ lua_Number n = lj_vm_foldarith((lua_Number)fleft->i, (lua_Number)fright->i,
+ fins->o - IR_ADDOV);
+ int32_t k = lj_num2int(n);
+ if (n != (lua_Number)k)
+ return FAILFOLD;
+ return INTFOLD(k);
+}
+
+LJFOLD(BNOT KINT)
+LJFOLDF(kfold_bnot)
+{
+ return INTFOLD(~fleft->i);
+}
+
+LJFOLD(BSWAP KINT)
+LJFOLDF(kfold_bswap)
+{
+ return INTFOLD((int32_t)lj_bswap((uint32_t)fleft->i));
+}
+
+LJFOLD(LT KINT KINT)
+LJFOLD(GE KINT KINT)
+LJFOLD(LE KINT KINT)
+LJFOLD(GT KINT KINT)
+LJFOLD(ULT KINT KINT)
+LJFOLD(UGE KINT KINT)
+LJFOLD(ULE KINT KINT)
+LJFOLD(UGT KINT KINT)
+LJFOLD(ABC KINT KINT)
+LJFOLDF(kfold_intcomp)
+{
+ int32_t a = fleft->i, b = fright->i;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD(a < b);
+ case IR_GE: return CONDFOLD(a >= b);
+ case IR_LE: return CONDFOLD(a <= b);
+ case IR_GT: return CONDFOLD(a > b);
+ case IR_ULT: return CONDFOLD((uint32_t)a < (uint32_t)b);
+ case IR_UGE: return CONDFOLD((uint32_t)a >= (uint32_t)b);
+ case IR_ULE: return CONDFOLD((uint32_t)a <= (uint32_t)b);
+ case IR_ABC:
+ case IR_UGT: return CONDFOLD((uint32_t)a > (uint32_t)b);
+ default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD;
+ }
+}
+
+LJFOLD(UGE any KINT)
+LJFOLDF(kfold_intcomp0)
+{
+ if (fright->i == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Constant folding for 64 bit integers -------------------------------- */
+
+static uint64_t kfold_int64arith(jit_State *J, uint64_t k1, uint64_t k2,
+ IROp op)
+{
+ UNUSED(J);
+#if LJ_HASFFI
+ switch (op) {
+ case IR_ADD: k1 += k2; break;
+ case IR_SUB: k1 -= k2; break;
+ case IR_MUL: k1 *= k2; break;
+ case IR_BAND: k1 &= k2; break;
+ case IR_BOR: k1 |= k2; break;
+ case IR_BXOR: k1 ^= k2; break;
+ case IR_BSHL: k1 <<= (k2 & 63); break;
+  /* Keep the shift variants in full 64 bit width -- a 32 bit intermediate
+  ** would truncate the folded constant. */
+  case IR_BSHR: k1 >>= (k2 & 63); break;
+  case IR_BSAR: k1 = (uint64_t)((int64_t)k1 >> (k2 & 63)); break;
+  case IR_BROL: k1 = lj_rol(k1, (k2 & 63)); break;
+  case IR_BROR: k1 = lj_ror(k1, (k2 & 63)); break;
+ default: lj_assertJ(0, "bad IR op %d", op); break;
+ }
+#else
+ UNUSED(k2); UNUSED(op);
+ lj_assertJ(0, "FFI IR op without FFI");
+#endif
+ return k1;
+}
+
+LJFOLD(ADD KINT64 KINT64)
+LJFOLD(SUB KINT64 KINT64)
+LJFOLD(MUL KINT64 KINT64)
+LJFOLD(BAND KINT64 KINT64)
+LJFOLD(BOR KINT64 KINT64)
+LJFOLD(BXOR KINT64 KINT64)
+LJFOLDF(kfold_int64arith)
+{
+ return INT64FOLD(kfold_int64arith(J, ir_k64(fleft)->u64,
+ ir_k64(fright)->u64, (IROp)fins->o));
+}
+
+LJFOLD(DIV KINT64 KINT64)
+LJFOLD(MOD KINT64 KINT64)
+LJFOLD(POW KINT64 KINT64)
+LJFOLDF(kfold_int64arith2)
+{
+#if LJ_HASFFI
+ uint64_t k1 = ir_k64(fleft)->u64, k2 = ir_k64(fright)->u64;
+ if (irt_isi64(fins->t)) {
+ k1 = fins->o == IR_DIV ? lj_carith_divi64((int64_t)k1, (int64_t)k2) :
+ fins->o == IR_MOD ? lj_carith_modi64((int64_t)k1, (int64_t)k2) :
+ lj_carith_powi64((int64_t)k1, (int64_t)k2);
+ } else {
+ k1 = fins->o == IR_DIV ? lj_carith_divu64(k1, k2) :
+ fins->o == IR_MOD ? lj_carith_modu64(k1, k2) :
+ lj_carith_powu64(k1, k2);
+ }
+ return INT64FOLD(k1);
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSHL KINT64 KINT)
+LJFOLD(BSHR KINT64 KINT)
+LJFOLD(BSAR KINT64 KINT)
+LJFOLD(BROL KINT64 KINT)
+LJFOLD(BROR KINT64 KINT)
+LJFOLDF(kfold_int64shift)
+{
+#if LJ_HASFFI
+ uint64_t k = ir_k64(fleft)->u64;
+ int32_t sh = (fright->i & 63);
+ return INT64FOLD(lj_carith_shift64(k, sh, fins->o - IR_BSHL));
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BNOT KINT64)
+LJFOLDF(kfold_bnot64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(~ir_k64(fleft)->u64);
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BSWAP KINT64)
+LJFOLDF(kfold_bswap64)
+{
+#if LJ_HASFFI
+ return INT64FOLD(lj_bswap64(ir_k64(fleft)->u64));
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(LT KINT64 KINT64)
+LJFOLD(GE KINT64 KINT64)
+LJFOLD(LE KINT64 KINT64)
+LJFOLD(GT KINT64 KINT64)
+LJFOLD(ULT KINT64 KINT64)
+LJFOLD(UGE KINT64 KINT64)
+LJFOLD(ULE KINT64 KINT64)
+LJFOLD(UGT KINT64 KINT64)
+LJFOLDF(kfold_int64comp)
+{
+#if LJ_HASFFI
+ uint64_t a = ir_k64(fleft)->u64, b = ir_k64(fright)->u64;
+ switch ((IROp)fins->o) {
+ case IR_LT: return CONDFOLD((int64_t)a < (int64_t)b);
+ case IR_GE: return CONDFOLD((int64_t)a >= (int64_t)b);
+ case IR_LE: return CONDFOLD((int64_t)a <= (int64_t)b);
+ case IR_GT: return CONDFOLD((int64_t)a > (int64_t)b);
+ case IR_ULT: return CONDFOLD(a < b);
+ case IR_UGE: return CONDFOLD(a >= b);
+ case IR_ULE: return CONDFOLD(a <= b);
+ case IR_UGT: return CONDFOLD(a > b);
+ default: lj_assertJ(0, "bad IR op %d", fins->o); return FAILFOLD;
+ }
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(UGE any KINT64)
+LJFOLDF(kfold_int64comp0)
+{
+#if LJ_HASFFI
+ if (ir_k64(fright)->u64 == 0)
+ return DROPFOLD;
+ return NEXTFOLD;
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+/* -- Constant folding for strings ---------------------------------------- */
+
+LJFOLD(SNEW KKPTR KINT)
+LJFOLDF(kfold_snew_kptr)
+{
+ GCstr *s = lj_str_new(J->L, (const char *)ir_kptr(fleft), (size_t)fright->i);
+ return lj_ir_kstr(J, s);
+}
+
+LJFOLD(SNEW any KINT)
+LJFOLD(XSNEW any KINT)
+LJFOLDF(kfold_snew_empty)
+{
+ if (fright->i == 0)
+ return lj_ir_kstr(J, &J2G(J)->strempty);
+ return NEXTFOLD;
+}
+
+LJFOLD(STRREF KGC KINT)
+LJFOLDF(kfold_strref)
+{
+ GCstr *str = ir_kstr(fleft);
+ lj_assertJ((MSize)fright->i <= str->len, "bad string ref");
+ return lj_ir_kkptr(J, (char *)strdata(str) + fright->i);
+}
+
+LJFOLD(STRREF SNEW any)
+LJFOLDF(kfold_strref_snew)
+{
+ PHIBARRIER(fleft);
+ if (irref_isk(fins->op2) && fright->i == 0) {
+ return fleft->op1; /* strref(snew(ptr, len), 0) ==> ptr */
+ } else {
+ /* Reassociate: strref(snew(strref(str, a), len), b) ==> strref(str, a+b) */
+ IRIns *ir = IR(fleft->op1);
+ if (ir->o == IR_STRREF) {
+ IRRef1 str = ir->op1; /* IRIns * is not valid across emitir. */
+ PHIBARRIER(ir);
+ fins->op2 = emitir(IRTI(IR_ADD), ir->op2, fins->op2); /* Clobbers fins! */
+ fins->op1 = str;
+ fins->ot = IRT(IR_STRREF, IRT_PGC);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CALLN CARG IRCALL_lj_str_cmp)
+LJFOLDF(kfold_strcmp)
+{
+ if (irref_isk(fleft->op1) && irref_isk(fleft->op2)) {
+ GCstr *a = ir_kstr(IR(fleft->op1));
+ GCstr *b = ir_kstr(IR(fleft->op2));
+ return INTFOLD(lj_str_cmp(a, b));
+ }
+ return NEXTFOLD;
+}
+
+/* -- Constant folding and forwarding for buffers ------------------------- */
+
+/*
+** Buffer ops perform stores, but their effect is limited to the buffer
+** itself. Also, buffer ops are chained: a use of an op implies a use of
+** all other ops up the chain. Conversely, if an op is unused, all ops
+** up the chain can go unused. This largely eliminates the need to treat
+** them as stores.
+**
+** Alas, treating them as normal (IRM_N) ops doesn't work, because they
+** cannot be CSEd in isolation. CSE for IRM_N is implicitly done in LOOP
+** or if FOLD is disabled.
+**
+** The compromise is to declare them as loads, emit them like stores and
+** CSE whole chains manually when the BUFSTR is to be emitted. Any chain
+** fragments left over from CSE are eliminated by DCE.
+**
+** The string buffer methods emit a USE instead of a BUFSTR to keep the
+** chain alive.
+*/
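+/* Sketch of a typical chain, assuming the usual lowering of string
+** concatenation: BUFSTR -> BUFPUT s2 -> BUFPUT s1 -> BUFHDR RESET,
+** following the op1 links. A use of the BUFSTR thus keeps every put and
+** the header alive; an unused BUFSTR lets DCE drop the whole chain.
+*/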
+
+LJFOLD(BUFHDR any any)
+LJFOLDF(bufhdr_merge)
+{
+ return fins->op2 == IRBUFHDR_WRITE ? CSEFOLD : EMITFOLD;
+}
+
+LJFOLD(BUFPUT any BUFSTR)
+LJFOLDF(bufput_bufstr)
+{
+ if ((J->flags & JIT_F_OPT_FWD)) {
+ IRRef hdr = fright->op2;
+    /* New buffer, no other buffer op in between and same buffer? */
+ if (fleft->o == IR_BUFHDR && fleft->op2 == IRBUFHDR_RESET &&
+ fleft->prev == hdr &&
+ fleft->op1 == IR(hdr)->op1 &&
+ !(irt_isphi(fright->t) && IR(hdr)->prev) &&
+ (!LJ_HASBUFFER || J->chain[IR_CALLA] < hdr)) {
+ IRRef ref = fins->op1;
+ IR(ref)->op2 = IRBUFHDR_APPEND; /* Modify BUFHDR. */
+ IR(ref)->op1 = fright->op1;
+ return ref;
+ }
+ /* Replay puts to global temporary buffer. */
+ if (IR(hdr)->op2 == IRBUFHDR_RESET && !irt_isphi(fright->t)) {
+ IRIns *ir = IR(fright->op1);
+ /* For now only handle single string.reverse .lower .upper .rep. */
+ if (ir->o == IR_CALLL &&
+ ir->op2 >= IRCALL_lj_buf_putstr_reverse &&
+ ir->op2 <= IRCALL_lj_buf_putstr_rep) {
+ IRIns *carg1 = IR(ir->op1);
+ if (ir->op2 == IRCALL_lj_buf_putstr_rep) {
+ IRIns *carg2 = IR(carg1->op1);
+ if (carg2->op1 == hdr) {
+ return lj_ir_call(J, ir->op2, fins->op1, carg2->op2, carg1->op2);
+ }
+ } else if (carg1->op1 == hdr) {
+ return lj_ir_call(J, ir->op2, fins->op1, carg1->op2);
+ }
+ }
+ }
+ }
+ return EMITFOLD; /* Always emit, CSE later. */
+}
+
+LJFOLD(BUFPUT any any)
+LJFOLDF(bufput_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fright->o == IR_KGC) {
+ GCstr *s2 = ir_kstr(fright);
+ if (s2->len == 0) { /* Empty string? */
+ return LEFTFOLD;
+ } else {
+ if (fleft->o == IR_BUFPUT && irref_isk(fleft->op2) &&
+ !irt_isphi(fleft->t)) { /* Join two constant string puts in a row. */
+ GCstr *s1 = ir_kstr(IR(fleft->op2));
+ IRRef kref = lj_ir_kstr(J, lj_buf_cat2str(J->L, s1, s2));
+ /* lj_ir_kstr() may realloc the IR and invalidates any IRIns *. */
+ IR(fins->op1)->op2 = kref; /* Modify previous BUFPUT. */
+ return fins->op1;
+ }
+ }
+ }
+ return EMITFOLD; /* Always emit, CSE later. */
+}
+
+LJFOLD(BUFSTR any any)
+LJFOLDF(bufstr_kfold_cse)
+{
+ lj_assertJ(fleft->o == IR_BUFHDR || fleft->o == IR_BUFPUT ||
+ fleft->o == IR_CALLL,
+ "bad buffer constructor IR op %d", fleft->o);
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ if (fleft->o == IR_BUFHDR) { /* No put operations? */
+ if (fleft->op2 == IRBUFHDR_RESET) /* Empty buffer? */
+ return lj_ir_kstr(J, &J2G(J)->strempty);
+ fins->op1 = fleft->op1;
+      fins->op2 = fleft->prev;  /* Relies on checks in bufput_bufstr. */
+ return CSEFOLD;
+ } else if (fleft->o == IR_BUFPUT) {
+ IRIns *irb = IR(fleft->op1);
+ if (irb->o == IR_BUFHDR && irb->op2 == IRBUFHDR_RESET)
+ return fleft->op2; /* Shortcut for a single put operation. */
+ }
+ }
+ /* Try to CSE the whole chain. */
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef ref = J->chain[IR_BUFSTR];
+ while (ref) {
+ IRIns *irs = IR(ref), *ira = fleft, *irb = IR(irs->op1);
+ while (ira->o == irb->o && ira->op2 == irb->op2) {
+ lj_assertJ(ira->o == IR_BUFHDR || ira->o == IR_BUFPUT ||
+ ira->o == IR_CALLL || ira->o == IR_CARG,
+ "bad buffer constructor IR op %d", ira->o);
+ if (ira->o == IR_BUFHDR && ira->op2 == IRBUFHDR_RESET)
+ return ref; /* CSE succeeded. */
+ if (ira->o == IR_CALLL && ira->op2 == IRCALL_lj_buf_puttab)
+ break;
+ ira = IR(ira->op1);
+ irb = IR(irb->op1);
+ }
+ ref = irs->prev;
+ }
+ }
+ return EMITFOLD; /* No CSE possible. */
+}
+
+LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_reverse)
+LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_upper)
+LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_lower)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putquoted)
+LJFOLDF(bufput_kfold_op)
+{
+ if (irref_isk(fleft->op2)) {
+ const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
+ SBuf *sb = lj_buf_tmp_(J->L);
+ sb = ((SBuf * (LJ_FASTCALL *)(SBuf *, GCstr *))ci->func)(sb,
+ ir_kstr(IR(fleft->op2)));
+ fins->o = IR_BUFPUT;
+ fins->op1 = fleft->op1;
+ fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
+ return RETRYFOLD;
+ }
+ return EMITFOLD; /* Always emit, CSE later. */
+}
+
+LJFOLD(CALLL CARG IRCALL_lj_buf_putstr_rep)
+LJFOLDF(bufput_kfold_rep)
+{
+ if (irref_isk(fleft->op2)) {
+ IRIns *irc = IR(fleft->op1);
+ if (irref_isk(irc->op2)) {
+ SBuf *sb = lj_buf_tmp_(J->L);
+ sb = lj_buf_putstr_rep(sb, ir_kstr(IR(irc->op2)), IR(fleft->op2)->i);
+ fins->o = IR_BUFPUT;
+ fins->op1 = irc->op1;
+ fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
+ return RETRYFOLD;
+ }
+ }
+ return EMITFOLD; /* Always emit, CSE later. */
+}
+
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfxint)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_int)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum_uint)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfnum)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfstr)
+LJFOLD(CALLL CARG IRCALL_lj_strfmt_putfchar)
+LJFOLDF(bufput_kfold_fmt)
+{
+ IRIns *irc = IR(fleft->op1);
+ lj_assertJ(irref_isk(irc->op2), "SFormat must be const");
+ if (irref_isk(fleft->op2)) {
+ SFormat sf = (SFormat)IR(irc->op2)->i;
+ IRIns *ira = IR(fleft->op2);
+ SBuf *sb = lj_buf_tmp_(J->L);
+ switch (fins->op2) {
+ case IRCALL_lj_strfmt_putfxint:
+ sb = lj_strfmt_putfxint(sb, sf, ir_k64(ira)->u64);
+ break;
+ case IRCALL_lj_strfmt_putfstr:
+ sb = lj_strfmt_putfstr(sb, sf, ir_kstr(ira));
+ break;
+ case IRCALL_lj_strfmt_putfchar:
+ sb = lj_strfmt_putfchar(sb, sf, ira->i);
+ break;
+ case IRCALL_lj_strfmt_putfnum_int:
+ case IRCALL_lj_strfmt_putfnum_uint:
+ case IRCALL_lj_strfmt_putfnum:
+ default: {
+ const CCallInfo *ci = &lj_ir_callinfo[fins->op2];
+ sb = ((SBuf * (*)(SBuf *, SFormat, lua_Number))ci->func)(sb, sf,
+ ir_knum(ira)->n);
+ break;
+ }
+ }
+ fins->o = IR_BUFPUT;
+ fins->op1 = irc->op1;
+ fins->op2 = lj_ir_kstr(J, lj_buf_tostr(sb));
+ return RETRYFOLD;
+ }
+ return EMITFOLD; /* Always emit, CSE later. */
+}
+
+/* -- Constant folding of pointer arithmetic ------------------------------ */
+
+LJFOLD(ADD KGC KINT)
+LJFOLD(ADD KGC KINT64)
+LJFOLDF(kfold_add_kgc)
+{
+ GCobj *o = ir_kgc(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+#if LJ_HASFFI
+ if (irt_iscdata(fleft->t)) {
+ CType *ct = ctype_raw(ctype_ctsG(J2G(J)), gco2cd(o)->ctypeid);
+ if (ctype_isnum(ct->info) || ctype_isenum(ct->info) ||
+ ctype_isptr(ct->info) || ctype_isfunc(ct->info) ||
+ ctype_iscomplex(ct->info) || ctype_isvector(ct->info))
+ return lj_ir_kkptr(J, (char *)o + ofs);
+ }
+#endif
+ return lj_ir_kptr(J, (char *)o + ofs);
+}
+
+LJFOLD(ADD KPTR KINT)
+LJFOLD(ADD KPTR KINT64)
+LJFOLD(ADD KKPTR KINT)
+LJFOLD(ADD KKPTR KINT64)
+LJFOLDF(kfold_add_kptr)
+{
+ void *p = ir_kptr(fleft);
+#if LJ_64
+ ptrdiff_t ofs = (ptrdiff_t)ir_kint64(fright)->u64;
+#else
+ ptrdiff_t ofs = fright->i;
+#endif
+ return lj_ir_kptr_(J, fleft->o, (char *)p + ofs);
+}
+
+LJFOLD(ADD any KGC)
+LJFOLD(ADD any KPTR)
+LJFOLD(ADD any KKPTR)
+LJFOLDF(kfold_add_kright)
+{
+ if (fleft->o == IR_KINT || fleft->o == IR_KINT64) {
+ IRRef1 tmp = fins->op1; fins->op1 = fins->op2; fins->op2 = tmp;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+/* -- Constant folding of conversions ------------------------------------- */
+
+LJFOLD(TOBIT KNUM KNUM)
+LJFOLDF(kfold_tobit)
+{
+ return INTFOLD(lj_num2bit(knumleft));
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_INT)
+LJFOLDF(kfold_conv_kint_num)
+{
+ return lj_ir_knum(J, (lua_Number)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_NUM_U32)
+LJFOLDF(kfold_conv_kintu32_num)
+{
+ return lj_ir_knum(J, (lua_Number)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT IRCONV_INT_I8)
+LJFOLD(CONV KINT IRCONV_INT_U8)
+LJFOLD(CONV KINT IRCONV_INT_I16)
+LJFOLD(CONV KINT IRCONV_INT_U16)
+LJFOLDF(kfold_conv_kint_ext)
+{
+ int32_t k = fleft->i;
+ if ((fins->op2 & IRCONV_SRCMASK) == IRT_I8) k = (int8_t)k;
+ else if ((fins->op2 & IRCONV_SRCMASK) == IRT_U8) k = (uint8_t)k;
+ else if ((fins->op2 & IRCONV_SRCMASK) == IRT_I16) k = (int16_t)k;
+ else k = (uint16_t)k;
+ return INTFOLD(k);
+}
+
+LJFOLD(CONV KINT IRCONV_I64_INT)
+LJFOLD(CONV KINT IRCONV_U64_INT)
+LJFOLD(CONV KINT IRCONV_I64_U32)
+LJFOLD(CONV KINT IRCONV_U64_U32)
+LJFOLDF(kfold_conv_kint_i64)
+{
+ if ((fins->op2 & IRCONV_SEXT))
+ return INT64FOLD((uint64_t)(int64_t)fleft->i);
+ else
+ return INT64FOLD((uint64_t)(int64_t)(uint32_t)fleft->i);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_I64)
+LJFOLDF(kfold_conv_kint64_num_i64)
+{
+ return lj_ir_knum(J, (lua_Number)(int64_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_NUM_U64)
+LJFOLDF(kfold_conv_kint64_num_u64)
+{
+ return lj_ir_knum(J, (lua_Number)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KINT64 IRCONV_INT_I64)
+LJFOLD(CONV KINT64 IRCONV_U32_I64)
+LJFOLDF(kfold_conv_kint64_int_i64)
+{
+ return INTFOLD((int32_t)ir_kint64(fleft)->u64);
+}
+
+LJFOLD(CONV KNUM IRCONV_INT_NUM)
+LJFOLDF(kfold_conv_knum_int_num)
+{
+ lua_Number n = knumleft;
+ int32_t k = lj_num2int(n);
+ if (irt_isguard(fins->t) && n != (lua_Number)k) {
+ /* We're about to create a guard which always fails, like CONV +1.5.
+ ** Some pathological loops cause this during LICM, e.g.:
+ ** local x,k,t = 0,1.5,{1,[1.5]=2}
+ ** for i=1,200 do x = x+ t[k]; k = k == 1 and 1.5 or 1 end
+ ** assert(x == 300)
+ */
+ return FAILFOLD;
+ }
+ return INTFOLD(k);
+}
+
+LJFOLD(CONV KNUM IRCONV_U32_NUM)
+LJFOLDF(kfold_conv_knum_u32_num)
+{
+#ifdef _MSC_VER
+ { /* Workaround for MSVC bug. */
+ volatile uint32_t u = (uint32_t)knumleft;
+ return INTFOLD((int32_t)u);
+ }
+#else
+ return INTFOLD((int32_t)(uint32_t)knumleft);
+#endif
+}
+
+LJFOLD(CONV KNUM IRCONV_I64_NUM)
+LJFOLDF(kfold_conv_knum_i64_num)
+{
+ return INT64FOLD((uint64_t)(int64_t)knumleft);
+}
+
+LJFOLD(CONV KNUM IRCONV_U64_NUM)
+LJFOLDF(kfold_conv_knum_u64_num)
+{
+ return INT64FOLD(lj_num2u64(knumleft));
+}
+
+LJFOLD(TOSTR KNUM any)
+LJFOLDF(kfold_tostr_knum)
+{
+ return lj_ir_kstr(J, lj_strfmt_num(J->L, ir_knum(fleft)));
+}
+
+LJFOLD(TOSTR KINT any)
+LJFOLDF(kfold_tostr_kint)
+{
+ return lj_ir_kstr(J, fins->op2 == IRTOSTR_INT ?
+ lj_strfmt_int(J->L, fleft->i) :
+ lj_strfmt_char(J->L, fleft->i));
+}
+
+LJFOLD(STRTO KGC)
+LJFOLDF(kfold_strto)
+{
+ TValue n;
+ if (lj_strscan_num(ir_kstr(fleft), &n))
+ return lj_ir_knum(J, numV(&n));
+ return FAILFOLD;
+}
+
+/* -- Constant folding of equality checks --------------------------------- */
+
+/* Don't constant-fold away FLOAD checks against KNULL. */
+LJFOLD(EQ FLOAD KNULL)
+LJFOLD(NE FLOAD KNULL)
+LJFOLDX(lj_opt_cse)
+
+/* But fold all other KNULL compares, since only KNULL is equal to KNULL. */
+LJFOLD(EQ any KNULL)
+LJFOLD(NE any KNULL)
+LJFOLD(EQ KNULL any)
+LJFOLD(NE KNULL any)
+LJFOLD(EQ KINT KINT) /* Constants are unique, so same refs <==> same value. */
+LJFOLD(NE KINT KINT)
+LJFOLD(EQ KINT64 KINT64)
+LJFOLD(NE KINT64 KINT64)
+LJFOLD(EQ KGC KGC)
+LJFOLD(NE KGC KGC)
+LJFOLDF(kfold_kref)
+{
+ return CONDFOLD((fins->op1 == fins->op2) ^ (fins->o == IR_NE));
+}
+
+/* -- Algebraic shortcuts ------------------------------------------------- */
+
+LJFOLD(FPMATH FPMATH IRFPM_FLOOR)
+LJFOLD(FPMATH FPMATH IRFPM_CEIL)
+LJFOLD(FPMATH FPMATH IRFPM_TRUNC)
+LJFOLDF(shortcut_round)
+{
+ IRFPMathOp op = (IRFPMathOp)fleft->op2;
+ if (op == IRFPM_FLOOR || op == IRFPM_CEIL || op == IRFPM_TRUNC)
+ return LEFTFOLD; /* round(round_left(x)) = round_left(x) */
+ return NEXTFOLD;
+}
+
+LJFOLD(ABS ABS FLOAD)
+LJFOLDF(shortcut_left)
+{
+ return LEFTFOLD; /* f(g(x)) ==> g(x) */
+}
+
+LJFOLD(ABS NEG FLOAD)
+LJFOLDF(shortcut_dropleft)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* abs(neg(x)) ==> abs(x) */
+ return RETRYFOLD;
+}
+
+/* Note: no safe shortcuts with STRTO and TOSTR ("1e2" ==> +100 ==> "100"). */
+LJFOLD(NEG NEG any)
+LJFOLD(BNOT BNOT)
+LJFOLD(BSWAP BSWAP)
+LJFOLDF(shortcut_leftleft)
+{
+ PHIBARRIER(fleft); /* See above. Fold would be ok, but not beneficial. */
+ return fleft->op1; /* f(g(x)) ==> x */
+}
+
+/* -- FP algebraic simplifications ---------------------------------------- */
+
+/* FP arithmetic is tricky -- there's not much to simplify.
+** Please note the following common pitfalls before sending "improvements":
+** x+0 ==> x is INVALID for x=-0
+** 0-x ==> -x is INVALID for x=+0
+** x*0 ==> 0 is INVALID for x=-0, x=+-Inf or x=NaN
+*/
+
+LJFOLD(ADD NEG any)
+LJFOLDF(simplify_numadd_negx)
+{
+ PHIBARRIER(fleft);
+ fins->o = IR_SUB; /* (-a) + b ==> b - a */
+ fins->op1 = fins->op2;
+ fins->op2 = fleft->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(ADD any NEG)
+LJFOLDF(simplify_numadd_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_SUB; /* a + (-b) ==> a - b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any KNUM)
+LJFOLDF(simplify_numsub_k)
+{
+ if (ir_knum(fright)->u64 == 0) /* x - (+0) ==> x */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB NEG KNUM)
+LJFOLDF(simplify_numsub_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op2 = fleft->op1; /* (-x) - k ==> (-k) - x */
+ fins->op1 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB any NEG)
+LJFOLDF(simplify_numsub_xneg)
+{
+ PHIBARRIER(fright);
+ fins->o = IR_ADD; /* a - (-b) ==> a + b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL any KNUM)
+LJFOLD(DIV any KNUM)
+LJFOLDF(simplify_nummuldiv_k)
+{
+ lua_Number n = knumright;
+ if (n == 1.0) { /* x o 1 ==> x */
+ return LEFTFOLD;
+ } else if (n == -1.0) { /* x o -1 ==> -x */
+ IRRef op1 = fins->op1;
+ fins->op2 = (IRRef1)lj_ir_ksimd(J, LJ_KSIMD_NEG); /* Modifies fins. */
+ fins->op1 = op1;
+ fins->o = IR_NEG;
+ return RETRYFOLD;
+ } else if (fins->o == IR_MUL && n == 2.0) { /* x * 2 ==> x + x */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ } else if (fins->o == IR_DIV) { /* x / 2^k ==> x * 2^-k */
+ uint64_t u = ir_knum(fright)->u64;
+ uint32_t ex = ((uint32_t)(u >> 52) & 0x7ff);
+ if ((u & U64x(000fffff,ffffffff)) == 0 && ex - 1 < 0x7fd) {
+ u = (u & ((uint64_t)1 << 63)) | ((uint64_t)(0x7fe - ex) << 52);
+ fins->o = IR_MUL; /* Multiply by exact reciprocal. */
+ fins->op2 = lj_ir_knum_u64(J, u);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
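+/* Reciprocal example: 4.0 has a zero mantissa and biased exponent 0x401,
+** so the guard above accepts it and builds the constant with exponent
+** 0x7fe - 0x401 = 0x3fd, i.e. 0.25. The ex - 1 < 0x7fd check rejects
+** zeros, denormals, infinities and NaNs, where no exact reciprocal exists.
+*/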
+
+LJFOLD(MUL NEG KNUM)
+LJFOLD(DIV NEG KNUM)
+LJFOLDF(simplify_nummuldiv_negk)
+{
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1; /* (-a) o k ==> a o (-k) */
+ fins->op2 = (IRRef1)lj_ir_knum(J, -knumright);
+ return RETRYFOLD;
+}
+
+LJFOLD(MUL NEG NEG)
+LJFOLD(DIV NEG NEG)
+LJFOLDF(simplify_nummuldiv_negneg)
+{
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ fins->op1 = fleft->op1; /* (-a) o (-b) ==> a o b */
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+}
+
+LJFOLD(POW any KNUM)
+LJFOLDF(simplify_numpow_k)
+{
+ if (knumright == 0.0) /* x ^ 0 ==> 1 */
+ return lj_ir_knum_one(J); /* Result must be a number, not an int. */
+ else if (knumright == 1.0) /* x ^ 1 ==> x */
+ return LEFTFOLD;
+ else if (knumright == 2.0) /* x ^ 2 ==> x * x */
+ return emitir(IRTN(IR_MUL), fins->op1, fins->op1);
+ else
+ return NEXTFOLD;
+}
+
+/* -- Simplify conversions ------------------------------------------------ */
+
+LJFOLD(CONV CONV IRCONV_NUM_INT) /* _NUM */
+LJFOLDF(shortcut_conv_num_int)
+{
+ PHIBARRIER(fleft);
+ /* Only safe with a guarded conversion to int. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_NUM && irt_isguard(fleft->t))
+ return fleft->op1; /* f(g(x)) ==> x */
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_NUM) /* _INT */
+LJFOLD(CONV CONV IRCONV_U32_NUM) /* _U32 */
+LJFOLDF(simplify_conv_int_num)
+{
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ if ((fleft->op2 & IRCONV_SRCMASK) ==
+ ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH))
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_I64_NUM) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U64_NUM) /* _INT or _U32 */
+LJFOLDF(simplify_conv_i64_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ /* Reduce to a sign-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = ((IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+ return RETRYFOLD;
+ } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+#if LJ_TARGET_X64
+ return fleft->op1;
+#else
+ /* Reduce to a zero-extension. */
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRT_I64<<5)|IRT_U32;
+ return RETRYFOLD;
+#endif
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_INT_I64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_INT_U64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U32_I64) /* _INT or _U32 */
+LJFOLD(CONV CONV IRCONV_U32_U64) /* _INT or _U32 */
+LJFOLDF(simplify_conv_int_i64)
+{
+ int src;
+ PHIBARRIER(fleft);
+ src = (fleft->op2 & IRCONV_SRCMASK);
+ if (src == IRT_INT || src == IRT_U32) {
+ if (src == ((fins->op2 & IRCONV_DSTMASK) >> IRCONV_DSH)) {
+ return fleft->op1;
+ } else {
+ fins->op2 = ((fins->op2 & IRCONV_DSTMASK) | src);
+ fins->op1 = fleft->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(CONV CONV IRCONV_FLOAT_NUM) /* _FLOAT */
+LJFOLDF(simplify_conv_flt_num)
+{
+ PHIBARRIER(fleft);
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+/* Shortcut TOBIT + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(TOBIT CONV KNUM)
+LJFOLDF(simplify_tobit_conv)
+{
+ /* Fold even across PHI to avoid expensive num->int conversions in loop. */
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg");
+ return fleft->op1;
+ } else if ((fleft->op2 & IRCONV_SRCMASK) == IRT_U32) {
+ lj_assertJ(irt_isnum(fleft->t), "expected TOBIT number arg");
+ fins->o = IR_CONV;
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRT_INT<<5)|IRT_U32;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+/* Shortcut floor/ceil/round + IRT_NUM <- IRT_INT/IRT_U32 conversion. */
+LJFOLD(FPMATH CONV IRFPM_FLOOR)
+LJFOLD(FPMATH CONV IRFPM_CEIL)
+LJFOLD(FPMATH CONV IRFPM_TRUNC)
+LJFOLDF(simplify_floor_conv)
+{
+ if ((fleft->op2 & IRCONV_SRCMASK) == IRT_INT ||
+ (fleft->op2 & IRCONV_SRCMASK) == IRT_U32)
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+/* Strength reduction of widening. */
+LJFOLD(CONV any IRCONV_I64_INT)
+LJFOLD(CONV any IRCONV_U64_INT)
+LJFOLDF(simplify_conv_sext)
+{
+ IRRef ref = fins->op1;
+ int64_t ofs = 0;
+ if (!(fins->op2 & IRCONV_SEXT))
+ return NEXTFOLD;
+ PHIBARRIER(fleft);
+ if (fleft->o == IR_XLOAD && (irt_isu8(fleft->t) || irt_isu16(fleft->t)))
+ goto ok_reduce;
+ if (fleft->o == IR_ADD && irref_isk(fleft->op2)) {
+ ofs = (int64_t)IR(fleft->op2)->i;
+ ref = fleft->op1;
+ }
+ /* Use scalar evolution analysis results to strength-reduce sign-extension. */
+ if (ref == J->scev.idx) {
+ IRRef lo = J->scev.dir ? J->scev.start : J->scev.stop;
+ lj_assertJ(irt_isint(J->scev.t), "only int SCEV supported");
+ if (lo && IR(lo)->o == IR_KINT && IR(lo)->i + ofs >= 0) {
+ ok_reduce:
+#if LJ_TARGET_X64
+ /* Eliminate widening. All 32 bit ops do an implicit zero-extension. */
+ return LEFTFOLD;
+#else
+ /* Reduce to a (cheaper) zero-extension. */
+ fins->op2 &= ~IRCONV_SEXT;
+ return RETRYFOLD;
+#endif
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Strength reduction of narrowing. */
+LJFOLD(CONV ADD IRCONV_INT_I64)
+LJFOLD(CONV SUB IRCONV_INT_I64)
+LJFOLD(CONV MUL IRCONV_INT_I64)
+LJFOLD(CONV ADD IRCONV_INT_U64)
+LJFOLD(CONV SUB IRCONV_INT_U64)
+LJFOLD(CONV MUL IRCONV_INT_U64)
+LJFOLD(CONV ADD IRCONV_U32_I64)
+LJFOLD(CONV SUB IRCONV_U32_I64)
+LJFOLD(CONV MUL IRCONV_U32_I64)
+LJFOLD(CONV ADD IRCONV_U32_U64)
+LJFOLD(CONV SUB IRCONV_U32_U64)
+LJFOLD(CONV MUL IRCONV_U32_U64)
+LJFOLDF(simplify_conv_narrow)
+{
+#if LJ_64
+ UNUSED(J);
+ return NEXTFOLD;
+#else
+ IROp op = (IROp)fleft->o;
+ IRType t = irt_type(fins->t);
+ IRRef op1 = fleft->op1, op2 = fleft->op2, mode = fins->op2;
+ PHIBARRIER(fleft);
+ op1 = emitir(IRT(IR_CONV, t), op1, mode);
+ op2 = emitir(IRT(IR_CONV, t), op2, mode);
+ fins->ot = IRT(op, t);
+ fins->op1 = op1;
+ fins->op2 = op2;
+ return RETRYFOLD;
+#endif
+}
+
+/* Special CSE rule for CONV. */
+LJFOLD(CONV any any)
+LJFOLDF(cse_conv)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef op1 = fins->op1, op2 = (fins->op2 & IRCONV_MODEMASK);
+ uint8_t guard = irt_isguard(fins->t);
+ IRRef ref = J->chain[IR_CONV];
+ while (ref > op1) {
+ IRIns *ir = IR(ref);
+ /* Commoning with stronger checks is ok. */
+ if (ir->op1 == op1 && (ir->op2 & IRCONV_MODEMASK) == op2 &&
+ irt_isguard(ir->t) >= guard)
+ return ref;
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD; /* No fallthrough to regular CSE. */
+}
+
+/* FP conversion narrowing. */
+LJFOLD(TOBIT ADD KNUM)
+LJFOLD(TOBIT SUB KNUM)
+LJFOLD(CONV ADD IRCONV_INT_NUM)
+LJFOLD(CONV SUB IRCONV_INT_NUM)
+LJFOLD(CONV ADD IRCONV_I64_NUM)
+LJFOLD(CONV SUB IRCONV_I64_NUM)
+LJFOLDF(narrow_convert)
+{
+ PHIBARRIER(fleft);
+ /* Narrowing ignores PHIs and repeating it inside the loop is not useful. */
+ if (J->chain[IR_LOOP])
+ return NEXTFOLD;
+ lj_assertJ(fins->o != IR_CONV || (fins->op2&IRCONV_CONVMASK) != IRCONV_TOBIT,
+ "unexpected CONV TOBIT");
+ return lj_opt_narrow_convert(J);
+}
+
+/* -- Integer algebraic simplifications ----------------------------------- */
+
+LJFOLD(ADD any KINT)
+LJFOLD(ADDOV any KINT)
+LJFOLD(SUBOV any KINT)
+LJFOLDF(simplify_intadd_k)
+{
+ if (fright->i == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(MULOV any KINT)
+LJFOLDF(simplify_intmul_k)
+{
+ if (fright->i == 0) /* i * 0 ==> 0 */
+ return RIGHTFOLD;
+ if (fright->i == 1) /* i * 1 ==> i */
+ return LEFTFOLD;
+ if (fright->i == 2) { /* i * 2 ==> i + i */
+ fins->o = IR_ADDOV;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT)
+LJFOLDF(simplify_intsub_k)
+{
+ if (fright->i == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, -fright->i); /* Overflow for -2^31 ok. */
+ return RETRYFOLD;
+}
+
+LJFOLD(SUB KINT any)
+LJFOLD(SUB KINT64 any)
+LJFOLDF(simplify_intsub_kleft)
+{
+ if (fleft->o == IR_KINT ? (fleft->i == 0) : (ir_kint64(fleft)->u64 == 0)) {
+ fins->o = IR_NEG; /* 0 - i ==> -i */
+ fins->op1 = fins->op2;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD any KINT64)
+LJFOLDF(simplify_intadd_k64)
+{
+ if (ir_kint64(fright)->u64 == 0) /* i + 0 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any KINT64)
+LJFOLDF(simplify_intsub_k64)
+{
+ uint64_t k = ir_kint64(fright)->u64;
+ if (k == 0) /* i - 0 ==> i */
+ return LEFTFOLD;
+ fins->o = IR_ADD; /* i - k ==> i + (-k) */
+ fins->op2 = (IRRef1)lj_ir_kint64(J, (uint64_t)-(int64_t)k);
+ return RETRYFOLD;
+}
+
+static TRef simplify_intmul_k(jit_State *J, int32_t k)
+{
+ /* Note: many more simplifications are possible, e.g. 2^k1 +- 2^k2.
+ ** But this is mainly intended for simple address arithmetic.
+ ** Also it's easier for the backend to optimize the original multiplies.
+ */
+ if (k == 0) { /* i * 0 ==> 0 */
+ return RIGHTFOLD;
+ } else if (k == 1) { /* i * 1 ==> i */
+ return LEFTFOLD;
+ } else if ((k & (k-1)) == 0) { /* i * 2^k ==> i << k */
+ fins->o = IR_BSHL;
+ fins->op2 = lj_ir_kint(J, lj_fls((uint32_t)k));
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT)
+LJFOLDF(simplify_intmul_k32)
+{
+ if (fright->i >= 0)
+ return simplify_intmul_k(J, fright->i);
+ return NEXTFOLD;
+}
+
+LJFOLD(MUL any KINT64)
+LJFOLDF(simplify_intmul_k64)
+{
+#if LJ_HASFFI
+ if (ir_kint64(fright)->u64 < 0x80000000u)
+ return simplify_intmul_k(J, (int32_t)ir_kint64(fright)->u64);
+ return NEXTFOLD;
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(MOD any KINT)
+LJFOLDF(simplify_intmod_k)
+{
+ int32_t k = fright->i;
+ lj_assertJ(k != 0, "integer mod 0");
+ if (k > 0 && (k & (k-1)) == 0) { /* i % (2^k) ==> i & (2^k-1) */
+ fins->o = IR_BAND;
+ fins->op2 = lj_ir_kint(J, k-1);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MOD KINT any)
+LJFOLDF(simplify_intmod_kleft)
+{
+ if (fleft->i == 0)
+ return INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any any)
+LJFOLD(SUBOV any any)
+LJFOLDF(simplify_intsub)
+{
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t)) /* i - i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD any)
+LJFOLDF(simplify_intsubadd_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (i + j) - i ==> j */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (i + j) - j ==> i */
+ return fleft->op1;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB SUB any)
+LJFOLDF(simplify_intsubsub_leftcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) { /* (i - j) - i ==> 0 - j */
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ fins->op2 = fleft->op2;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any SUB)
+LJFOLDF(simplify_intsubsub_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) /* i - (i - j) ==> j */
+ return fright->op2;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB any ADD)
+LJFOLDF(simplify_intsubadd_rightcancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fright);
+ if (fins->op1 == fright->op1) { /* i - (i + j) ==> 0 - j */
+ fins->op2 = fright->op2;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ if (fins->op1 == fright->op2) { /* i - (j + i) ==> 0 - j */
+ fins->op2 = fright->op1;
+ fins->op1 = (IRRef1)lj_ir_kint(J, 0);
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(SUB ADD ADD)
+LJFOLDF(simplify_intsubaddadd_cancel)
+{
+ if (!irt_isnum(fins->t)) {
+ PHIBARRIER(fleft);
+ PHIBARRIER(fright);
+ if (fleft->op1 == fright->op1) { /* (i + j1) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op1 == fright->op2) { /* (i + j1) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op2;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op1) { /* (j1 + i) - (i + j2) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op2;
+ return RETRYFOLD;
+ }
+ if (fleft->op2 == fright->op2) { /* (j1 + i) - (j2 + i) ==> j1 - j2 */
+ fins->op1 = fleft->op1;
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any KINT)
+LJFOLD(BAND any KINT64)
+LJFOLDF(simplify_band_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i & 0 ==> 0 */
+ return RIGHTFOLD;
+ if (k == -1) /* i & -1 ==> i */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BOR any KINT)
+LJFOLD(BOR any KINT64)
+LJFOLDF(simplify_bor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i | 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) /* i | -1 ==> -1 */
+ return RIGHTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR any KINT)
+LJFOLD(BXOR any KINT64)
+LJFOLDF(simplify_bxor_k)
+{
+ int64_t k = fright->o == IR_KINT ? (int64_t)fright->i :
+ (int64_t)ir_k64(fright)->u64;
+ if (k == 0) /* i xor 0 ==> i */
+ return LEFTFOLD;
+ if (k == -1) { /* i xor -1 ==> ~i */
+ fins->o = IR_BNOT;
+ fins->op2 = 0;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any KINT)
+LJFOLD(BSHR any KINT)
+LJFOLD(BSAR any KINT)
+LJFOLD(BROL any KINT)
+LJFOLD(BROR any KINT)
+LJFOLDF(simplify_shift_ik)
+{
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (fright->i & mask);
+ if (k == 0) /* i o 0 ==> i */
+ return LEFTFOLD;
+ if (k == 1 && fins->o == IR_BSHL) { /* i << 1 ==> i + i */
+ fins->o = IR_ADD;
+ fins->op2 = fins->op1;
+ return RETRYFOLD;
+ }
+ if (k != fright->i) { /* i o k ==> i o (k & mask) */
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+#ifndef LJ_TARGET_UNIFYROT
+ if (fins->o == IR_BROR) { /* bror(i, k) ==> brol(i, (-k)&mask) */
+ fins->o = IR_BROL;
+ fins->op2 = (IRRef1)lj_ir_kint(J, (-k)&mask);
+ return RETRYFOLD;
+ }
+#endif
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL any BAND)
+LJFOLD(BSHR any BAND)
+LJFOLD(BSAR any BAND)
+LJFOLD(BROL any BAND)
+LJFOLD(BROR any BAND)
+LJFOLDF(simplify_shift_andk)
+{
+ IRIns *irk = IR(fright->op2);
+ PHIBARRIER(fright);
+ if ((fins->o < IR_BROL ? LJ_TARGET_MASKSHIFT : LJ_TARGET_MASKROT) &&
+ irk->o == IR_KINT) { /* i o (j & mask) ==> i o j */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = irk->i & mask;
+ if (k == mask) {
+ fins->op2 = fright->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL KINT any)
+LJFOLD(BSHR KINT any)
+LJFOLD(BSHL KINT64 any)
+LJFOLD(BSHR KINT64 any)
+LJFOLDF(simplify_shift1_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0) /* 0 o i ==> 0 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSAR KINT any)
+LJFOLD(BROL KINT any)
+LJFOLD(BROR KINT any)
+LJFOLD(BSAR KINT64 any)
+LJFOLD(BROL KINT64 any)
+LJFOLD(BROR KINT64 any)
+LJFOLDF(simplify_shift2_ki)
+{
+ int64_t k = fleft->o == IR_KINT ? (int64_t)fleft->i :
+ (int64_t)ir_k64(fleft)->u64;
+ if (k == 0 || k == -1) /* 0 o i ==> 0; -1 o i ==> -1 */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BAND KINT)
+LJFOLD(BSHR BAND KINT)
+LJFOLD(BROL BAND KINT)
+LJFOLD(BROR BAND KINT)
+LJFOLDF(simplify_shiftk_andk)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft);
+ if (irk->o == IR_KINT) { /* (i & k1) o k2 ==> (i o k2) & (k1 o k2) */
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ fins->op1 = fleft->op1;
+ fins->op1 = (IRRef1)lj_opt_fold(J);
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ fins->ot = IRTI(IR_BAND);
+ return RETRYFOLD;
+ } else if (irk->o == IR_KINT64) {
+ uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, fright->i,
+ (IROp)fins->o);
+ IROpT ot = fleft->ot;
+ fins->op1 = fleft->op1;
+ fins->op1 = (IRRef1)lj_opt_fold(J);
+ fins->op2 = (IRRef1)lj_ir_kint64(J, k);
+ fins->ot = ot;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND BSHL KINT)
+LJFOLD(BAND BSHR KINT)
+LJFOLDF(simplify_andk_shiftk)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT &&
+ kfold_intop(-1, irk->i, (IROp)fleft->o) == fright->i)
+ return LEFTFOLD; /* (i o k1) & k2 ==> i, if (-1 o k1) == k2 */
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND BOR KINT)
+LJFOLD(BOR BAND KINT)
+LJFOLDF(simplify_andor_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft);
+ if (irk->o == IR_KINT) {
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */
+ /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */
+ if (k == (fins->o == IR_BAND ? 0 : -1)) {
+ fins->op1 = fleft->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND BOR KINT64)
+LJFOLD(BOR BAND KINT64)
+LJFOLDF(simplify_andor_k64)
+{
+#if LJ_HASFFI
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft);
+ if (irk->o == IR_KINT64) {
+ uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64,
+ (IROp)fins->o);
+ /* (i | k1) & k2 ==> i & k2, if (k1 & k2) == 0. */
+ /* (i & k1) | k2 ==> i | k2, if (k1 | k2) == -1. */
+ if (k == (fins->o == IR_BAND ? (uint64_t)0 : ~(uint64_t)0)) {
+ fins->op1 = fleft->op1;
+ return RETRYFOLD;
+ }
+ }
+ return NEXTFOLD;
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+/* -- Reassociation ------------------------------------------------------- */
+
+LJFOLD(ADD ADD KINT)
+LJFOLD(MUL MUL KINT)
+LJFOLD(BAND BAND KINT)
+LJFOLD(BOR BOR KINT)
+LJFOLD(BXOR BXOR KINT)
+LJFOLDF(reassoc_intarith_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT) {
+ int32_t k = kfold_intop(irk->i, fright->i, (IROp)fins->o);
+ if (k == irk->i) /* (i o k1) o k2 ==> i o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(ADD ADD KINT64)
+LJFOLD(MUL MUL KINT64)
+LJFOLD(BAND BAND KINT64)
+LJFOLD(BOR BOR KINT64)
+LJFOLD(BXOR BXOR KINT64)
+LJFOLDF(reassoc_intarith_k64)
+{
+#if LJ_HASFFI
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT64) {
+ uint64_t k = kfold_int64arith(J, ir_k64(irk)->u64, ir_k64(fright)->u64,
+ (IROp)fins->o);
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint64(J, k);
+ return RETRYFOLD; /* (i o k1) o k2 ==> i o (k1 o k2) */
+ }
+ return NEXTFOLD;
+#else
+ UNUSED(J); lj_assertJ(0, "FFI IR op without FFI"); return FAILFOLD;
+#endif
+}
+
+LJFOLD(BAND BAND any)
+LJFOLD(BOR BOR any)
+LJFOLDF(reassoc_dup)
+{
+ if (fins->op2 == fleft->op1 || fins->op2 == fleft->op2)
+ return LEFTFOLD; /* (a o b) o a ==> a o b; (a o b) o b ==> a o b */
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN MIN any)
+LJFOLD(MAX MAX any)
+LJFOLDF(reassoc_dup_minmax)
+{
+ if (fins->op2 == fleft->op2)
+ return LEFTFOLD; /* (a o b) o b ==> a o b */
+ return NEXTFOLD;
+}
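+/* Unlike reassoc_dup above, the op1 match is not folded for MIN/MAX:
+** with a NaN operand the result depends on operand order (the backends
+** follow the SSE minsd/maxsd convention of returning the second operand
+** on an unordered compare), so only the op2 match is safe to drop.
+*/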
+
+LJFOLD(BXOR BXOR any)
+LJFOLDF(reassoc_bxor)
+{
+ PHIBARRIER(fleft);
+ if (fins->op2 == fleft->op1) /* (a xor b) xor a ==> b */
+ return fleft->op2;
+ if (fins->op2 == fleft->op2) /* (a xor b) xor b ==> a */
+ return fleft->op1;
+ return NEXTFOLD;
+}
+
+LJFOLD(BSHL BSHL KINT)
+LJFOLD(BSHR BSHR KINT)
+LJFOLD(BSAR BSAR KINT)
+LJFOLD(BROL BROL KINT)
+LJFOLD(BROR BROR KINT)
+LJFOLDF(reassoc_shift)
+{
+ IRIns *irk = IR(fleft->op2);
+ PHIBARRIER(fleft); /* The (shift any KINT) rule covers k2 == 0 and more. */
+ if (irk->o == IR_KINT) { /* (i o k1) o k2 ==> i o (k1 + k2) */
+ int32_t mask = irt_is64(fins->t) ? 63 : 31;
+ int32_t k = (irk->i & mask) + (fright->i & mask);
+ if (k > mask) { /* Combined shift too wide? */
+ if (fins->o == IR_BSHL || fins->o == IR_BSHR)
+ return mask == 31 ? INTFOLD(0) : INT64FOLD(0);
+ else if (fins->o == IR_BSAR)
+ k = mask;
+ else
+ k &= mask;
+ }
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, k);
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(MIN MIN KINT)
+LJFOLD(MAX MAX KINT)
+LJFOLDF(reassoc_minmax_k)
+{
+ IRIns *irk = IR(fleft->op2);
+ if (irk->o == IR_KINT) {
+ int32_t a = irk->i;
+ int32_t y = kfold_intop(a, fright->i, fins->o);
+ if (a == y) /* (x o k1) o k2 ==> x o k1, if (k1 o k2) == k1. */
+ return LEFTFOLD;
+ PHIBARRIER(fleft);
+ fins->op1 = fleft->op1;
+ fins->op2 = (IRRef1)lj_ir_kint(J, y);
+ return RETRYFOLD; /* (x o k1) o k2 ==> x o (k1 o k2) */
+ }
+ return NEXTFOLD;
+}
+
+/* -- Array bounds check elimination -------------------------------------- */
+
+/* Eliminate ABC across PHIs to handle t[i-1] forwarding case.
+** ABC(asize, (i+k)+(-k)) ==> ABC(asize, i), but only if it already exists.
+** Could be generalized to (i+k1)+k2 ==> i+(k1+k2), but it needs better
+** disambiguation.
+*/
+LJFOLD(ABC any ADD)
+LJFOLDF(abc_fwd)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ if (irref_isk(fright->op2)) {
+ IRIns *add2 = IR(fright->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(fright->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef lim = add2->op1;
+ if (fins->op1 > lim) lim = fins->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == fins->op1 && ir->op2 == add2->op1)
+ return DROPFOLD;
+ ref = ir->prev;
+ }
+ }
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate ABC for constants.
+** ABC(asize, k1), ABC(asize k2) ==> ABC(asize, max(k1, k2))
+** Drop second ABC if k2 is lower. Otherwise patch first ABC with k2.
+*/
+LJFOLD(ABC any KINT)
+LJFOLDF(abc_k)
+{
+ PHIBARRIER(fleft);
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_ABC)) {
+ IRRef ref = J->chain[IR_ABC];
+ IRRef asize = fins->op1;
+ while (ref > asize) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == asize && irref_isk(ir->op2)) {
+ uint32_t k = (uint32_t)IR(ir->op2)->i;
+ if ((uint32_t)fright->i > k)
+ ir->op2 = fins->op2;
+ return DROPFOLD;
+ }
+ ref = ir->prev;
+ }
+ return EMITFOLD; /* Already performed CSE. */
+ }
+ return NEXTFOLD;
+}
+
+/* Eliminate invariant ABC inside loop. */
+LJFOLD(ABC any any)
+LJFOLDF(abc_invar)
+{
+ /* Invariant ABC marked as PTR. Drop if op1 is invariant, too. */
+ if (!irt_isint(fins->t) && fins->op1 < J->chain[IR_LOOP] &&
+ !irt_isphi(IR(fins->op1)->t))
+ return DROPFOLD;
+ return NEXTFOLD;
+}
+
+/* -- Commutativity ------------------------------------------------------- */
+
+/* The refs of commutative ops are canonicalized. Lower refs go to the right.
+** Rationale behind this:
+** - It (also) moves constants to the right.
+** - It reduces the number of FOLD rules (e.g. (BOR any KINT) suffices).
+** - It helps CSE to find more matches.
+** - The assembler generates better code with constants at the right.
+*/
+
+LJFOLD(ADD any any)
+LJFOLD(MUL any any)
+LJFOLD(ADDOV any any)
+LJFOLD(MULOV any any)
+LJFOLDF(comm_swap)
+{
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(EQ any any)
+LJFOLD(NE any any)
+LJFOLDF(comm_equal)
+{
+ /* For non-numbers only: x == x ==> drop; x ~= x ==> fail */
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD(fins->o == IR_EQ);
+ return fold_comm_swap(J);
+}
+
+LJFOLD(LT any any)
+LJFOLD(GE any any)
+LJFOLD(LE any any)
+LJFOLD(GT any any)
+LJFOLD(ULT any any)
+LJFOLD(UGE any any)
+LJFOLD(ULE any any)
+LJFOLD(UGT any any)
+LJFOLDF(comm_comp)
+{
+  /* For non-numbers only: x <=> x ==> drop; x <> x ==> fail.
+  ** Numbers are excluded since x may be NaN. */
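+  /* Relies on ORDER IR: (o ^ (o>>1)) & 1 is 1 exactly for GE/LE/UGE/ULE,
+  ** i.e. the comparisons that are true on equality. */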
+ if (fins->op1 == fins->op2 && !irt_isnum(fins->t))
+ return CONDFOLD((fins->o ^ (fins->o >> 1)) & 1);
+ if (fins->op1 < fins->op2) { /* Move lower ref to the right. */
+ IRRef1 tmp = fins->op1;
+ fins->op1 = fins->op2;
+ fins->op2 = tmp;
+ fins->o ^= 3; /* GT <-> LT, GE <-> LE, does not affect U */
+ return RETRYFOLD;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(BAND any any)
+LJFOLD(BOR any any)
+LJFOLDF(comm_dup)
+{
+ if (fins->op1 == fins->op2) /* x o x ==> x */
+ return LEFTFOLD;
+ return fold_comm_swap(J);
+}
+
+LJFOLD(MIN any any)
+LJFOLD(MAX any any)
+LJFOLDF(comm_dup_minmax)
+{
+ if (fins->op1 == fins->op2) /* x o x ==> x */
+ return LEFTFOLD;
+ return NEXTFOLD;
+}
+
+LJFOLD(BXOR any any)
+LJFOLDF(comm_bxor)
+{
+ if (fins->op1 == fins->op2) /* i xor i ==> 0 */
+ return irt_is64(fins->t) ? INT64FOLD(0) : INTFOLD(0);
+ return fold_comm_swap(J);
+}
+
+/* -- Simplification of compound expressions ------------------------------ */
+
+static TRef kfold_xload(jit_State *J, IRIns *ir, const void *p)
+{
+ int32_t k;
+ switch (irt_type(ir->t)) {
+ case IRT_NUM: return lj_ir_knum_u64(J, *(uint64_t *)p);
+ case IRT_I8: k = (int32_t)*(int8_t *)p; break;
+ case IRT_U8: k = (int32_t)*(uint8_t *)p; break;
+ case IRT_I16: k = (int32_t)(int16_t)lj_getu16(p); break;
+ case IRT_U16: k = (int32_t)(uint16_t)lj_getu16(p); break;
+ case IRT_INT: case IRT_U32: k = (int32_t)lj_getu32(p); break;
+ case IRT_I64: case IRT_U64: return lj_ir_kint64(J, *(uint64_t *)p);
+ default: return 0;
+ }
+ return lj_ir_kint(J, k);
+}
+
+/* Turn: string.sub(str, a, b) == kstr
+** into: string.byte(str, a) == string.byte(kstr, 1) etc.
+** Note: this creates unaligned XLOADs on x86/x64.
+*/
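+/* E.g. (sketch): 'if s:sub(i, i+3) == "GET " then' compiles down to a
+** single 4-byte XLOAD of the string data compared against one constant,
+** plus a length check, instead of allocating an interned substring.
+*/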
+LJFOLD(EQ SNEW KGC)
+LJFOLD(NE SNEW KGC)
+LJFOLDF(merge_eqne_snew_kgc)
+{
+ GCstr *kstr = ir_kstr(fright);
+ int32_t len = (int32_t)kstr->len;
+ lj_assertJ(irt_isstr(fins->t), "bad equality IR type");
+
+#if LJ_TARGET_UNALIGNED
+#define FOLD_SNEW_MAX_LEN 4 /* Handle string lengths 0, 1, 2, 3, 4. */
+#define FOLD_SNEW_TYPE8 IRT_I8 /* Creates shorter immediates. */
+#else
+#define FOLD_SNEW_MAX_LEN 1 /* Handle string lengths 0 or 1. */
+#define FOLD_SNEW_TYPE8 IRT_U8 /* Prefer unsigned loads. */
+#endif
+
+ PHIBARRIER(fleft);
+ if (len <= FOLD_SNEW_MAX_LEN) {
+ IROp op = (IROp)fins->o;
+ IRRef strref = fleft->op1;
+ if (IR(strref)->o != IR_STRREF)
+ return NEXTFOLD;
+ if (op == IR_EQ) {
+ emitir(IRTGI(IR_EQ), fleft->op2, lj_ir_kint(J, len));
+ /* Caveat: fins/fleft/fright is no longer valid after emitir. */
+ } else {
+ /* NE is not expanded since this would need an OR of two conds. */
+ if (!irref_isk(fleft->op2)) /* Only handle the constant length case. */
+ return NEXTFOLD;
+ if (IR(fleft->op2)->i != len)
+ return DROPFOLD;
+ }
+ if (len > 0) {
+ /* A 4 byte load for length 3 is ok -- all strings have an extra NUL. */
+ uint16_t ot = (uint16_t)(len == 1 ? IRT(IR_XLOAD, FOLD_SNEW_TYPE8) :
+ len == 2 ? IRT(IR_XLOAD, IRT_U16) :
+ IRTI(IR_XLOAD));
+ TRef tmp = emitir(ot, strref,
+ IRXLOAD_READONLY | (len > 1 ? IRXLOAD_UNALIGNED : 0));
+ TRef val = kfold_xload(J, IR(tref_ref(tmp)), strdata(kstr));
+ if (len == 3)
+ tmp = emitir(IRTI(IR_BAND), tmp,
+ lj_ir_kint(J, LJ_ENDIAN_SELECT(0x00ffffff, 0xffffff00)));
+ fins->op1 = (IRRef1)tmp;
+ fins->op2 = (IRRef1)val;
+ fins->ot = (IROpT)IRTGI(op);
+ return RETRYFOLD;
+ } else {
+ return DROPFOLD;
+ }
+ }
+ return NEXTFOLD;
+}
+
+/* -- Loads --------------------------------------------------------------- */
+
+/* Loads cannot be folded or passed on to CSE in general.
+** Alias analysis is needed to check for forwarding opportunities.
+**
+** Caveat: *all* loads must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ALOAD any)
+LJFOLDX(lj_opt_fwd_aload)
+
+/* From HREF fwd (see below). Must eliminate, not supported by fwd/backend. */
+LJFOLD(HLOAD KKPTR)
+LJFOLDF(kfold_hload_kkptr)
+{
+ UNUSED(J);
+ lj_assertJ(ir_kptr(fleft) == niltvg(J2G(J)), "expected niltv");
+ return TREF_NIL;
+}
+
+LJFOLD(HLOAD any)
+LJFOLDX(lj_opt_fwd_hload)
+
+LJFOLD(ULOAD any)
+LJFOLDX(lj_opt_fwd_uload)
+
+LJFOLD(ALEN any any)
+LJFOLDX(lj_opt_fwd_alen)
+
+/* Upvalue refs are really loads, but there are no corresponding stores.
+** So CSE is ok for them, except for UREFO across a GC step (see below).
+** If the referenced function is const, its upvalue addresses are const, too.
+** This can be used to improve CSE by looking for the same address,
+** even if the upvalues originate from a different function.
+*/
+LJFOLD(UREFO KGC any)
+LJFOLD(UREFC KGC any)
+LJFOLDF(cse_uref)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ IRRef ref = J->chain[fins->o];
+ GCfunc *fn = ir_kfunc(fleft);
+ GCupval *uv = gco2uv(gcref(fn->l.uvptr[(fins->op2 >> 8)]));
+ while (ref > 0) {
+ IRIns *ir = IR(ref);
+ if (irref_isk(ir->op1)) {
+ GCfunc *fn2 = ir_kfunc(IR(ir->op1));
+ if (gco2uv(gcref(fn2->l.uvptr[(ir->op2 >> 8)])) == uv) {
+ if (fins->o == IR_UREFO && gcstep_barrier(J, ref))
+ break;
+ return ref;
+ }
+ }
+ ref = ir->prev;
+ }
+ }
+ return EMITFOLD;
+}
+
+LJFOLD(HREFK any any)
+LJFOLDX(lj_opt_fwd_hrefk)
+
+LJFOLD(HREF TNEW any)
+LJFOLDF(fwd_href_tnew)
+{
+ if (lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF TDUP KPRI)
+LJFOLD(HREF TDUP KGC)
+LJFOLD(HREF TDUP KNUM)
+LJFOLDF(fwd_href_tdup)
+{
+ TValue keyv;
+ lj_ir_kvalue(J->L, &keyv, fright);
+ if (lj_tab_get(J->L, ir_ktab(IR(fleft->op1)), &keyv) == niltvg(J2G(J)) &&
+ lj_opt_fwd_href_nokey(J))
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ return NEXTFOLD;
+}
+
+/* We can safely FOLD/CSE array/hash refs and field loads, since there
+** are no corresponding stores. But we need to check for any NEWREF with
+** an aliased table, as it may invalidate all of the pointers and fields.
+** Only HREF needs the NEWREF check -- AREF and HREFK already depend on
+** FLOADs. And NEWREF itself is treated like a store (see below).
+** LREF is constant (per trace) since coroutine switches are not inlined.
+*/
+LJFOLD(FLOAD TNEW IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tnew_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD(fleft->op1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TNEW IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tnew_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((1 << fleft->op2)-1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_ASIZE)
+LJFOLDF(fload_tab_tdup_asize)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->asize);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TDUP IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_tdup_hmask)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && lj_opt_fwd_tptr(J, fins->op1))
+ return INTFOLD((int32_t)ir_ktab(IR(fleft->op1))->hmask);
+ return NEXTFOLD;
+}
+
+LJFOLD(HREF any any)
+LJFOLD(FLOAD any IRFL_TAB_ARRAY)
+LJFOLD(FLOAD any IRFL_TAB_NODE)
+LJFOLD(FLOAD any IRFL_TAB_ASIZE)
+LJFOLD(FLOAD any IRFL_TAB_HMASK)
+LJFOLDF(fload_tab_ah)
+{
+ TRef tr = lj_opt_cse(J);
+ return lj_opt_fwd_tptr(J, tref_ref(tr)) ? tr : EMITFOLD;
+}
+
+/* Strings are immutable, so we can safely FOLD/CSE the related FLOAD. */
+LJFOLD(FLOAD KGC IRFL_STR_LEN)
+LJFOLDF(fload_str_len_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kstr(fleft)->len);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD SNEW IRFL_STR_LEN)
+LJFOLDF(fload_str_len_snew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ PHIBARRIER(fleft);
+ return fleft->op2;
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD TOSTR IRFL_STR_LEN)
+LJFOLDF(fload_str_len_tostr)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD) && fleft->op2 == IRTOSTR_CHAR)
+ return INTFOLD(1);
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD any IRFL_SBUF_W)
+LJFOLD(FLOAD any IRFL_SBUF_E)
+LJFOLD(FLOAD any IRFL_SBUF_B)
+LJFOLD(FLOAD any IRFL_SBUF_L)
+LJFOLD(FLOAD any IRFL_SBUF_REF)
+LJFOLD(FLOAD any IRFL_SBUF_R)
+LJFOLDF(fload_sbuf)
+{
+ TRef tr = lj_opt_fwd_fload(J);
+ return lj_opt_fwd_sbuf(J, tref_ref(tr)) ? tr : EMITFOLD;
+}
+
+/* The fast function ID of function objects is immutable. */
+LJFOLD(FLOAD KGC IRFL_FUNC_FFID)
+LJFOLDF(fload_func_ffid_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kfunc(fleft)->c.ffid);
+ return NEXTFOLD;
+}
+
+/* The C type ID of cdata objects is immutable. */
+LJFOLD(FLOAD KGC IRFL_CDATA_CTYPEID)
+LJFOLDF(fload_cdata_typeid_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return INTFOLD((int32_t)ir_kcdata(fleft)->ctypeid);
+ return NEXTFOLD;
+}
+
+/* Get the contents of immutable cdata objects. */
+LJFOLD(FLOAD KGC IRFL_CDATA_PTR)
+LJFOLD(FLOAD KGC IRFL_CDATA_INT)
+LJFOLD(FLOAD KGC IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_int64_kgc)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD)) {
+ void *p = cdataptr(ir_kcdata(fleft));
+ if (irt_is64(fins->t))
+ return INT64FOLD(*(uint64_t *)p);
+ else
+ return INTFOLD(*(int32_t *)p);
+ }
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD CNEW IRFL_CDATA_CTYPEID)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_CTYPEID)
+LJFOLDF(fload_cdata_typeid_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op1; /* No PHI barrier needed. CNEW/CNEWI op1 is const. */
+ return NEXTFOLD;
+}
+
+/* Pointer, int and int64 cdata objects are immutable. */
+LJFOLD(FLOAD CNEWI IRFL_CDATA_PTR)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_INT)
+LJFOLD(FLOAD CNEWI IRFL_CDATA_INT64)
+LJFOLDF(fload_cdata_ptr_int64_cnew)
+{
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_FOLD))
+ return fleft->op2; /* Fold even across PHI to avoid allocations. */
+ return NEXTFOLD;
+}
+
+LJFOLD(FLOAD any IRFL_STR_LEN)
+LJFOLD(FLOAD any IRFL_FUNC_ENV)
+LJFOLD(FLOAD any IRFL_THREAD_ENV)
+LJFOLD(FLOAD any IRFL_CDATA_CTYPEID)
+LJFOLD(FLOAD any IRFL_CDATA_PTR)
+LJFOLD(FLOAD any IRFL_CDATA_INT)
+LJFOLD(FLOAD any IRFL_CDATA_INT64)
+LJFOLD(VLOAD any any) /* Vararg loads have no corresponding stores. */
+LJFOLDX(lj_opt_cse)
+
+/* All other field loads need alias analysis. */
+LJFOLD(FLOAD any any)
+LJFOLDX(lj_opt_fwd_fload)
+
+/* This is for LOOP only. Recording handles SLOADs internally. */
+LJFOLD(SLOAD any any)
+LJFOLDF(fwd_sload)
+{
+ if ((fins->op2 & IRSLOAD_FRAME)) {
+ TRef tr = lj_opt_cse(J);
+ return tref_ref(tr) < J->chain[IR_RETF] ? EMITFOLD : tr;
+ } else {
+ lj_assertJ(J->slot[fins->op1] != 0, "uninitialized slot accessed");
+ return J->slot[fins->op1];
+ }
+}
+
+/* Only fold for KKPTR. The pointer _and_ the contents must be const. */
+LJFOLD(XLOAD KKPTR any)
+LJFOLDF(xload_kptr)
+{
+ TRef tr = kfold_xload(J, fins, ir_kptr(fleft));
+ return tr ? tr : NEXTFOLD;
+}
+
+LJFOLD(XLOAD any any)
+LJFOLDX(lj_opt_fwd_xload)
+
+/* -- Frame handling ------------------------------------------------------ */
+
+/* Prevent CSE of a REF_BASE operand across IR_RETF. */
+LJFOLD(SUB any BASE)
+LJFOLD(SUB BASE any)
+LJFOLD(EQ any BASE)
+LJFOLDF(fold_base)
+{
+ return lj_opt_cselim(J, J->chain[IR_RETF]);
+}
+
+/* -- Write barriers ------------------------------------------------------ */
+
+/* Write barriers are amenable to CSE, but not across any incremental
+** GC steps.
+**
+** The same logic applies to open upvalue references, because a stack
+** may be resized during a GC step (not the current stack, but maybe that
+** of a coroutine).
+*/
+LJFOLD(TBAR any)
+LJFOLD(OBAR any any)
+LJFOLD(UREFO any any)
+LJFOLDF(barrier_tab)
+{
+ TRef tr = lj_opt_cse(J);
+ if (gcstep_barrier(J, tref_ref(tr))) /* CSE across GC step? */
+ return EMITFOLD; /* Raw emit. Assumes fins is left intact by CSE. */
+ return tr;
+}
+
+LJFOLD(TBAR TNEW)
+LJFOLD(TBAR TDUP)
+LJFOLDF(barrier_tnew_tdup)
+{
+ /* New tables are always white and never need a barrier. */
+ if (fins->op1 < J->chain[IR_LOOP]) /* Except across a GC step. */
+ return NEXTFOLD;
+ return DROPFOLD;
+}
+
+/* -- Profiling ----------------------------------------------------------- */
+
+LJFOLD(PROF any any)
+LJFOLDF(prof)
+{
+ IRRef ref = J->chain[IR_PROF];
+ if (ref+1 == J->cur.nins) /* Drop neighbouring IR_PROF. */
+ return ref;
+ return EMITFOLD;
+}
+
+/* -- Stores and allocations ---------------------------------------------- */
+
+/* Stores and allocations cannot be folded or passed on to CSE in general.
+** But some stores can be eliminated with dead-store elimination (DSE).
+**
+** Caveat: *all* stores and allocs must be listed here or they end up at CSE!
+*/
+
+LJFOLD(ASTORE any any)
+LJFOLD(HSTORE any any)
+LJFOLDX(lj_opt_dse_ahstore)
+
+LJFOLD(USTORE any any)
+LJFOLDX(lj_opt_dse_ustore)
+
+LJFOLD(FSTORE any any)
+LJFOLDX(lj_opt_dse_fstore)
+
+LJFOLD(XSTORE any any)
+LJFOLDX(lj_opt_dse_xstore)
+
+LJFOLD(NEWREF any any) /* Treated like a store. */
+LJFOLD(TMPREF any any)
+LJFOLD(CALLA any any)
+LJFOLD(CALLL any any) /* Safeguard fallback. */
+LJFOLD(CALLS any any)
+LJFOLD(CALLXS any any)
+LJFOLD(XBAR)
+LJFOLD(RETF any any) /* Modifies BASE. */
+LJFOLD(TNEW any any)
+LJFOLD(TDUP any)
+LJFOLD(CNEW any any)
+LJFOLD(XSNEW any any)
+LJFOLDX(lj_ir_emit)
+
+/* ------------------------------------------------------------------------ */
+
+/* Every entry in the generated hash table is a 32 bit pattern:
+**
+**   xxxxxxxx iiiiiii lllllll rrrrrrrrrr
+**
+**   xxxxxxxx = 8 bit index into fold function table
+**    iiiiiii = 7 bit folded instruction opcode
+**    lllllll = 7 bit left instruction opcode
+** rrrrrrrrrr = 8 bit right instruction opcode or 10 bits from literal field
+*/
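+/* E.g. a rule declared as LJFOLD(ADD KINT KINT) is looked up under the
+** key ((IR_ADD << 17) + (IR_KINT << 10) + IR_KINT); see the key
+** construction in lj_opt_fold() below.
+*/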
+
+#include "lj_folddef.h"
+
+/* ------------------------------------------------------------------------ */
+
+/* Fold IR instruction. */
+TRef LJ_FASTCALL lj_opt_fold(jit_State *J)
+{
+ uint32_t key, any;
+ IRRef ref;
+
+ if (LJ_UNLIKELY((J->flags & JIT_F_OPT_MASK) != JIT_F_OPT_DEFAULT)) {
+ lj_assertJ(((JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE|JIT_F_OPT_DSE) |
+ JIT_F_OPT_DEFAULT) == JIT_F_OPT_DEFAULT,
+ "bad JIT_F_OPT_DEFAULT");
+ /* Folding disabled? Chain to CSE, but not for loads/stores/allocs. */
+ if (!(J->flags & JIT_F_OPT_FOLD) && irm_kind(lj_ir_mode[fins->o]) == IRM_N)
+ return lj_opt_cse(J);
+
+ /* No FOLD, forwarding or CSE? Emit raw IR for loads, except for SLOAD. */
+ if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE)) !=
+ (JIT_F_OPT_FOLD|JIT_F_OPT_FWD|JIT_F_OPT_CSE) &&
+ irm_kind(lj_ir_mode[fins->o]) == IRM_L && fins->o != IR_SLOAD)
+ return lj_ir_emit(J);
+
+ /* No FOLD or DSE? Emit raw IR for stores. */
+ if ((J->flags & (JIT_F_OPT_FOLD|JIT_F_OPT_DSE)) !=
+ (JIT_F_OPT_FOLD|JIT_F_OPT_DSE) &&
+ irm_kind(lj_ir_mode[fins->o]) == IRM_S)
+ return lj_ir_emit(J);
+ }
+
+ /* Fold engine start/retry point. */
+retry:
+ /* Construct key from opcode and operand opcodes (unless literal/none). */
+ key = ((uint32_t)fins->o << 17);
+ if (fins->op1 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op1)->o << 10;
+ *fleft = *IR(fins->op1);
+ if (fins->op1 < REF_TRUE)
+ fleft[1] = IR(fins->op1)[1];
+ }
+ if (fins->op2 >= J->cur.nk) {
+ key += (uint32_t)IR(fins->op2)->o;
+ *fright = *IR(fins->op2);
+ if (fins->op2 < REF_TRUE)
+ fright[1] = IR(fins->op2)[1];
+ } else {
+ key += (fins->op2 & 0x3ffu); /* Literal mask. Must include IRCONV_*MASK. */
+ }
+
+ /* Check for a match in order from most specific to least specific. */
+ any = 0;
+ for (;;) {
+ uint32_t k = key | (any & 0x1ffff);
+ uint32_t h = fold_hashkey(k);
+ uint32_t fh = fold_hash[h]; /* Lookup key in semi-perfect hash table. */
+ if ((fh & 0xffffff) == k || (fh = fold_hash[h+1], (fh & 0xffffff) == k)) {
+ ref = (IRRef)tref_ref(fold_func[fh >> 24](J));
+ if (ref != NEXTFOLD)
+ break;
+ }
+ if (any == 0xfffff) /* Exhausted folding. Pass on to CSE. */
+ return lj_opt_cse(J);
+ any = (any | (any >> 10)) ^ 0xffc00;
+ }
+
+ /* Return value processing, ordered by frequency. */
+ if (LJ_LIKELY(ref >= MAX_FOLD))
+ return TREF(ref, irt_t(IR(ref)->t));
+ if (ref == RETRYFOLD)
+ goto retry;
+ if (ref == KINTFOLD)
+ return lj_ir_kint(J, fins->i);
+ if (ref == FAILFOLD)
+ lj_trace_err(J, LJ_TRERR_GFAIL);
+ lj_assertJ(ref == DROPFOLD, "bad fold result");
+ return REF_DROP;
+}
+
+/* -- Common-Subexpression Elimination ------------------------------------ */
+
+/* CSE an IR instruction. This is very fast due to the skip-list chains. */
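+/* Each opcode has its own chain: J->chain[op] holds the most recent
+** instruction with that opcode and ir->prev links to the previous one,
+** so only genuine CSE candidates are ever visited.
+*/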
+TRef LJ_FASTCALL lj_opt_cse(jit_State *J)
+{
+ /* Avoid narrow to wide store-to-load forwarding stall */
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ IROp op = fins->o;
+ if (LJ_LIKELY(J->flags & JIT_F_OPT_CSE)) {
+ /* Limited search for same operands in per-opcode chain. */
+ IRRef ref = J->chain[op];
+ IRRef lim = fins->op1;
+ if (fins->op2 > lim) lim = fins->op2; /* Relies on lit < REF_BIAS. */
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return TREF(ref, irt_t(IR(ref)->t)); /* Common subexpression found. */
+ ref = IR(ref)->prev;
+ }
+ }
+ /* Otherwise emit IR (inlined for speed). */
+ {
+ IRRef ref = lj_ir_nextins(J);
+ IRIns *ir = IR(ref);
+ ir->prev = J->chain[op];
+ ir->op12 = op12;
+ J->chain[op] = (IRRef1)ref;
+ ir->o = fins->o;
+ J->guardemit.irt |= fins->t.irt;
+ return TREF(ref, irt_t((ir->t = fins->t)));
+ }
+}
+
+/* CSE with explicit search limit. */
+TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim)
+{
+ IRRef ref = J->chain[fins->o];
+ IRRef2 op12 = (IRRef2)fins->op1 + ((IRRef2)fins->op2 << 16);
+ while (ref > lim) {
+ if (IR(ref)->op12 == op12)
+ return ref;
+ ref = IR(ref)->prev;
+ }
+ return lj_ir_emit(J);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+#undef knumleft
+#undef knumright
+#undef emitir
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_loop.c b/libs/luajit-cmake/luajit/src/lj_opt_loop.c
new file mode 100644
index 0000000..ee3ee04
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_loop.c
@@ -0,0 +1,453 @@
+/*
+** LOOP: Loop Optimizations.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_loop_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_vm.h"
+
+/* Loop optimization:
+**
+** Traditional Loop-Invariant Code Motion (LICM) splits the instructions
+** of a loop into invariant and variant instructions. The invariant
+** instructions are hoisted out of the loop and only the variant
+** instructions remain inside the loop body.
+**
+** Unfortunately LICM is mostly useless for compiling dynamic languages.
+** The IR has many guards and most of the subsequent instructions are
+** control-dependent on them. The first non-hoistable guard would
+** effectively prevent hoisting of all subsequent instructions.
+**
+** That's why we use a special form of unrolling using copy-substitution,
+** combined with redundancy elimination:
+**
+** The recorded instruction stream is re-emitted to the compiler pipeline
+** with substituted operands. The substitution table is filled with the
+** refs returned by re-emitting each instruction. This can be done
+** on-the-fly, because the IR is in strict SSA form, where every ref is
+** defined before its use.
+**
+** This approach generates two code sections, separated by the LOOP
+** instruction:
+**
+** 1. The recorded instructions form a kind of pre-roll for the loop. It
+** contains a mix of invariant and variant instructions and performs
+** exactly one loop iteration (but not necessarily the 1st iteration).
+**
+** 2. The loop body contains only the variant instructions and performs
+** all remaining loop iterations.
+**
+** At first sight this looks like a waste of space, because the variant
+** instructions are present twice. But the key insight is that the
+** pre-roll honors the control-dependencies for *both* the pre-roll itself
+** *and* the loop body!
+**
+** It also means one doesn't have to explicitly model control-dependencies
+** (which, BTW, wouldn't help LICM much). And it's much easier to
+** integrate sparse snapshotting with this approach.
+**
+** One of the nicest aspects of this approach is that all of the
+** optimizations of the compiler pipeline (FOLD, CSE, FWD, etc.) can be
+** reused with only minor restrictions (e.g. one should not fold
+** instructions across loop-carried dependencies).
+**
+** But in general all optimizations can be applied which only need to look
+** backwards into the generated instruction stream. At any point in time
+** during the copy-substitution process this contains both a static loop
+** iteration (the pre-roll) and a dynamic one (from the to-be-copied
+** instruction up to the end of the partial loop body).
+**
+** Since control-dependencies are implicitly kept, CSE also applies to all
+** kinds of guards. The major advantage is that all invariant guards can
+** be hoisted, too.
+**
+** Load/store forwarding works across loop iterations, too. This is
+** important if loop-carried dependencies are kept in upvalues or tables.
+** E.g. 'self.idx = self.idx + 1' deep down in some OO-style method may
+** become a forwarded loop-recurrence after inlining.
+**
+** Since the IR is in SSA form, loop-carried dependencies have to be
+** modeled with PHI instructions. The potential candidates for PHIs are
+** collected on-the-fly during copy-substitution. After eliminating the
+** redundant ones, PHI instructions are emitted *below* the loop body.
+**
+** Note that this departure from traditional SSA form doesn't change the
+** semantics of the PHI instructions themselves. But it greatly simplifies
+** on-the-fly generation of the IR and the machine code.
+*/
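+/* Minimal sketch (not actual trace output): for
+**
+**   for i = 1, n do s = s + t[i] end
+**
+** the pre-roll performs one full iteration, including the invariant
+** loads of the array base and size of t. After the LOOP instruction
+** only the variant part (index increment, bounds check, ALOAD, ADD) is
+** re-emitted, with PHIs for the loop-carried values i and s.
+*/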
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- PHI elimination ----------------------------------------------------- */
+
+/* Emit or eliminate collected PHIs. */
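+/* Marks track possibly redundant PHIs: a PHI whose left value is never
+** used in the variant part, in a snapshot, or by another live PHI
+** stays marked and is eliminated in pass #5 below.
+*/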
+static void loop_emit_phi(jit_State *J, IRRef1 *subst, IRRef1 *phi, IRRef nphi,
+ SnapNo onsnap)
+{
+ int passx = 0;
+ IRRef i, j, nslots;
+ IRRef invar = J->chain[IR_LOOP];
+ /* Pass #1: mark redundant and potentially redundant PHIs. */
+ for (i = 0, j = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRRef rref = subst[lref];
+ if (lref == rref || rref == REF_DROP) { /* Invariants are redundant. */
+ irt_clearphi(IR(lref)->t);
+ } else {
+ phi[j++] = (IRRef1)lref;
+ if (!(IR(rref)->op1 == lref || IR(rref)->op2 == lref)) {
+ /* Quick check for simple recurrences failed, need pass2. */
+ irt_setmark(IR(lref)->t);
+ passx = 1;
+ }
+ }
+ }
+ nphi = j;
+ /* Pass #2: traverse variant part and clear marks of non-redundant PHIs. */
+ if (passx) {
+ SnapNo s;
+ for (i = J->cur.nins-1; i > invar; i--) {
+ IRIns *ir = IR(i);
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (!irref_isk(ir->op1)) {
+ irt_clearmark(IR(ir->op1)->t);
+ if (ir->op1 < invar &&
+ ir->o >= IR_CALLN && ir->o <= IR_CARG) { /* ORDER IR */
+ ir = IR(ir->op1);
+ while (ir->o == IR_CARG) {
+ if (!irref_isk(ir->op2)) irt_clearmark(IR(ir->op2)->t);
+ if (irref_isk(ir->op1)) break;
+ ir = IR(ir->op1);
+ irt_clearmark(ir->t);
+ }
+ }
+ }
+ }
+ for (s = J->cur.nsnap-1; s >= onsnap; s--) {
+ SnapShot *snap = &J->cur.snap[s];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref)) irt_clearmark(IR(ref)->t);
+ }
+ }
+ }
+ /* Pass #3: add PHIs for variant slots without a corresponding SLOAD. */
+ nslots = J->baseslot+J->maxslot;
+ for (i = 1; i < nslots; i++) {
+ IRRef ref = tref_ref(J->slot[i]);
+ while (!irref_isk(ref) && ref != subst[ref]) {
+ IRIns *ir = IR(ref);
+ irt_clearmark(ir->t); /* Unmark potential uses, too. */
+ if (irt_isphi(ir->t) || irt_ispri(ir->t))
+ break;
+ irt_setphi(ir->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ ref = subst[ref];
+ if (ref > invar)
+ break;
+ }
+ }
+ /* Pass #4: propagate non-redundant PHIs. */
+ while (passx) {
+ passx = 0;
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Propagate only from unmarked PHIs. */
+ IRIns *irr = IR(subst[lref]);
+ if (irt_ismarked(irr->t)) { /* Right ref points to other PHI? */
+ irt_clearmark(irr->t); /* Mark that PHI as non-redundant. */
+ passx = 1; /* Retry. */
+ }
+ }
+ }
+ }
+ /* Pass #5: emit PHI instructions or eliminate PHIs. */
+ for (i = 0; i < nphi; i++) {
+ IRRef lref = phi[i];
+ IRIns *ir = IR(lref);
+ if (!irt_ismarked(ir->t)) { /* Emit PHI if not marked. */
+ IRRef rref = subst[lref];
+ if (rref > invar)
+ irt_setphi(IR(rref)->t);
+ emitir_raw(IRT(IR_PHI, irt_type(ir->t)), lref, rref);
+ } else { /* Otherwise eliminate PHI. */
+ irt_clearmark(ir->t);
+ irt_clearphi(ir->t);
+ }
+ }
+}
+
+/* -- Loop unrolling using copy-substitution ------------------------------ */
+
+/* Copy-substitute snapshot. */
+static void loop_subst_snap(jit_State *J, SnapShot *osnap,
+ SnapEntry *loopmap, IRRef1 *subst)
+{
+ SnapEntry *nmap, *omap = &J->cur.snapmap[osnap->mapofs];
+ SnapEntry *nextmap = &J->cur.snapmap[snap_nextofs(&J->cur, osnap)];
+ MSize nmapofs;
+ MSize on, ln, nn, onent = osnap->nent;
+ BCReg nslots = osnap->nslots;
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap];
+  if (irt_isguard(J->guardemit)) { /* Guard in between? */
+ nmapofs = J->cur.nsnapmap;
+ J->cur.nsnap++; /* Add new snapshot. */
+ } else { /* Otherwise overwrite previous snapshot. */
+ snap--;
+ nmapofs = snap->mapofs;
+ }
+ J->guardemit.irt = 0;
+ /* Setup new snapshot. */
+ snap->mapofs = (uint32_t)nmapofs;
+ snap->ref = (IRRef1)J->cur.nins;
+ snap->mcofs = 0;
+ snap->nslots = nslots;
+ snap->topslot = osnap->topslot;
+ snap->count = 0;
+ nmap = &J->cur.snapmap[nmapofs];
+ /* Substitute snapshot slots. */
+ on = ln = nn = 0;
+ while (on < onent) {
+ SnapEntry osn = omap[on], lsn = loopmap[ln];
+ if (snap_slot(lsn) < snap_slot(osn)) { /* Copy slot from loop map. */
+ nmap[nn++] = lsn;
+ ln++;
+ } else { /* Copy substituted slot from snapshot map. */
+ if (snap_slot(lsn) == snap_slot(osn)) ln++; /* Shadowed loop slot. */
+ if (!irref_isk(snap_ref(osn)))
+ osn = snap_setref(osn, subst[snap_ref(osn)]);
+ nmap[nn++] = osn;
+ on++;
+ }
+ }
+ while (snap_slot(loopmap[ln]) < nslots) /* Copy remaining loop slots. */
+ nmap[nn++] = loopmap[ln++];
+ snap->nent = (uint8_t)nn;
+ omap += onent;
+ nmap += nn;
+ while (omap < nextmap) /* Copy PC + frame links. */
+ *nmap++ = *omap++;
+ J->cur.nsnapmap = (uint32_t)(nmap - J->cur.snapmap);
+}
+
+typedef struct LoopState {
+ jit_State *J;
+ IRRef1 *subst;
+ MSize sizesubst;
+} LoopState;
+
+/* Unroll loop. */
+static void loop_unroll(LoopState *lps)
+{
+ jit_State *J = lps->J;
+ IRRef1 phi[LJ_MAX_PHI];
+ uint32_t nphi = 0;
+ IRRef1 *subst;
+ SnapNo onsnap;
+ SnapShot *osnap, *loopsnap;
+ SnapEntry *loopmap, *psentinel;
+ IRRef ins, invar;
+
+ /* Allocate substitution table.
+ ** Only non-constant refs in [REF_BIAS,invar) are valid indexes.
+ */
+ invar = J->cur.nins;
+ lps->sizesubst = invar - REF_BIAS;
+ lps->subst = lj_mem_newvec(J->L, lps->sizesubst, IRRef1);
+ subst = lps->subst - REF_BIAS;
+ subst[REF_BASE] = REF_BASE;
+
+ /* LOOP separates the pre-roll from the loop body. */
+ emitir_raw(IRTG(IR_LOOP, IRT_NIL), 0, 0);
+
+ /* Grow snapshot buffer and map for copy-substituted snapshots.
+ ** Need up to twice the number of snapshots minus #0 and loop snapshot.
+ ** Need up to twice the number of entries plus fallback substitutions
+ ** from the loop snapshot entries for each new snapshot.
+ ** Caveat: both calls may reallocate J->cur.snap and J->cur.snapmap!
+ */
+ onsnap = J->cur.nsnap;
+ lj_snap_grow_buf(J, 2*onsnap-2);
+ lj_snap_grow_map(J, J->cur.nsnapmap*2+(onsnap-2)*J->cur.snap[onsnap-1].nent);
+
+ /* The loop snapshot is used for fallback substitutions. */
+ loopsnap = &J->cur.snap[onsnap-1];
+ loopmap = &J->cur.snapmap[loopsnap->mapofs];
+ /* The PC of snapshot #0 and the loop snapshot must match. */
+ psentinel = &loopmap[loopsnap->nent];
+ lj_assertJ(*psentinel == J->cur.snapmap[J->cur.snap[0].nent],
+ "mismatched PC for loop snapshot");
+ *psentinel = SNAP(255, 0, 0); /* Replace PC with temporary sentinel. */
+
+ /* Start substitution with snapshot #1 (#0 is empty for root traces). */
+ osnap = &J->cur.snap[1];
+
+ /* Copy and substitute all recorded instructions and snapshots. */
+ for (ins = REF_FIRST; ins < invar; ins++) {
+ IRIns *ir;
+ IRRef op1, op2;
+
+ if (ins >= osnap->ref) /* Instruction belongs to next snapshot? */
+ loop_subst_snap(J, osnap++, loopmap, subst); /* Copy-substitute it. */
+
+ /* Substitute instruction operands. */
+ ir = IR(ins);
+ op1 = ir->op1;
+ if (!irref_isk(op1)) op1 = subst[op1];
+ op2 = ir->op2;
+ if (!irref_isk(op2)) op2 = subst[op2];
+ if (irm_kind(lj_ir_mode[ir->o]) == IRM_N &&
+ op1 == ir->op1 && op2 == ir->op2) { /* Regular invariant ins? */
+ subst[ins] = (IRRef1)ins; /* Shortcut. */
+ } else {
+ /* Re-emit substituted instruction to the FOLD/CSE/etc. pipeline. */
+ IRType1 t = ir->t; /* Get this first, since emitir may invalidate ir. */
+ IRRef ref = tref_ref(emitir(ir->ot & ~IRT_ISPHI, op1, op2));
+ subst[ins] = (IRRef1)ref;
+ if (ref != ins) {
+ IRIns *irr = IR(ref);
+ if (ref < invar) { /* Loop-carried dependency? */
+ /* Potential PHI? */
+ if (!irref_isk(ref) && !irt_isphi(irr->t) && !irt_ispri(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ /* Check all loop-carried dependencies for type instability. */
+ if (!irt_sametype(t, irr->t)) {
+ if (irt_isinteger(t) && irt_isinteger(irr->t))
+ continue;
+ else if (irt_isnum(t) && irt_isinteger(irr->t)) /* Fix int->num. */
+ ref = tref_ref(emitir(IRTN(IR_CONV), ref, IRCONV_NUM_INT));
+ else if (irt_isnum(irr->t) && irt_isinteger(t)) /* Fix num->int. */
+ ref = tref_ref(emitir(IRTGI(IR_CONV), ref,
+ IRCONV_INT_NUM|IRCONV_CHECK));
+ else
+ lj_trace_err(J, LJ_TRERR_TYPEINS);
+ subst[ins] = (IRRef1)ref;
+ irr = IR(ref);
+ goto phiconv;
+ }
+ } else if (ref != REF_DROP && ref > invar &&
+ ((irr->o == IR_CONV && irr->op1 < invar) ||
+ (irr->o == IR_ALEN && irr->op2 < invar &&
+ irr->op2 != REF_NIL))) {
+ /* May need an extra PHI for a CONV or ALEN hint. */
+ ref = irr->o == IR_CONV ? irr->op1 : irr->op2;
+ irr = IR(ref);
+ phiconv:
+ if (ref < invar && !irref_isk(ref) && !irt_isphi(irr->t)) {
+ irt_setphi(irr->t);
+ if (nphi >= LJ_MAX_PHI)
+ lj_trace_err(J, LJ_TRERR_PHIOV);
+ phi[nphi++] = (IRRef1)ref;
+ }
+ }
+ }
+ }
+ }
+ if (!irt_isguard(J->guardemit)) /* Drop redundant snapshot. */
+ J->cur.nsnapmap = (uint32_t)J->cur.snap[--J->cur.nsnap].mapofs;
+ lj_assertJ(J->cur.nsnapmap <= J->sizesnapmap, "bad snapshot map index");
+ *psentinel = J->cur.snapmap[J->cur.snap[0].nent]; /* Restore PC. */
+
+ loop_emit_phi(J, subst, phi, nphi, onsnap);
+}
+
+/* Undo any partial changes made by the loop optimization. */
+static void loop_undo(jit_State *J, IRRef ins, SnapNo nsnap, MSize nsnapmap)
+{
+ ptrdiff_t i;
+ SnapShot *snap = &J->cur.snap[nsnap-1];
+ SnapEntry *map = J->cur.snapmap;
+ map[snap->mapofs + snap->nent] = map[J->cur.snap[0].nent]; /* Restore PC. */
+ J->cur.nsnapmap = (uint32_t)nsnapmap;
+ J->cur.nsnap = nsnap;
+ J->guardemit.irt = 0;
+ lj_ir_rollback(J, ins);
+ for (i = 0; i < BPROP_SLOTS; i++) { /* Remove backprop. cache entries. */
+ BPropEntry *bp = &J->bpropcache[i];
+ if (bp->val >= ins)
+ bp->key = 0;
+ }
+ for (ins--; ins >= REF_FIRST; ins--) { /* Remove flags. */
+ IRIns *ir = IR(ins);
+ irt_clearphi(ir->t);
+ irt_clearmark(ir->t);
+ }
+}
+
+/* Protected callback for loop optimization. */
+static TValue *cploop_opt(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(L); UNUSED(dummy);
+ loop_unroll((LoopState *)ud);
+ return NULL;
+}
+
+/* Loop optimization. */
+int lj_opt_loop(jit_State *J)
+{
+ IRRef nins = J->cur.nins;
+ SnapNo nsnap = J->cur.nsnap;
+ MSize nsnapmap = J->cur.nsnapmap;
+ LoopState lps;
+ int errcode;
+ lps.J = J;
+ lps.subst = NULL;
+ lps.sizesubst = 0;
+ errcode = lj_vm_cpcall(J->L, NULL, &lps, cploop_opt);
+ lj_mem_freevec(J2G(J), lps.subst, lps.sizesubst, IRRef1);
+ if (LJ_UNLIKELY(errcode)) {
+ lua_State *L = J->L;
+ if (errcode == LUA_ERRRUN && tvisnumber(L->top-1)) { /* Trace error? */
+ int32_t e = numberVint(L->top-1);
+ switch ((TraceError)e) {
+ case LJ_TRERR_TYPEINS: /* Type instability. */
+ case LJ_TRERR_GFAIL: /* Guard would always fail. */
+ /* Unrolling via recording fixes many cases, e.g. a flipped boolean. */
+ if (--J->instunroll < 0) /* But do not unroll forever. */
+ break;
+ L->top--; /* Remove error object. */
+ loop_undo(J, nins, nsnap, nsnapmap);
+ return 1; /* Loop optimization failed, continue recording. */
+ default:
+ break;
+ }
+ }
+ lj_err_throw(L, errcode); /* Propagate all other errors. */
+ }
+ return 0; /* Loop optimization is ok. */
+}
+
+#undef IR
+#undef emitir
+#undef emitir_raw
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_mem.c b/libs/luajit-cmake/luajit/src/lj_opt_mem.c
new file mode 100644
index 0000000..09de2f0
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_mem.c
@@ -0,0 +1,979 @@
+/*
+** Memory access optimizations.
+** AA: Alias Analysis using high-level semantic disambiguation.
+** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
+** DSE: Dead-Store Elimination.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_mem_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_tab.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_ircall.h"
+#include "lj_dispatch.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+#define fleft (J->fold.left)
+#define fright (J->fold.right)
+
+/*
+** Caveat #1: return value is not always a TRef -- only use with tref_ref().
+** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
+*/
+
+/* Return values from alias analysis. */
+typedef enum {
+ ALIAS_NO, /* The two refs CANNOT alias (exact). */
+ ALIAS_MAY, /* The two refs MAY alias (inexact). */
+ ALIAS_MUST /* The two refs MUST alias (exact). */
+} AliasRet;
+
+/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */
+
+/* Simplified escape analysis: check for intervening stores. */
+static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
+{
+ IRRef ref = (IRRef)(ir - J->cur.ir); /* The ref that might be stored. */
+ for (ir++; ir < stop; ir++)
+ if (ir->op2 == ref &&
+ (ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
+ ir->o == IR_USTORE || ir->o == IR_FSTORE))
+ return ALIAS_MAY; /* Reference was stored and might alias. */
+ return ALIAS_NO; /* Reference was not stored. */
+}
+
+/* Alias analysis for two different table references. */
+static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
+{
+ IRIns *taba = IR(ta), *tabb = IR(tb);
+ int newa, newb;
+ lj_assertJ(ta != tb, "bad usage");
+ lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
+ /* Disambiguate new allocations. */
+ newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
+ newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
+ if (newa && newb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (newb) { /* At least one allocation? */
+ IRIns *tmp = taba; taba = tabb; tabb = tmp;
+ } else if (!newa) {
+ return ALIAS_MAY; /* Anything else: we just don't know. */
+ }
+ return aa_escape(J, taba, tabb);
+}
+
+/* Check whether there's no aliasing table.clear. */
+static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
+{
+ IRRef ref = J->chain[IR_CALLS];
+ while (ref > lim) {
+ IRIns *calls = IR(ref);
+ if (calls->op2 == IRCALL_lj_tab_clear &&
+ (ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
+ return 0; /* Conflict. */
+ ref = calls->prev;
+ }
+ return 1; /* No conflict. Can safely FOLD/CSE. */
+}
+
+/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
+int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
+{
+ IRRef ta = fins->op1;
+ IRRef ref = J->chain[IR_NEWREF];
+ while (ref > lim) {
+ IRIns *newref = IR(ref);
+ if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = newref->prev;
+ }
+ return fwd_aa_tab_clear(J, lim, ta);
+}
+
+/* Alias analysis for array and hash access using key-based disambiguation. */
+static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRRef ka = refa->op2;
+ IRRef kb = refb->op2;
+ IRIns *keya, *keyb;
+ IRRef ta, tb;
+ if (refa == refb)
+ return ALIAS_MUST; /* Shortcut for same refs. */
+ keya = IR(ka);
+ if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
+ keyb = IR(kb);
+ if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
+ ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
+ tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
+ if (ka == kb) {
+ /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
+ if (ta == tb)
+ return ALIAS_MUST; /* Same key, same table. */
+ else
+ return aa_table(J, ta, tb); /* Same key, possibly different table. */
+ }
+ if (irref_isk(ka) && irref_isk(kb))
+ return ALIAS_NO; /* Different constant keys. */
+ if (refa->o == IR_AREF) {
+ /* Disambiguate array references based on index arithmetic. */
+ int32_t ofsa = 0, ofsb = 0;
+ IRRef basea = ka, baseb = kb;
+ lj_assertJ(refb->o == IR_AREF, "expected AREF");
+ /* Gather base and offset from t[base] or t[base+-ofs]. */
+ if (keya->o == IR_ADD && irref_isk(keya->op2)) {
+ basea = keya->op1;
+ ofsa = IR(keya->op2)->i;
+ if (basea == kb && ofsa != 0)
+ return ALIAS_NO; /* t[base+-ofs] vs. t[base]. */
+ }
+ if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
+ baseb = keyb->op1;
+ ofsb = IR(keyb->op2)->i;
+ if (ka == baseb && ofsb != 0)
+ return ALIAS_NO; /* t[base] vs. t[base+-ofs]. */
+ }
+ if (basea == baseb && ofsa != ofsb)
+ return ALIAS_NO; /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
+ } else {
+ /* Disambiguate hash references based on the type of their keys. */
+ lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
+ (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
+ "bad xREF IR op %d or %d", refa->o, refb->o);
+ if (!irt_sametype(keya->t, keyb->t))
+ return ALIAS_NO; /* Different key types. */
+ }
+ if (ta == tb)
+ return ALIAS_MAY; /* Same table, cannot disambiguate keys. */
+ else
+ return aa_table(J, ta, tb); /* Try to disambiguate tables. */
+}
+
+/* Array and hash load forwarding. */
+static TRef fwd_ahload(jit_State *J, IRRef xref)
+{
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[fins->o+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store (yet): const-fold loads from allocations. */
+ {
+ IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
+ IRRef tab = ir->op1;
+ ir = IR(tab);
+ if ((ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) &&
+ fwd_aa_tab_clear(J, tab, tab)) {
+ /* A NEWREF with a number key may end up pointing to the array part.
+ ** But it's referenced from HSTORE and not found in the ASTORE chain.
+ ** For now simply consider this a conflict without forwarding anything.
+ */
+ if (xr->o == IR_AREF) {
+ IRRef ref2 = J->chain[IR_NEWREF];
+ while (ref2 > tab) {
+ IRIns *newref = IR(ref2);
+ if (irt_isnum(IR(newref->op2)->t))
+ goto cselim;
+ ref2 = newref->prev;
+ }
+ }
+ /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
+ ** But the above search for conflicting stores was limited by xref.
+ ** So continue searching, limited by the TNEW/TDUP. Store forwarding
+ ** is ok, too. A conflict does NOT limit the search for a matching load.
+ */
+ while (ref > tab) {
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: goto cselim; /* Conflicting store. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+ if (ir->o == IR_TNEW && !irt_isnil(fins->t))
+ return 0; /* Type instability in loop-carried dependency. */
+ if (irt_ispri(fins->t)) {
+ return TREF_PRI(irt_type(fins->t));
+ } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
+ irt_isstr(fins->t)) {
+ TValue keyv;
+ cTValue *tv;
+ IRIns *key = IR(xr->op2);
+ if (key->o == IR_KSLOT) key = IR(key->op1);
+ lj_ir_kvalue(J->L, &keyv, key);
+ tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
+ lj_assertJ(itype2irt(tv) == irt_type(fins->t),
+ "mismatched type in constant table");
+ if (irt_isnum(fins->t))
+ return lj_ir_knum_u64(J, tv->u64);
+ else if (LJ_DUALNUM && irt_isint(fins->t))
+ return lj_ir_kint(J, intV(tv));
+ else
+ return lj_ir_kstr(J, strV(tv));
+ }
+    /* Otherwise: don't intern as a constant. */
+ }
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[fins->o];
+ while (ref > lim) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref)
+ return ref; /* Load forwarding. */
+ ref = load->prev;
+ }
+ return 0; /* Conflict or no match. */
+}
+
+/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
+static TRef fwd_aload_reassoc(jit_State *J)
+{
+ IRIns *irx = IR(fins->op1);
+ IRIns *key = IR(irx->op2);
+ if (key->o == IR_ADD && irref_isk(key->op2)) {
+ IRIns *add2 = IR(key->op1);
+ if (add2->o == IR_ADD && irref_isk(add2->op2) &&
+ IR(key->op2)->i == -IR(add2->op2)->i) {
+ IRRef ref = J->chain[IR_AREF];
+ IRRef lim = add2->op1;
+ if (irx->op1 > lim) lim = irx->op1;
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
+ return fwd_ahload(J, ref);
+ ref = ir->prev;
+ }
+ }
+ }
+ return 0;
+}
+
+/* ALOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
+{
+ IRRef ref;
+ if ((ref = fwd_ahload(J, fins->op1)) ||
+ (ref = fwd_aload_reassoc(J)))
+ return ref;
+ return EMITFOLD;
+}
+
+/* HLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
+{
+ IRRef ref = fwd_ahload(J, fins->op1);
+ if (ref)
+ return ref;
+ return EMITFOLD;
+}
+
+/* HREFK forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
+{
+ IRRef tab = fleft->op1;
+ IRRef ref = J->chain[IR_NEWREF];
+ while (ref > tab) {
+ IRIns *newref = IR(ref);
+ if (tab == newref->op1) {
+ if (fright->op1 == newref->op2 && fwd_aa_tab_clear(J, ref, tab))
+ return ref; /* Forward from NEWREF. */
+ else
+ goto docse;
+ } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
+ goto docse;
+ }
+ ref = newref->prev;
+ }
+ /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
+ if (IR(tab)->o == IR_TDUP && fwd_aa_tab_clear(J, tab, tab))
+ fins->t.irt &= ~IRT_GUARD; /* Drop HREFK guard. */
+docse:
+ return CSEFOLD;
+}
+
+/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
+int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
+{
+ IRRef lim = fins->op1; /* Search limit. */
+ IRRef ref;
+
+ /* The key for an ASTORE may end up in the hash part after a NEWREF. */
+ if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
+ ref = J->chain[IR_ASTORE];
+ while (ref > lim) {
+ if (ref < J->chain[IR_NEWREF])
+ return 0; /* Conflict. */
+ ref = IR(ref)->prev;
+ }
+ }
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
+ return 0; /* Conflict. */
+ ref = store->prev;
+ }
+
+ return 1; /* No conflict. Can fold to niltv. */
+}
+
+/* ASTORE/HSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[fins->o];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_ahref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads).
+ ** Note that lj_tab_keyindex and lj_vm_next don't need guards,
+ ** since they are followed by at least one guarded VLOAD.
+ */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || ir->o == IR_ALEN)
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ lj_ir_nop(store);
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* ALEN forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J)
+{
+ IRRef tab = fins->op1; /* Table reference. */
+ IRRef lim = tab; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting HSTORE with numeric key. */
+ ref = J->chain[IR_HSTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ IRIns *href = IR(store->op1);
+ IRIns *key = IR(href->op2);
+ if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
+ lim = ref; /* Conflicting store found, limits search for ALEN. */
+ break;
+ }
+ ref = store->prev;
+ }
+
+ /* Try to find a matching ALEN. */
+ ref = J->chain[IR_ALEN];
+ while (ref > lim) {
+ /* CSE for ALEN only depends on the table, not the hint. */
+ if (IR(ref)->op1 == tab) {
+ IRRef sref;
+
+ /* Search for aliasing table.clear. */
+ if (!fwd_aa_tab_clear(J, ref, tab))
+ break;
+
+ /* Search for hint-forwarding or conflicting store. */
+ sref = J->chain[IR_ASTORE];
+ while (sref > ref) {
+ IRIns *store = IR(sref);
+ IRIns *aref = IR(store->op1);
+ IRIns *fref = IR(aref->op1);
+ if (tab == fref->op1) { /* ASTORE to the same table. */
+ /* Detect t[#t+1] = x idiom for push. */
+ IRIns *idx = IR(aref->op2);
+ if (!irt_isnil(store->t) &&
+ idx->o == IR_ADD && idx->op1 == ref &&
+ IR(idx->op2)->o == IR_KINT && IR(idx->op2)->i == 1) {
+ /* Note: this requires an extra PHI check in loop unroll. */
+ fins->op2 = aref->op2; /* Set ALEN hint. */
+ }
+ goto doemit; /* Conflicting store, possibly giving a hint. */
+ } else if (aa_table(J, tab, fref->op1) != ALIAS_NO) {
+ goto doemit; /* Conflicting store. */
+ }
+ sref = store->prev;
+ }
+
+ return ref; /* Plain ALEN forwarding. */
+ }
+ ref = IR(ref)->prev;
+ }
+doemit:
+ return EMITFOLD;
+}
+
+/* -- ULOAD forwarding ---------------------------------------------------- */
+
+/* The current alias analysis for upvalues is very simplistic. It only
+** disambiguates between the unique upvalues of the same function.
+** This is good enough for now, since most upvalues are read-only.
+**
+** A more precise analysis would be feasible with the help of the parser:
+** generate a unique key for every upvalue, even across all prototypes.
+** Lacking a realistic use-case, it's unclear whether this is beneficial.
+*/
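+/* E.g. two upvalues of the same function alias only if they have the
+** same index, while upvalues of different functions can only be ruled
+** out when their 8 bit disambiguation hashes (in op2) differ.
+*/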
+static AliasRet aa_uref(IRIns *refa, IRIns *refb)
+{
+ if (refa->o != refb->o)
+ return ALIAS_NO; /* Different UREFx type. */
+ if (refa->op1 == refb->op1) { /* Same function. */
+ if (refa->op2 == refb->op2)
+ return ALIAS_MUST; /* Same function, same upvalue idx. */
+ else
+ return ALIAS_NO; /* Same function, different upvalue idx. */
+ } else { /* Different functions, check disambiguation hash values. */
+ if (((refa->op2 ^ refb->op2) & 0xff))
+ return ALIAS_NO; /* Upvalues with different hash values cannot alias. */
+ else
+ return ALIAS_MAY; /* No conclusion can be drawn for same hash value. */
+ }
+}
+
+/* ULOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
+{
+ IRRef uref = fins->op1;
+ IRRef lim = REF_BASE; /* Search limit. */
+ IRIns *xr = IR(uref);
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_USTORE];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[IR_ULOAD];
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == uref ||
+ (IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
+ return ref; /* Match for identical or equal UREFx (non-CSEable UREFO). */
+ ref = ir->prev;
+ }
+ return lj_ir_emit(J);
+}
+
+/* USTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
+{
+ IRRef xref = fins->op1; /* xREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(xref);
+ IRRef1 *refp = &J->chain[IR_USTORE];
+ IRRef ref = *refp;
+ while (ref > xref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_uref(xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY: /* Store to MAYBE the same location. */
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST: /* Store to the same location. */
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards (includes conflicting loads). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ lj_ir_nop(store);
+ if (ref+1 < J->cur.nins &&
+ store[1].o == IR_OBAR && store[1].op1 == xref) {
+ IRRef1 *bp = &J->chain[IR_OBAR];
+ IRIns *obar;
+ for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
+ bp = &obar->prev;
+ /* Remove OBAR, too. */
+ *bp = obar->prev;
+ lj_ir_nop(obar);
+ }
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */
+
+/* Alias analysis for field access.
+** Field loads are cheap and field stores are rare.
+** Simple disambiguation based on field types is good enough.
+*/
+static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ if (refa->op2 != refb->op2)
+ return ALIAS_NO; /* Different fields. */
+ if (refa->op1 == refb->op1)
+ return ALIAS_MUST; /* Same field, same object. */
+ else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
+ return aa_table(J, refa->op1, refb->op1); /* Disambiguate tables. */
+ else
+ return ALIAS_MAY; /* Same field, possibly different object. */
+}
+
+/* Only the loads for mutable fields end up here (see FOLD). */
+TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
+{
+ IRRef oref = fins->op1; /* Object reference. */
+ IRRef fid = fins->op2; /* Field ID. */
+ IRRef lim = oref; /* Search limit. */
+ IRRef ref;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_FSTORE];
+ while (ref > oref) {
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, fins, IR(store->op1))) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST: return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+ /* No conflicting store: const-fold field loads from allocations. */
+ if (fid == IRFL_TAB_META) {
+ IRIns *ir = IR(oref);
+ if (ir->o == IR_TNEW || ir->o == IR_TDUP)
+ return lj_ir_knull(J, IRT_TAB);
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ return lj_opt_cselim(J, lim);
+}
+
+/* FSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
+{
+ IRRef fref = fins->op1; /* FREF reference. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRIns *xr = IR(fref);
+ IRRef1 *refp = &J->chain[IR_FSTORE];
+ IRRef ref = *refp;
+ while (ref > fref) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_fref(J, xr, IR(store->op1))) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val &&
+ !(xr->op2 >= IRFL_SBUF_W && xr->op2 <= IRFL_SBUF_R))
+ return DROPFOLD; /* Same value: drop the new store. */
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or conflicting loads. */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ lj_ir_nop(store);
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* Check whether there's no aliasing buffer op between IRFL_SBUF_*. */
+int LJ_FASTCALL lj_opt_fwd_sbuf(jit_State *J, IRRef lim)
+{
+ IRRef ref;
+ if (J->chain[IR_BUFPUT] > lim)
+ return 0; /* Conflict. */
+ ref = J->chain[IR_CALLS];
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
+ return 0; /* Conflict. */
+ ref = ir->prev;
+ }
+ ref = J->chain[IR_CALLL];
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
+ return 0; /* Conflict. */
+ ref = ir->prev;
+ }
+ return 1; /* No conflict. Can safely FOLD/CSE. */
+}
+
+/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */
+
+/* Find cdata allocation for a reference (if any). */
+static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
+{
+ while (ir->o == IR_ADD) {
+ if (!irref_isk(ir->op1)) {
+ IRIns *ir1 = aa_findcnew(J, IR(ir->op1)); /* Left-recursion. */
+ if (ir1) return ir1;
+ }
+ if (irref_isk(ir->op2)) return NULL;
+ ir = IR(ir->op2); /* Flatten right-recursion. */
+ }
+ return ir->o == IR_CNEW ? ir : NULL;
+}
+
+/* Alias analysis for two cdata allocations. */
+static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
+{
+ IRIns *cnewa = aa_findcnew(J, refa);
+ IRIns *cnewb = aa_findcnew(J, refb);
+ if (cnewa == cnewb)
+ return ALIAS_MAY; /* Same allocation or neither is an allocation. */
+ if (cnewa && cnewb)
+ return ALIAS_NO; /* Two different allocations never alias. */
+ if (cnewb) { cnewa = cnewb; refb = refa; }
+ return aa_escape(J, cnewa, refb);
+}
+
+/* Alias analysis for XLOAD/XSTORE. */
+static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
+{
+ ptrdiff_t ofsa = 0, ofsb = 0;
+ IRIns *refb = IR(xb->op1);
+ IRIns *basea = refa, *baseb = refb;
+ if (refa == refb && irt_sametype(xa->t, xb->t))
+ return ALIAS_MUST; /* Shortcut for same refs with identical type. */
+ /* Offset-based disambiguation. */
+ if (refa->o == IR_ADD && irref_isk(refa->op2)) {
+ IRIns *irk = IR(refa->op2);
+ basea = IR(refa->op1);
+ ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ }
+ if (refb->o == IR_ADD && irref_isk(refb->op2)) {
+ IRIns *irk = IR(refb->op2);
+ baseb = IR(refb->op1);
+ ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ }
+ /* Treat constified pointers like base vs. base+offset. */
+ if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
+ ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
+ baseb = basea;
+ }
+ /* This implements (very) strict aliasing rules.
+ ** Different types do NOT alias, except for differences in signedness.
+ ** Type punning through unions is allowed (but forces a reload).
+ */
+ if (basea == baseb) {
+ ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
+ if (ofsa == ofsb) {
+ if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
+ return ALIAS_MUST; /* Same-sized, same-kind. May need to convert. */
+ } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
+ return ALIAS_NO; /* Non-overlapping base+-o1 vs. base+-o2. */
+ }
+ /* NYI: extract, extend or reinterpret bits (int <-> fp). */
+ return ALIAS_MAY; /* Overlapping or type punning: force reload. */
+ }
+ if (!irt_sametype(xa->t, xb->t) &&
+ !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
+ ((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
+ return ALIAS_NO;
+ /* NYI: structural disambiguation. */
+ return aa_cnew(J, basea, baseb); /* Try to disambiguate allocations. */
+}
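+
+/* Illustrative example for the offset-based disambiguation above: an i32
+** access at base+0 (sza = 4) vs. an i32 access at base+4 (szb = 4) satisfies
+** ofsa + sza <= ofsb and yields ALIAS_NO. The same two accesses at base+0
+** and base+2 overlap, yield ALIAS_MAY and thus force a reload.
+*/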
+
+/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
+static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
+{
+ IRRef ref = J->chain[op];
+ IRRef lim = op1;
+ if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
+ while (ref > lim) {
+ IRIns *ir = IR(ref);
+ if (ir->op1 == op1 && ir->op2 == op2)
+ return ref;
+ ref = ir->prev;
+ }
+ return 0;
+}
+
+/* Reassociate index references. */
+static IRRef reassoc_xref(jit_State *J, IRIns *ir)
+{
+ ptrdiff_t ofs = 0;
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) { /* Get constant offset. */
+ IRIns *irk = IR(ir->op2);
+ ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
+ (ptrdiff_t)irk->i;
+ ir = IR(ir->op1);
+ }
+ if (ir->o == IR_ADD) { /* Add of base + index. */
+ /* Index ref > base ref for loop-carried dependences. Only check op1. */
+ IRIns *ir2, *ir1 = IR(ir->op1);
+ int32_t shift = 0;
+ IRRef idxref;
+ /* Determine index shifts. Don't bother with IR_MUL here. */
+ if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
+ shift = IR(ir1->op2)->i;
+ else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
+ shift = 1;
+ else
+ ir1 = ir;
+ ir2 = IR(ir1->op1);
+ /* A non-reassociated add. Must be a loop-carried dependence. */
+ if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
+ ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
+ else
+ return 0;
+ idxref = ir2->op1;
+ /* Try to CSE the reassociated chain. Give up if not found. */
+ if (ir1 != ir &&
+ !(idxref = reassoc_trycse(J, ir1->o, idxref,
+ ir1->o == IR_BSHL ? ir1->op2 : idxref)))
+ return 0;
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
+ return 0;
+ if (ofs != 0) {
+ IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
+ if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
+ return 0;
+ }
+ return idxref; /* Success, found a reassociated index reference. Phew. */
+ }
+ return 0; /* Failure. */
+}
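+
+/* Sketch of what reassoc_xref recovers (assuming a typical array access):
+** for an index expression ADD(BSHL(ADD(i, +1), 3), base) the constant is
+** peeled out of the loop-carried ADD(i, +1), CSE must find the existing
+** BSHL(i, 3) and ADD(..., base), and +1 << 3 = +8 is folded back in as a
+** plain offset. The resulting reference can then match accesses recorded
+** with the pre-increment index (the a[i-1] forwarding case below).
+*/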
+
+/* XLOAD forwarding. */
+TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef ref;
+
+ if ((fins->op2 & IRXLOAD_READONLY))
+ goto cselim;
+ if ((fins->op2 & IRXLOAD_VOLATILE))
+ goto doemit;
+
+ /* Search for conflicting stores. */
+ ref = J->chain[IR_XSTORE];
+retry:
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ while (ref > lim) {
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO: break; /* Continue searching. */
+ case ALIAS_MAY: lim = ref; goto cselim; /* Limit search for load. */
+ case ALIAS_MUST:
+ /* Emit conversion if the loaded type doesn't match the forwarded type. */
+ if (!irt_sametype(fins->t, IR(store->op2)->t)) {
+ IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
+ if (dt == IRT_I8 || dt == IRT_I16) { /* Trunc + sign-extend. */
+ st = dt | IRCONV_SEXT;
+ dt = IRT_INT;
+ } else if (dt == IRT_U8 || dt == IRT_U16) { /* Trunc + zero-extend. */
+ st = dt;
+ dt = IRT_INT;
+ }
+ fins->ot = IRT(IR_CONV, dt);
+ fins->op1 = store->op2;
+ fins->op2 = (dt<<5)|st;
+ return RETRYFOLD;
+ }
+ return store->op2; /* Store forwarding. */
+ }
+ ref = store->prev;
+ }
+
+cselim:
+ /* Try to find a matching load. Below the conflicting store, if any. */
+ ref = J->chain[IR_XLOAD];
+ while (ref > lim) {
+ /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
+ if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
+ return ref;
+ ref = IR(ref)->prev;
+ }
+
+ /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
+ if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
+ xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
+ ref = J->chain[IR_XSTORE];
+ while (ref > lim) /* Skip stores that have already been checked. */
+ ref = IR(ref)->prev;
+ lim = xref;
+ xr = IR(xref);
+ goto retry; /* Retry with the reassociated reference. */
+ }
+doemit:
+ return EMITFOLD;
+}
+
+/* XSTORE elimination. */
+TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
+{
+ IRRef xref = fins->op1;
+ IRIns *xr = IR(xref);
+ IRRef lim = xref; /* Search limit. */
+ IRRef val = fins->op2; /* Stored value reference. */
+ IRRef1 *refp = &J->chain[IR_XSTORE];
+ IRRef ref = *refp;
+ if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
+ if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
+ if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
+ while (ref > lim) { /* Search for redundant or conflicting stores. */
+ IRIns *store = IR(ref);
+ switch (aa_xref(J, xr, fins, store)) {
+ case ALIAS_NO:
+ break; /* Continue searching. */
+ case ALIAS_MAY:
+ if (store->op2 != val) /* Conflict if the value is different. */
+ goto doemit;
+ break; /* Otherwise continue searching. */
+ case ALIAS_MUST:
+ if (store->op2 == val) /* Same value: drop the new store. */
+ return DROPFOLD;
+ /* Different value: try to eliminate the redundant store. */
+ if (ref > J->chain[IR_LOOP]) { /* Quick check to avoid crossing LOOP. */
+ IRIns *ir;
+ /* Check for any intervening guards or any XLOADs (no AA performed). */
+ for (ir = IR(J->cur.nins-1); ir > store; ir--)
+ if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
+ goto doemit; /* No elimination possible. */
+ /* Remove redundant store from chain and replace with NOP. */
+ *refp = store->prev;
+ lj_ir_nop(store);
+ /* Now emit the new store instead. */
+ }
+ goto doemit;
+ }
+ ref = *(refp = &store->prev);
+ }
+doemit:
+ return EMITFOLD; /* Otherwise we have a conflict or simply no match. */
+}
+
+/* -- ASTORE/HSTORE previous type analysis -------------------------------- */
+
+/* Check whether the previous value for a table store is non-nil.
+** This can be derived either from a previous store or from a previous
+** load (because all loads from tables perform a type check).
+**
+** The result of the analysis can be used to avoid the metatable check
+** and the guard against HREF returning niltv. Both of these are cheap,
+** so let's not spend too much effort on the analysis.
+**
+** A result of 1 is exact: previous value CANNOT be nil.
+** A result of 0 is inexact: previous value MAY be nil.
+*/
+int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
+{
+ /* First check stores. */
+ IRRef ref = J->chain[loadop+IRDELTA_L2S];
+ while (ref > xref) {
+ IRIns *store = IR(ref);
+ if (store->op1 == xref) { /* Same xREF. */
+ /* A nil store MAY alias, but a non-nil store MUST alias. */
+ return !irt_isnil(store->t);
+ } else if (irt_isnil(store->t)) { /* Must check any nil store. */
+ IRRef skref = IR(store->op1)->op2;
+ IRRef xkref = IR(xref)->op2;
+ /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
+ if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
+ if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
+ return 0; /* A nil store with same const key or var key MAY alias. */
+ /* Different const keys CANNOT alias. */
+ } /* Different key types CANNOT alias. */
+ } /* Other non-nil stores MAY alias. */
+ ref = store->prev;
+ }
+
+ /* Check loads since nothing could be derived from stores. */
+ ref = J->chain[loadop];
+ while (ref > xref) {
+ IRIns *load = IR(ref);
+ if (load->op1 == xref) { /* Same xREF. */
+ /* A nil load MAY alias, but a non-nil load MUST alias. */
+ return !irt_isnil(load->t);
+ } /* Other non-nil loads MAY alias. */
+ ref = load->prev;
+ }
+ return 0; /* Nothing derived at all, previous value MAY be nil. */
+}
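+
+/* Example (illustrative): for 't[k] = 1; ...; t[k] = 2' the first store is
+** non-nil and MUST alias the second one, so the analysis returns 1 and the
+** metatable check plus the niltv guard can be omitted. A preceding
+** 'local x = t[k]' recorded with a non-nil type works the same way.
+*/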
+
+/* ------------------------------------------------------------------------ */
+
+#undef IR
+#undef fins
+#undef fleft
+#undef fright
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_narrow.c b/libs/luajit-cmake/luajit/src/lj_opt_narrow.c
new file mode 100644
index 0000000..586f1bc
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_narrow.c
@@ -0,0 +1,622 @@
+/*
+** NARROW: Narrowing of numbers to integers (double to int32_t).
+** STRIPOV: Stripping of overflow checks.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_narrow_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_vm.h"
+#include "lj_strscan.h"
+
+/* Rationale for narrowing optimizations:
+**
+** Lua has only a single number type and this is a FP double by default.
+** Narrowing doubles to integers does not pay off for the interpreter on a
+** current-generation x86/x64 machine. Most FP operations need the same
+** amount of execution resources as their integer counterparts, except
+** with slightly longer latencies. Longer latencies are a non-issue for
+** the interpreter, since they are usually hidden by other overhead.
+**
+** The total CPU execution bandwidth is the sum of the bandwidth of the FP
+** and the integer units, because they execute in parallel. The FP units
+** have an equal or higher bandwidth than the integer units. Not using
+** them means losing execution bandwidth. Moving work away from them to
+** the already quite busy integer units is a losing proposition.
+**
+** The situation for JIT-compiled code is a bit different: the higher code
+** density makes the extra latencies much more visible. Tight loops expose
+** the latencies for updating the induction variables. Array indexing
+** requires narrowing conversions with high latencies and additional
+** guards (to check that the index is really an integer). And many common
+** optimizations only work on integers.
+**
+** One solution would be speculative, eager narrowing of all number loads.
+** This causes many problems, like losing -0 or the need to resolve type
+** mismatches between traces. It also effectively forces the integer type
+** to have overflow-checking semantics. This impedes many basic
+** optimizations and requires adding overflow checks to all integer
+** arithmetic operations (whereas FP arithmetic can do without).
+**
+** Always replacing an FP op with an integer op plus an overflow check is
+** counter-productive on a current-generation super-scalar CPU. Although
+** the overflow check branches are highly predictable, they will clog the
+** execution port for the branch unit and tie up reorder buffers. This is
+** turning a pure data-flow dependency into a different data-flow
+** dependency (with slightly lower latency) *plus* a control dependency.
+** In general, you don't want to do this since latencies due to data-flow
+** dependencies can be well hidden by out-of-order execution.
+**
+** A better solution is to keep all numbers as FP values and only narrow
+** when it's beneficial to do so. LuaJIT uses predictive narrowing for
+** induction variables and demand-driven narrowing for index expressions,
+** integer arguments and bit operations. Additionally it can eliminate or
+** hoist most of the resulting overflow checks. Regular arithmetic
+** computations are never narrowed to integers.
+**
+** The integer type in the IR has convenient wrap-around semantics and
+** ignores overflow. Extra operations have been added for
+** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type.
+** Apart from reducing overall complexity of the compiler, this also
+** nicely solves the problem where you want to apply algebraic
+** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can
+** use lea instead of an add for integer ADD, but not for ADDOV (lea does
+** not affect the flags, but it helps to avoid register moves).
+**
+**
+** All of the above has to be reconsidered for architectures with slow FP
+** operations or without a hardware FPU. The dual-number mode of LuaJIT
+** addresses this issue. Arithmetic operations are performed on integers
+** as far as possible and overflow checks are added as needed.
+**
+** This implies that narrowing for integer arguments and bit operations
+** should also strip overflow checks, e.g. replace ADDOV with ADD. The
+** original overflow guards are weak and can be eliminated by DCE, if
+** there's no other use.
+**
+** A slight twist is that it's usually beneficial to use overflow-checked
+** integer arithmetic if all inputs are already integers. This is the only
+** change that affects the single-number mode, too.
+*/
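+
+/* Rough example for single-number mode: in 'for i=1,n do s=s+i; x=t[i+1] end'
+** the induction variable i is predictively narrowed to an integer, the
+** accumulation s+i stays a plain FP ADD, and only the index expression i+1
+** is demand-narrowed (usually ending up as a plain integer ADD, once the
+** overflow check is delegated to the array bounds check, see below).
+*/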
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+#define fins (&J->fold.ins)
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Elimination of narrowing type conversions --------------------------- */
+
+/* Narrowing of index expressions and bit operations is demand-driven. The
+** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT)
+** in all of these cases (e.g. array indexing or string indexing). FOLD
+** already takes care of eliminating simple redundant conversions like
+** CONV.int.num(CONV.num.int(x)) ==> x.
+**
+** But the surrounding code is FP-heavy and arithmetic operations are
+** performed on FP numbers (for the single-number mode). Consider a common
+** example such as 'x=t[i+1]', with 'i' already an integer (due to induction
+** variable narrowing). The index expression would be recorded as
+** CONV.int.num(ADD(CONV.num.int(i), 1))
+** which is clearly suboptimal.
+**
+** One can do better by recursively backpropagating the narrowing type
+** conversion across FP arithmetic operations. This turns FP ops into
+** their corresponding integer counterparts. Depending on the semantics of
+** the conversion they also need to check for overflow. Currently only ADD
+** and SUB are supported.
+**
+** The above example can be rewritten as
+** ADDOV(CONV.int.num(CONV.num.int(i)), 1)
+** and then into ADDOV(i, 1) after folding of the conversions. The original
+** FP ops remain in the IR and are eliminated by DCE since all references to
+** them are gone.
+**
+** [In dual-number mode the trace recorder already emits ADDOV etc., but
+** this can be further reduced. See below.]
+**
+** Special care has to be taken to avoid narrowing across an operation
+** which is potentially operating on non-integral operands. One obvious
+** case is when an expression contains a non-integral constant, but ends
+** up as an integer index at runtime (like t[x+1.5] with x=0.5).
+**
+** Operations with two non-constant operands illustrate a similar problem
+** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there,
+** unless it can be proven that either operand is integral (e.g. by CSEing
+** a previous conversion). As a not-so-obvious corollary this logic also
+** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]).
+**
+** Correctness of the transformation is guaranteed by never adding more
+** conversions to the tree than the single one we would need to emit
+** if not backpropagating. TOBIT employs a more optimistic rule, because
+** the conversion has special semantics, designed to make the life of the
+** compiler writer easier. ;-)
+**
+** Using on-the-fly backpropagation of an expression tree doesn't work
+** because it's unknown whether the transform is correct until the end.
+** This either requires IR rollback and cache invalidation for every
+** subtree or a two-pass algorithm. The former didn't work out too well,
+** so the code now combines a recursive collector with a stack-based
+** emitter.
+**
+** [A recursive backpropagation algorithm with backtracking, employing
+** skip-list lookup and round-robin caching, emitting stack operations
+** on-the-fly for a stack-based interpreter -- and all of that in a meager
+** kilobyte? Yep, compilers are a great treasure chest. Throw away your
+** textbooks and read the codebase of a compiler today!]
+**
+** There's another optimization opportunity for array indexing: it's
+** always accompanied by an array bounds-check. The outermost overflow
+** check may be delegated to the ABC operation. This works because ABC is
+** an unsigned comparison and wrap-around due to overflow creates negative
+** numbers.
+**
+** But this optimization is only valid for constants that cannot overflow
+** an int32_t into the range of valid array indexes [0..2^27+1). A check
+** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30
+** wraps to -2^30-1.
+**
+** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are
+** quite common. So the above example finally ends up as ADD(i, 1)!
+**
+** Later on, the assembler is able to fuse the whole array reference and
+** the ADD into the memory operands of loads and other instructions. This
+** is why LuaJIT is able to generate very pretty (and fast) machine code
+** for array indexing. And that, my dear, concludes another story about
+** one of the hidden secrets of LuaJIT ...
+*/
+
+/* Maximum backpropagation depth and maximum stack size. */
+#define NARROW_MAX_BACKPROP 100
+#define NARROW_MAX_STACK 256
+
+/* The stack machine has a 32 bit instruction format: [IROpT | IRRef1]
+** The lower 16 bits hold a reference (or 0). The upper 16 bits hold
+** the IR opcode + type or one of the following special opcodes:
+*/
+enum {
+ NARROW_REF, /* Push ref. */
+ NARROW_CONV, /* Push conversion of ref. */
+ NARROW_SEXT, /* Push sign-extension of ref. */
+ NARROW_INT /* Push KINT ref. The next code holds an int32_t. */
+};
+
+typedef uint32_t NarrowIns;
+
+#define NARROWINS(op, ref) (((op) << 16) + (ref))
+#define narrow_op(ins) ((IROpT)((ins) >> 16))
+#define narrow_ref(ins) ((IRRef1)(ins))
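+
+/* E.g. backpropagation of CONV.int.num(ADD(CONV.num.int(i), 1)) from the
+** comment above collects (sketch): NARROWINS(NARROW_REF, i),
+** NARROWINS(NARROW_INT, 0) followed by the raw int32_t 1, and finally
+** NARROWINS(IRT(IR_ADD, IRT_INT), ref). The emitter below replays this
+** and produces ADDOV(i, 1), or plain ADD once the overflow check has been
+** delegated to the bounds check.
+*/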
+
+/* Context used for narrowing of type conversions. */
+typedef struct NarrowConv {
+ jit_State *J; /* JIT compiler state. */
+ NarrowIns *sp; /* Current stack pointer. */
+ NarrowIns *maxsp; /* Maximum stack pointer minus redzone. */
+ IRRef mode; /* Conversion mode (IRCONV_*). */
+ IRType t; /* Destination type: IRT_INT or IRT_I64. */
+ NarrowIns stack[NARROW_MAX_STACK]; /* Stack holding stack-machine code. */
+} NarrowConv;
+
+/* Lookup a reference in the backpropagation cache. */
+static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode)
+{
+ ptrdiff_t i;
+ for (i = 0; i < BPROP_SLOTS; i++) {
+ BPropEntry *bp = &J->bpropcache[i];
+ /* Stronger checks are ok, too. */
+ if (bp->key == key && bp->mode >= mode &&
+ ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0)
+ return bp;
+ }
+ return NULL;
+}
+
+/* Add an entry to the backpropagation cache. */
+static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode)
+{
+ uint32_t slot = J->bpropslot;
+ BPropEntry *bp = &J->bpropcache[slot];
+ J->bpropslot = (slot + 1) & (BPROP_SLOTS-1);
+ bp->key = key;
+ bp->val = val;
+ bp->mode = mode;
+}
+
+/* Backpropagate overflow stripping. */
+static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+ jit_State *J = nc->J;
+ IRIns *ir = IR(ref);
+ if (ir->o == IR_ADDOV || ir->o == IR_SUBOV ||
+ (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) {
+ BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT);
+ if (bp) {
+ ref = bp->val;
+ } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+ NarrowIns *savesp = nc->sp;
+ narrow_stripov_backprop(nc, ir->op1, depth);
+ if (nc->sp < nc->maxsp) {
+ narrow_stripov_backprop(nc, ir->op2, depth);
+ if (nc->sp < nc->maxsp) {
+ *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
+ return;
+ }
+ }
+ nc->sp = savesp; /* Path too deep, need to backtrack. */
+ }
+ }
+ *nc->sp++ = NARROWINS(NARROW_REF, ref);
+}
+
+/* Backpropagate narrowing conversion. Return number of needed conversions. */
+static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
+{
+ jit_State *J = nc->J;
+ IRIns *ir = IR(ref);
+ IRRef cref;
+
+ if (nc->sp >= nc->maxsp) return 10; /* Path too deep. */
+
+ /* Check the easy cases first. */
+ if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
+ if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
+ narrow_stripov_backprop(nc, ir->op1, depth+1);
+ else
+ *nc->sp++ = NARROWINS(NARROW_REF, ir->op1); /* Undo conversion. */
+ if (nc->t == IRT_I64)
+ *nc->sp++ = NARROWINS(NARROW_SEXT, 0); /* Sign-extend integer. */
+ return 0;
+ } else if (ir->o == IR_KNUM) { /* Narrow FP constant. */
+ lua_Number n = ir_knum(ir)->n;
+ if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) {
+ /* Allows a wider range of constants. */
+ int64_t k64 = (int64_t)n;
+ if (n == (lua_Number)k64) { /* Only if const doesn't lose precision. */
+ *nc->sp++ = NARROWINS(NARROW_INT, 0);
+ *nc->sp++ = (NarrowIns)k64; /* But always truncate to 32 bits. */
+ return 0;
+ }
+ } else {
+ int32_t k = lj_num2int(n);
+ /* Only if constant is a small integer. */
+ if (checki16(k) && n == (lua_Number)k) {
+ *nc->sp++ = NARROWINS(NARROW_INT, 0);
+ *nc->sp++ = (NarrowIns)k;
+ return 0;
+ }
+ }
+ return 10; /* Never narrow other FP constants (this is rare). */
+ }
+
+ /* Try to CSE the conversion. Stronger checks are ok, too. */
+ cref = J->chain[fins->o];
+ while (cref > ref) {
+ IRIns *cr = IR(cref);
+ if (cr->op1 == ref &&
+ (fins->o == IR_TOBIT ||
+ ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) &&
+ irt_isguard(cr->t) >= irt_isguard(fins->t)))) {
+ *nc->sp++ = NARROWINS(NARROW_REF, cref);
+ return 0; /* Already there, no additional conversion needed. */
+ }
+ cref = cr->prev;
+ }
+
+ /* Backpropagate across ADD/SUB. */
+ if (ir->o == IR_ADD || ir->o == IR_SUB) {
+ /* Try cache lookup first. */
+ IRRef mode = nc->mode;
+ BPropEntry *bp;
+ /* Inner conversions need a stronger check. */
+ if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0)
+ mode += IRCONV_CHECK-IRCONV_INDEX;
+ bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+ if (bp) {
+ *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+ return 0;
+ } else if (nc->t == IRT_I64) {
+ /* Try sign-extending from an existing (checked) conversion to int. */
+ mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX;
+ bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
+ if (bp) {
+ *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
+ *nc->sp++ = NARROWINS(NARROW_SEXT, 0);
+ return 0;
+ }
+ }
+ if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
+ NarrowIns *savesp = nc->sp;
+ int count = narrow_conv_backprop(nc, ir->op1, depth);
+ count += narrow_conv_backprop(nc, ir->op2, depth);
+ if (count <= 1) { /* Limit total number of conversions. */
+ *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref);
+ return count;
+ }
+ nc->sp = savesp; /* Too many conversions, need to backtrack. */
+ }
+ }
+
+ /* Otherwise add a conversion. */
+ *nc->sp++ = NARROWINS(NARROW_CONV, ref);
+ return 1;
+}
+
+/* Emit the conversions collected during backpropagation. */
+static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
+{
+ /* The fins fields must be saved now -- emitir() overwrites them. */
+ IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0;
+ IROpT convot = fins->ot;
+ IRRef1 convop2 = fins->op2;
+ NarrowIns *next = nc->stack; /* List of instructions from backpropagation. */
+ NarrowIns *last = nc->sp;
+ NarrowIns *sp = nc->stack; /* Recycle the stack to store operands. */
+ while (next < last) { /* Simple stack machine to process the ins. list. */
+ NarrowIns ref = *next++;
+ IROpT op = narrow_op(ref);
+ if (op == NARROW_REF) {
+ *sp++ = ref;
+ } else if (op == NARROW_CONV) {
+ *sp++ = emitir_raw(convot, ref, convop2); /* Raw emit avoids a loop. */
+ } else if (op == NARROW_SEXT) {
+ lj_assertJ(sp >= nc->stack+1, "stack underflow");
+ sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
+ (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
+ } else if (op == NARROW_INT) {
+ lj_assertJ(next < last, "missing arg to NARROW_INT");
+ *sp++ = nc->t == IRT_I64 ?
+ lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
+ lj_ir_kint(J, *next++);
+ } else { /* Regular IROpT. Pops two operands and pushes one result. */
+ IRRef mode = nc->mode;
+ lj_assertJ(sp >= nc->stack+2, "stack underflow");
+ sp--;
+ /* Omit some overflow checks for array indexing. See comments above. */
+ if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
+ if (next == last && irref_isk(narrow_ref(sp[0])) &&
+ (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u)
+ guardot = 0;
+ else /* Otherwise cache a stronger check. */
+ mode += IRCONV_CHECK-IRCONV_INDEX;
+ }
+ sp[-1] = emitir(op+guardot, sp[-1], sp[0]);
+ /* Add to cache. */
+ if (narrow_ref(ref))
+ narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
+ }
+ }
+ lj_assertJ(sp == nc->stack+1, "stack misalignment");
+ return nc->stack[0];
+}
+
+/* Narrow a type conversion of an arithmetic operation. */
+TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J)
+{
+ if ((J->flags & JIT_F_OPT_NARROW)) {
+ NarrowConv nc;
+ nc.J = J;
+ nc.sp = nc.stack;
+ nc.maxsp = &nc.stack[NARROW_MAX_STACK-4];
+ nc.t = irt_type(fins->t);
+ if (fins->o == IR_TOBIT) {
+ nc.mode = IRCONV_TOBIT; /* Used only in the backpropagation cache. */
+ } else {
+ nc.mode = fins->op2;
+ }
+ if (narrow_conv_backprop(&nc, fins->op1, 0) <= 1)
+ return narrow_conv_emit(J, &nc);
+ }
+ return NEXTFOLD;
+}
+
+/* -- Narrowing of implicit conversions ----------------------------------- */
+
+/* Recursively strip overflow checks. */
+static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
+{
+ IRRef ref = tref_ref(tr);
+ IRIns *ir = IR(ref);
+ int op = ir->o;
+ if (op >= IR_ADDOV && op <= lastop) {
+ BPropEntry *bp = narrow_bpc_get(J, ref, mode);
+ if (bp) {
+ return TREF(bp->val, irt_t(IR(bp->val)->t));
+ } else {
+ IRRef op1 = ir->op1, op2 = ir->op2; /* The IR may be reallocated. */
+ op1 = narrow_stripov(J, op1, lastop, mode);
+ op2 = narrow_stripov(J, op2, lastop, mode);
+ tr = emitir(IRT(op - IR_ADDOV + IR_ADD,
+ ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2);
+ narrow_bpc_set(J, ref, tref_ref(tr), mode);
+ }
+ } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) {
+ tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode);
+ }
+ return tr;
+}
+
+/* Narrow array index. */
+TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
+{
+ IRIns *ir;
+ lj_assertJ(tref_isnumber(tr), "expected number type");
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
+ /* Omit some overflow checks for array indexing. See comments above. */
+ ir = IR(tref_ref(tr));
+ if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) &&
+ (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u)
+ return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2);
+ return tr;
+}
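+
+/* E.g. for t[i+1] with an already-integer i this rewrites ADDOV(i, +1) into
+** ADD(i, +1): the constant is within +-2^30, so a wrap-around cannot slip
+** past the unsigned array bounds check (see the comments above).
+*/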
+
+/* Narrow conversion to integer operand (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr)
+{
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
+ if (!tref_isinteger(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /*
+ ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV.
+ ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same.
+ */
+ return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+/* Narrow conversion to bitop operand (overflow wrapped). */
+TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
+{
+ if (tref_isstr(tr))
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ if (tref_isnum(tr)) /* Conversion may be narrowed, too. See above. */
+ return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J));
+ if (!tref_isinteger(tr))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ /*
+ ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV.
+ ** MULOV cannot be stripped due to precision widening.
+ */
+ return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
+}
+
+#if LJ_HASFFI
+/* Narrow C array index (overflow undefined). */
+TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
+{
+ lj_assertJ(tref_isnumber(tr), "expected number type");
+ if (tref_isnum(tr))
+ return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY);
+ /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
+ return narrow_stripov(J, tr, IR_MULOV,
+ LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) :
+ ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT));
+}
+#endif
+
+/* -- Narrowing of arithmetic operators ----------------------------------- */
+
+/* Check whether a number fits into an int32_t (-0 is ok, too). */
+static int numisint(lua_Number n)
+{
+ return (n == (lua_Number)lj_num2int(n));
+}
+
+/* Convert string to number. Error out for non-numeric string values. */
+static TRef conv_str_tonum(jit_State *J, TRef tr, TValue *o)
+{
+ if (tref_isstr(tr)) {
+ tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
+ /* Would need an inverted STRTO for this rare and useless case. */
+ if (!lj_strscan_num(strV(o), o)) /* Convert in-place. Value used below. */
+ lj_trace_err(J, LJ_TRERR_BADTYPE); /* Punt if non-numeric. */
+ }
+ return tr;
+}
+
+/* Narrowing of arithmetic operations. */
+TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
+ TValue *vb, TValue *vc, IROp op)
+{
+ rb = conv_str_tonum(J, rb, vb);
+ rc = conv_str_tonum(J, rc, vc);
+ /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */
+ if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) &&
+ tref_isinteger(rb) && tref_isinteger(rc) &&
+ numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc),
+ (int)op - (int)IR_ADD)))
+ return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc);
+ if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT);
+ if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ return emitir(IRTN(op), rb, rc);
+}
+
+/* Narrowing of unary minus operator. */
+TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
+{
+ rc = conv_str_tonum(J, rc, vc);
+ if (tref_isinteger(rc)) {
+ uint32_t k = (uint32_t)numberVint(vc);
+ if ((LJ_DUALNUM || k != 0) && k != 0x80000000u) {
+ TRef zero = lj_ir_kint(J, 0);
+ if (!LJ_DUALNUM)
+ emitir(IRTGI(IR_NE), rc, zero);
+ return emitir(IRTGI(IR_SUBOV), zero, rc);
+ }
+ rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ }
+ return emitir(IRTN(IR_NEG), rc, lj_ir_ksimd(J, LJ_KSIMD_NEG));
+}
+
+/* Narrowing of modulo operator. */
+TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
+{
+ TRef tmp;
+ rb = conv_str_tonum(J, rb, vb);
+ rc = conv_str_tonum(J, rc, vc);
+ if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) &&
+ tref_isinteger(rb) && tref_isinteger(rc) &&
+ (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) {
+ emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0));
+ return emitir(IRTI(IR_MOD), rb, rc);
+ }
+ /* b % c ==> b - floor(b/c)*c */
+ rb = lj_ir_tonum(J, rb);
+ rc = lj_ir_tonum(J, rc);
+ tmp = emitir(IRTN(IR_DIV), rb, rc);
+ tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
+ tmp = emitir(IRTN(IR_MUL), tmp, rc);
+ return emitir(IRTN(IR_SUB), rb, tmp);
+}
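+
+/* The FP fallback implements Lua's floored modulo, e.g. for b=-5, c=3:
+** -5 - floor(-5/3)*3 = -5 - (-2)*3 = 1, whereas a truncating C-style '%'
+** would give -2.
+*/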
+
+/* -- Predictive narrowing of induction variables ------------------------- */
+
+/* Narrow a single runtime value. */
+static int narrow_forl(jit_State *J, cTValue *o)
+{
+ if (tvisint(o)) return 1;
+ if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o));
+ return 0;
+}
+
+/* Narrow the FORL index type by looking at the runtime values. */
+IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
+{
+ lj_assertJ(tvisnumber(&tv[FORL_IDX]) &&
+ tvisnumber(&tv[FORL_STOP]) &&
+ tvisnumber(&tv[FORL_STEP]),
+ "expected number types");
+ /* Narrow only if the runtime values of start/stop/step are all integers. */
+ if (narrow_forl(J, &tv[FORL_IDX]) &&
+ narrow_forl(J, &tv[FORL_STOP]) &&
+ narrow_forl(J, &tv[FORL_STEP])) {
+ /* And if the loop index can't possibly overflow. */
+ lua_Number step = numberVnum(&tv[FORL_STEP]);
+ lua_Number sum = numberVnum(&tv[FORL_STOP]) + step;
+ if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0))
+ return IRT_INT;
+ }
+ return IRT_NUM;
+}
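+
+/* Example: 'for i=1,100,2 do end' gives step=2 and sum=102, well below
+** 2^31-1, so the index is narrowed to IRT_INT. With stop=2147483647 the
+** sum overflows an int32_t on the last iteration and IRT_NUM is kept.
+*/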
+
+#undef IR
+#undef fins
+#undef emitir
+#undef emitir_raw
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_sink.c b/libs/luajit-cmake/luajit/src/lj_opt_sink.c
new file mode 100644
index 0000000..4b9008b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_sink.c
@@ -0,0 +1,258 @@
+/*
+** SINK: Allocation Sinking and Store Sinking.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_sink_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Check whether the store ref points to an eligible allocation. */
+static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
+{
+ IRIns *ir = IR(irs->op1);
+ if (!irref_isk(ir->op2))
+ return NULL; /* Non-constant key. */
+ if (ir->o == IR_HREFK || ir->o == IR_AREF)
+ ir = IR(ir->op1);
+ else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF ||
+ ir->o == IR_FREF || ir->o == IR_ADD))
+ return NULL; /* Unhandled reference type (for XSTORE). */
+ ir = IR(ir->op1);
+ if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
+ return NULL; /* Not an allocation. */
+ return ir; /* Return allocation. */
+}
+
+/* Recursively check whether a value depends on a PHI. */
+static int sink_phidep(jit_State *J, IRRef ref, int *workp)
+{
+ IRIns *ir = IR(ref);
+ if (!*workp) return 1; /* Give up and pretend it does. */
+ (*workp)--;
+ if (irt_isphi(ir->t)) return 1;
+ if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1, workp)) return 1;
+ if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2, workp)) return 1;
+ return 0;
+}
+
+/* Check whether a value is a sinkable PHI or loop-invariant. */
+static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref)
+{
+ if (ref >= REF_FIRST) {
+ IRIns *ir = IR(ref);
+ if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT &&
+ irt_isphi(IR(ir->op1)->t))) {
+ ira->prev++;
+ return 1; /* Sinkable PHI. */
+ }
+ /* Otherwise the value must be loop-invariant. */
+ if (ref < J->loopref) {
+ /* Check for PHI dependencies, but give up after reasonable effort. */
+ int work = 64;
+ return !sink_phidep(J, ref, &work);
+ } else {
+ return 0; /* Loop-variant. */
+ }
+ }
+ return 1; /* Constant (non-PHI). */
+}
+
+/* Mark non-sinkable allocations using single-pass backward propagation.
+**
+** Roots for the marking process are:
+** - Some PHIs or snapshots (see below).
+** - Non-PHI, non-constant values stored to PHI allocations.
+** - All guards.
+** - Any remaining loads not eliminated by store-to-load forwarding.
+** - Stores with non-constant keys.
+** - All stored values.
+*/
+static void sink_mark_ins(jit_State *J)
+{
+ IRIns *ir, *irlast = IR(J->cur.nins-1);
+ for (ir = irlast ; ; ir--) {
+ switch (ir->o) {
+ case IR_BASE:
+ return; /* Finished. */
+ case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR: case IR_ALEN:
+ irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */
+ break;
+ case IR_FLOAD:
+ if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META)
+ irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2)))
+ irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+ }
+#if LJ_HASFFI
+ case IR_CNEWI:
+ if (irt_isphi(ir->t) &&
+ (!sink_checkphi(J, ir, ir->op2) ||
+ (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP &&
+ !sink_checkphi(J, ir, (ir+1)->op2))))
+ irt_setmark(ir->t); /* Mark ineligible allocation. */
+#endif
+ /* fallthrough */
+ case IR_USTORE:
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+#if LJ_HASFFI
+ case IR_CALLXS:
+#endif
+ case IR_CALLS:
+ irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */
+ break;
+ case IR_PHI: {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ irl->prev = irr->prev = 0; /* Clear PHI value counts. */
+ if (irl->o == irr->o &&
+ (irl->o == IR_TNEW || irl->o == IR_TDUP ||
+ (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI))))
+ break;
+ irt_setmark(irl->t);
+ irt_setmark(irr->t);
+ break;
+ }
+ default:
+ if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+ break;
+ }
+ }
+}
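+
+/* Rough example: in 'for i=1,100 do local t={i}; s=s+t[1] end' the ALOAD is
+** removed by store-to-load forwarding and no guard or escaping use marks t,
+** so the TNEW and its ASTORE can be sunk and are only materialized when a
+** side exit actually needs t.
+*/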
+
+/* Mark all instructions referenced by a snapshot. */
+static void sink_mark_snap(jit_State *J, SnapShot *snap)
+{
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref))
+ irt_setmark(IR(ref)->t);
+ }
+}
+
+/* Iteratively remark PHI refs with differing marks or PHI value counts. */
+static void sink_remark_phi(jit_State *J)
+{
+ IRIns *ir;
+ int remark;
+ do {
+ remark = 0;
+ for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ if (!((irl->t.irt ^ irr->t.irt) & IRT_MARK) && irl->prev == irr->prev)
+ continue;
+ remark |= (~(irl->t.irt & irr->t.irt) & IRT_MARK);
+ irt_setmark(IR(ir->op1)->t);
+ irt_setmark(IR(ir->op2)->t);
+ }
+ } while (remark);
+}
+
+/* Sweep instructions and tag sunken allocations and stores. */
+static void sink_sweep_ins(jit_State *J)
+{
+ IRIns *ir, *irbase = IR(REF_BASE);
+ for (ir = IR(J->cur.nins-1) ; ir >= irbase; ir--) {
+ switch (ir->o) {
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (ira && !irt_ismarked(ira->t)) {
+ int delta = (int)(ir - ira);
+ ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ case IR_NEWREF:
+ if (!irt_ismarked(IR(ir->op1)->t)) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+#if LJ_HASFFI
+ case IR_CNEW: case IR_CNEWI:
+#endif
+ case IR_TNEW: case IR_TDUP:
+ if (!irt_ismarked(ir->t)) {
+ ir->t.irt &= ~IRT_GUARD;
+ ir->prev = REGSP(RID_SINK, 0);
+ J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ case IR_PHI: {
+ IRIns *ira = IR(ir->op2);
+ if (!irt_ismarked(ira->t) &&
+ (ira->o == IR_TNEW || ira->o == IR_TDUP ||
+ (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ default:
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ break;
+ }
+ }
+ for (ir = IR(J->cur.nk); ir < irbase; ir++) {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
+ if (irt_is64(ir->t) && ir->o != IR_KNULL)
+ ir++;
+ }
+}
+
+/* Allocation sinking and store sinking.
+**
+** 1. Mark all non-sinkable allocations.
+** 2. Then sink all remaining allocations and the related stores.
+*/
+void lj_opt_sink(jit_State *J)
+{
+ const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD|
+ JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD);
+ if ((J->flags & need) == need &&
+ (J->chain[IR_TNEW] || J->chain[IR_TDUP] ||
+ (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) {
+ if (!J->loopref)
+ sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]);
+ sink_mark_ins(J);
+ if (J->loopref)
+ sink_remark_phi(J);
+ sink_sweep_ins(J);
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_split.c b/libs/luajit-cmake/luajit/src/lj_opt_split.c
new file mode 100644
index 0000000..506b981
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_split.c
@@ -0,0 +1,848 @@
+/*
+** SPLIT: Split 64 bit IR instructions into 32 bit IR instructions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_split_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT && (LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI))
+
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+
+/* SPLIT pass:
+**
+** This pass splits up 64 bit IR instructions into multiple 32 bit IR
+** instructions. It's only active for soft-float targets or for 32 bit CPUs
+** which lack native 64 bit integer operations (the FFI is currently the
+** only emitter for 64 bit integer instructions).
+**
+** Splitting the IR in a separate pass keeps each 32 bit IR assembler
+** backend simple. Only a small amount of extra functionality needs to be
+** implemented. This is much easier than adding support for allocating
+** register pairs to each backend (believe me, I tried). A few simple, but
+** important optimizations can be performed by the SPLIT pass, which would
+** be tedious to do in the backend.
+**
+** The basic idea is to replace each 64 bit IR instruction with its 32 bit
+** equivalent plus an extra HIOP instruction. The split IR is not passed
+** through FOLD or any other optimizations, so each HIOP is guaranteed to
+** immediately follow its counterpart. The actual functionality of HIOP is
+** inferred from the previous instruction.
+**
+** The operands of HIOP hold the hiword input references. The output of HIOP
+** is the hiword output reference, which is also used to hold the hiword
+** register or spill slot information. The register allocator treats this
+** instruction independently of any other instruction, which improves code
+** quality compared to using fixed register pairs.
+**
+** It's easier to split up some instructions into two regular 32 bit
+** instructions. E.g. XLOAD is split up into two XLOADs with two different
+** addresses. Obviously 64 bit constants need to be split up into two 32 bit
+** constants, too. Some hiword instructions can be entirely omitted, e.g.
+** when zero-extending a 32 bit value to 64 bits. 64 bit arguments for calls
+** are split up into two 32 bit arguments each.
+**
+** On soft-float targets, floating-point instructions are directly converted
+** to soft-float calls by the SPLIT pass (except for comparisons and MIN/MAX).
+** HIOP for number results has the type IRT_SOFTFP ("sfp" in -jdump).
+**
+** Here's the IR and x64 machine code for 'x.b = x.a + 1' for a struct with
+** two int64_t fields:
+**
+** 0100 p32 ADD base +8
+** 0101 i64 XLOAD 0100
+** 0102 i64 ADD 0101 +1
+** 0103 p32 ADD base +16
+** 0104 i64 XSTORE 0103 0102
+**
+** mov rax, [esi+0x8]
+** add rax, +0x01
+** mov [esi+0x10], rax
+**
+** Here's the transformed IR and the x86 machine code after the SPLIT pass:
+**
+** 0100 p32 ADD base +8
+** 0101 int XLOAD 0100
+** 0102 p32 ADD base +12
+** 0103 int XLOAD 0102
+** 0104 int ADD 0101 +1
+** 0105 int HIOP 0103 +0
+** 0106 p32 ADD base +16
+** 0107 int XSTORE 0106 0104
+** 0108 int HIOP 0106 0105
+**
+** mov eax, [esi+0x8]
+** mov ecx, [esi+0xc]
+** add eax, +0x01
+** adc ecx, +0x00
+** mov [esi+0x10], eax
+** mov [esi+0x14], ecx
+**
+** You may notice the reassociated hiword address computation, which is
+** later fused into the mov operands by the assembler.
+*/
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Directly emit the transformed IR without updating chains etc. */
+static IRRef split_emit(jit_State *J, uint16_t ot, IRRef1 op1, IRRef1 op2)
+{
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *ir = IR(nref);
+ ir->ot = ot;
+ ir->op1 = op1;
+ ir->op2 = op2;
+ return nref;
+}
+
+#if LJ_SOFTFP
+/* Emit a (checked) number to integer conversion. */
+static IRRef split_num2int(jit_State *J, IRRef lo, IRRef hi, int check)
+{
+ IRRef tmp, res;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), lo, hi);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hi, lo);
+#endif
+ res = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_softfp_d2i);
+ if (check) {
+ tmp = split_emit(J, IRTI(IR_CALLN), res, IRCALL_softfp_i2d);
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+ split_emit(J, IRTGI(IR_EQ), tmp, lo);
+ split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP), tmp+1, hi);
+ }
+ return res;
+}
+
+/* Emit a CALLN with one split 64 bit argument. */
+static IRRef split_call_l(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+#endif
+
+/* Emit a CALLN with one split 64 bit argument and a 32 bit argument. */
+static IRRef split_call_li(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), tmp, tmp);
+}
+
+/* Emit a CALLN with two split 64 bit arguments. */
+static IRRef split_call_ll(jit_State *J, IRRef1 *hisubst, IRIns *oir,
+ IRIns *ir, IRCallID id)
+{
+ IRRef tmp, op1 = ir->op1, op2 = ir->op2;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, hisubst[op2]);
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), tmp, oir[op2].prev);
+#endif
+ ir->prev = tmp = split_emit(J, IRTI(IR_CALLN), tmp, id);
+ return split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ tmp, tmp);
+}
+
+/* Get a pointer to the other 32 bit word (LE: hiword, BE: loword). */
+static IRRef split_ptr(jit_State *J, IRIns *oir, IRRef ref)
+{
+ IRRef nref = oir[ref].prev;
+ IRIns *ir = IR(nref);
+ int32_t ofs = 4;
+ if (ir->o == IR_KPTR)
+ return lj_ir_kptr(J, (char *)ir_kptr(ir) + ofs);
+ if (ir->o == IR_ADD && irref_isk(ir->op2) && !irt_isphi(oir[ref].t)) {
+ /* Reassociate address. */
+ ofs += IR(ir->op2)->i;
+ nref = ir->op1;
+ if (ofs == 0) return nref;
+ }
+ return split_emit(J, IRT(IR_ADD, IRT_PTR), nref, lj_ir_kint(J, ofs));
+}
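+
+/* E.g. on a little-endian target the hiword of an XLOAD from ADD(base, +8)
+** comes from ADD(base, +12): the +4 is folded into the existing constant
+** offset instead of stacking a second ADD on top of it.
+*/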
+
+#if LJ_HASFFI
+static IRRef split_bitshift(jit_State *J, IRRef1 *hisubst,
+ IRIns *oir, IRIns *nir, IRIns *ir)
+{
+ IROp op = ir->o;
+ IRRef kref = nir->op2;
+ if (irref_isk(kref)) { /* Optimize constant shifts. */
+ int32_t k = (IR(kref)->i & 63);
+ IRRef lo = nir->op1, hi = hisubst[ir->op1];
+ if (op == IR_BROL || op == IR_BROR) {
+ if (op == IR_BROR) k = (-k & 63);
+ if (k >= 32) { IRRef t = lo; lo = hi; hi = t; k -= 32; }
+ if (k == 0) {
+ passthrough:
+ J->cur.nins--;
+ ir->prev = lo;
+ return hi;
+ } else {
+ TRef k1, k2;
+ IRRef t1, t2, t3, t4;
+ J->cur.nins--;
+ k1 = lj_ir_kint(J, k);
+ k2 = lj_ir_kint(J, (-k & 31));
+ t1 = split_emit(J, IRTI(IR_BSHL), lo, k1);
+ t2 = split_emit(J, IRTI(IR_BSHL), hi, k1);
+ t3 = split_emit(J, IRTI(IR_BSHR), lo, k2);
+ t4 = split_emit(J, IRTI(IR_BSHR), hi, k2);
+ ir->prev = split_emit(J, IRTI(IR_BOR), t1, t4);
+ return split_emit(J, IRTI(IR_BOR), t2, t3);
+ }
+ } else if (k == 0) {
+ goto passthrough;
+ } else if (k < 32) {
+ if (op == IR_BSHL) {
+ IRRef t1 = split_emit(J, IRTI(IR_BSHL), hi, kref);
+ IRRef t2 = split_emit(J, IRTI(IR_BSHR), lo, lj_ir_kint(J, (-k&31)));
+ return split_emit(J, IRTI(IR_BOR), t1, t2);
+ } else {
+ IRRef t1 = ir->prev, t2;
+ lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage");
+ nir->o = IR_BSHR;
+ t2 = split_emit(J, IRTI(IR_BSHL), hi, lj_ir_kint(J, (-k&31)));
+ ir->prev = split_emit(J, IRTI(IR_BOR), t1, t2);
+ return split_emit(J, IRTI(op), hi, kref);
+ }
+ } else {
+ if (op == IR_BSHL) {
+ if (k == 32)
+ J->cur.nins--;
+ else
+ lo = ir->prev;
+ ir->prev = lj_ir_kint(J, 0);
+ return lo;
+ } else {
+ lj_assertJ(op == IR_BSHR || op == IR_BSAR, "bad usage");
+ if (k == 32) {
+ J->cur.nins--;
+ ir->prev = hi;
+ } else {
+ nir->op1 = hi;
+ }
+ if (op == IR_BSHR)
+ return lj_ir_kint(J, 0);
+ else
+ return split_emit(J, IRTI(IR_BSAR), hi, lj_ir_kint(J, 31));
+ }
+ }
+ }
+ return split_call_li(J, hisubst, oir, ir,
+ op - IR_BSHL + IRCALL_lj_carith_shl64);
+}
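+
+/* Sketch of the constant-rotate decomposition above: rol64 by 0 < k < 32 is
+** built from four 32 bit shifts:
+**   lo' = (lo << k) | (hi >> (32-k))
+**   hi' = (hi << k) | (lo >> (32-k))
+** For k >= 32 the two halves are swapped first and k is reduced by 32.
+*/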
+
+static IRRef split_bitop(jit_State *J, IRRef1 *hisubst,
+ IRIns *nir, IRIns *ir)
+{
+ IROp op = ir->o;
+ IRRef hi, kref = nir->op2;
+ if (irref_isk(kref)) { /* Optimize bit operations with lo constant. */
+ int32_t k = IR(kref)->i;
+ if (k == 0 || k == -1) {
+ if (op == IR_BAND) k = ~k;
+ if (k == 0) {
+ J->cur.nins--;
+ ir->prev = nir->op1;
+ } else if (op == IR_BXOR) {
+ nir->o = IR_BNOT;
+ nir->op2 = 0;
+ } else {
+ J->cur.nins--;
+ ir->prev = kref;
+ }
+ }
+ }
+ hi = hisubst[ir->op1];
+ kref = hisubst[ir->op2];
+ if (irref_isk(kref)) { /* Optimize bit operations with hi constant. */
+ int32_t k = IR(kref)->i;
+ if (k == 0 || k == -1) {
+ if (op == IR_BAND) k = ~k;
+ if (k == 0) {
+ return hi;
+ } else if (op == IR_BXOR) {
+ return split_emit(J, IRTI(IR_BNOT), hi, 0);
+ } else {
+ return kref;
+ }
+ }
+ }
+ return split_emit(J, IRTI(op), hi, kref);
+}
+#endif
+
+/* Substitute references of a snapshot. */
+static void split_subst_snap(jit_State *J, SnapShot *snap, IRIns *oir)
+{
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRIns *ir = &oir[snap_ref(sn)];
+ if (!(LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && irref_isk(snap_ref(sn))))
+ map[n] = ((sn & 0xffff0000) | ir->prev);
+ }
+}
+
+/* Transform the old IR to the new IR. */
+static void split_ir(jit_State *J)
+{
+ IRRef nins = J->cur.nins, nk = J->cur.nk;
+ MSize irlen = nins - nk;
+ MSize need = (irlen+1)*(sizeof(IRIns) + sizeof(IRRef1));
+ IRIns *oir = (IRIns *)lj_buf_tmp(J->L, need);
+ IRRef1 *hisubst;
+ IRRef ref, snref;
+ SnapShot *snap;
+
+ /* Copy old IR to buffer. */
+ memcpy(oir, IR(nk), irlen*sizeof(IRIns));
+ /* Bias hiword substitution table and old IR. Loword kept in field prev. */
+ hisubst = (IRRef1 *)&oir[irlen] - nk;
+ oir -= nk;
+
+ /* Remove all IR instructions, but retain IR constants. */
+ J->cur.nins = REF_FIRST;
+ J->loopref = 0;
+
+ /* Process constants and fixed references. */
+ for (ref = nk; ref <= REF_BASE; ref++) {
+ IRIns *ir = &oir[ref];
+ if ((LJ_SOFTFP && ir->o == IR_KNUM) || ir->o == IR_KINT64) {
+ /* Split up 64 bit constant. */
+ TValue tv = *ir_k64(ir);
+ ir->prev = lj_ir_kint(J, (int32_t)tv.u32.lo);
+ hisubst[ref] = lj_ir_kint(J, (int32_t)tv.u32.hi);
+ } else {
+ ir->prev = ref; /* Identity substitution for loword. */
+ hisubst[ref] = 0;
+ }
+ if (irt_is64(ir->t) && ir->o != IR_KNULL)
+ ref++;
+ }
+
+ /* Process old IR instructions. */
+ snap = J->cur.snap;
+ snref = snap->ref;
+ for (ref = REF_FIRST; ref < nins; ref++) {
+ IRIns *ir = &oir[ref];
+ IRRef nref = lj_ir_nextins(J);
+ IRIns *nir = IR(nref);
+ IRRef hi = 0;
+
+ if (ref >= snref) {
+ snap->ref = nref;
+ split_subst_snap(J, snap++, oir);
+ snref = snap < &J->cur.snap[J->cur.nsnap] ? snap->ref : ~(IRRef)0;
+ }
+
+ /* Copy-substitute old instruction to new instruction. */
+ nir->op1 = ir->op1 < nk ? ir->op1 : oir[ir->op1].prev;
+ nir->op2 = ir->op2 < nk ? ir->op2 : oir[ir->op2].prev;
+ ir->prev = nref; /* Loword substitution. */
+ nir->o = ir->o;
+ nir->t.irt = ir->t.irt & ~(IRT_MARK|IRT_ISPHI);
+ hisubst[ref] = 0;
+
+ /* Split 64 bit instructions. */
+#if LJ_SOFTFP
+ if (irt_isnum(ir->t)) {
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ /* Note: hi ref = lo ref + 1! Required for SNAP_SOFTFPNUM logic. */
+ switch (ir->o) {
+ case IR_ADD:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_add);
+ break;
+ case IR_SUB:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_sub);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_mul);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_softfp_div);
+ break;
+ case IR_POW:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_pow);
+ break;
+ case IR_FPMATH:
+ hi = split_call_l(J, hisubst, oir, ir, IRCALL_lj_vm_floor + ir->op2);
+ break;
+ case IR_LDEXP:
+ hi = split_call_li(J, hisubst, oir, ir, IRCALL_ldexp);
+ break;
+ case IR_NEG: case IR_ABS:
+ nir->o = IR_CONV; /* Pass through loword. */
+ nir->op2 = (IRT_INT << 5) | IRT_INT;
+ hi = split_emit(J, IRT(ir->o == IR_NEG ? IR_BXOR : IR_BAND, IRT_SOFTFP),
+ hisubst[ir->op1],
+ lj_ir_kint(J, (int32_t)(0x7fffffffu + (ir->o == IR_NEG))));
+ break;
+ case IR_SLOAD:
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from int to number. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ ir->prev = nref = split_emit(J, IRTI(IR_CALLN), nref,
+ IRCALL_softfp_i2d);
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ /* fallthrough */
+ case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
+ case IR_STRTO:
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ case IR_FLOAD:
+ lj_assertJ(ir->op1 == REF_NIL, "expected FLOAD from GG_State");
+ hi = lj_ir_kint(J, *(int32_t*)((char*)J2GG(J) + ir->op2 + LJ_LE*4));
+ nir->op2 += LJ_BE*4;
+ break;
+ case IR_XLOAD: {
+ IRIns inslo = *nir; /* Save/undo the emit of the lo XLOAD. */
+ J->cur.nins--;
+ hi = split_ptr(J, oir, ir->op1); /* Insert the hiref ADD. */
+#if LJ_BE
+ hi = split_emit(J, IRT(IR_XLOAD, IRT_INT), hi, ir->op2);
+ inslo.t.irt = IRT_SOFTFP | (inslo.t.irt & IRT_GUARD);
+#endif
+ nref = lj_ir_nextins(J);
+ nir = IR(nref);
+ *nir = inslo; /* Re-emit lo XLOAD. */
+#if LJ_LE
+ hi = split_emit(J, IRT(IR_XLOAD, IRT_SOFTFP), hi, ir->op2);
+ ir->prev = nref;
+#else
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ }
+ case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_XSTORE:
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nir->op1, hisubst[ir->op2]);
+ break;
+ case IR_CONV: { /* Conversion to number. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+ UNUSED(st);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) {
+ hi = split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d);
+ break;
+ }
+#endif
+ lj_assertJ(st == IRT_INT ||
+ (LJ_32 && LJ_HASFFI && (st == IRT_U32 || st == IRT_FLOAT)),
+ "bad source type for CONV");
+ nir->o = IR_CALLN;
+#if LJ_32 && LJ_HASFFI
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2d :
+ st == IRT_FLOAT ? IRCALL_softfp_f2d :
+ IRCALL_softfp_ui2d;
+#else
+ nir->op2 = IRCALL_softfp_i2d;
+#endif
+ hi = split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ break;
+ }
+ case IR_CALLN:
+ case IR_CALLL:
+ case IR_CALLS:
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI:
+ if (nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ if (hisubst[ir->op1] != hisubst[ir->op2])
+ split_emit(J, IRT(IR_PHI, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ case IR_HIOP:
+ J->cur.nins--; /* Drop joining HIOP. */
+ ir->prev = nir->op1;
+ hi = nir->op2;
+ break;
+ default:
+ lj_assertJ(ir->o <= IR_NE || ir->o == IR_MIN || ir->o == IR_MAX,
+ "bad IR op %d", ir->o);
+ hi = split_emit(J, IRTG(IR_HIOP, IRT_SOFTFP),
+ hisubst[ir->op1], hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_32 && LJ_HASFFI
+ if (irt_isint64(ir->t)) {
+ IRRef hiref = hisubst[ir->op1];
+ nir->t.irt = IRT_INT | (nir->t.irt & IRT_GUARD); /* Turn into INT op. */
+ switch (ir->o) {
+ case IR_ADD:
+ case IR_SUB:
+ /* Use plain op for hiword if loword cannot produce a carry/borrow. */
+ if (irref_isk(nir->op2) && IR(nir->op2)->i == 0) {
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->op1 = hiref; nir->op2 = hisubst[ir->op2];
+ hi = nref;
+ break;
+ }
+ /* fallthrough */
+ case IR_NEG:
+ hi = split_emit(J, IRTI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ case IR_MUL:
+ hi = split_call_ll(J, hisubst, oir, ir, IRCALL_lj_carith_mul64);
+ break;
+ case IR_DIV:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
+ IRCALL_lj_carith_divu64);
+ break;
+ case IR_MOD:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
+ IRCALL_lj_carith_modu64);
+ break;
+ case IR_POW:
+ hi = split_call_ll(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
+ IRCALL_lj_carith_powu64);
+ break;
+ case IR_BNOT:
+ hi = split_emit(J, IRTI(IR_BNOT), hiref, 0);
+ break;
+ case IR_BSWAP:
+ ir->prev = split_emit(J, IRTI(IR_BSWAP), hiref, 0);
+ hi = nref;
+ break;
+ case IR_BAND: case IR_BOR: case IR_BXOR:
+ hi = split_bitop(J, hisubst, nir, ir);
+ break;
+ case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
+ hi = split_bitshift(J, hisubst, oir, nir, ir);
+ break;
+ case IR_FLOAD:
+ lj_assertJ(ir->op2 == IRFL_CDATA_INT64, "only INT64 supported");
+ hi = split_emit(J, IRTI(IR_FLOAD), nir->op1, IRFL_CDATA_INT64_4);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XLOAD:
+ hi = split_emit(J, IRTI(IR_XLOAD), split_ptr(J, oir, ir->op1), ir->op2);
+#if LJ_BE
+ ir->prev = hi; hi = nref;
+#endif
+ break;
+ case IR_XSTORE:
+ split_emit(J, IRTI(IR_HIOP), nir->op1, hisubst[ir->op2]);
+ break;
+ case IR_CONV: { /* Conversion to 64 bit integer. Others handled below. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_SOFTFP
+ if (st == IRT_NUM) { /* NUM to 64 bit int conv. */
+ hi = split_call_l(J, hisubst, oir, ir,
+ irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul);
+ } else if (st == IRT_FLOAT) { /* FLOAT to 64 bit int conv. */
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul;
+ hi = split_emit(J, IRTI(IR_HIOP), nref, nref);
+ }
+#else
+ if (st == IRT_NUM || st == IRT_FLOAT) { /* FP to 64 bit int conv. */
+ hi = split_emit(J, IRTI(IR_HIOP), nir->op1, nref);
+ }
+#endif
+ else if (st == IRT_I64 || st == IRT_U64) { /* 64/64 bit cast. */
+ /* Drop the cast, since the assembler doesn't care. But forward both parts. */
+ hi = hiref;
+ goto fwdlo;
+ } else if ((ir->op2 & IRCONV_SEXT)) { /* Sign-extend to 64 bit. */
+ IRRef k31 = lj_ir_kint(J, 31);
+ nir = IR(nref); /* May have been reallocated. */
+ ir->prev = nir->op1; /* Pass through loword. */
+ nir->o = IR_BSAR; /* hi = bsar(lo, 31). */
+ nir->op2 = k31;
+ hi = nref;
+ } else { /* Zero-extend to 64 bit. */
+ hi = lj_ir_kint(J, 0);
+ goto fwdlo;
+ }
+ break;
+ }
+ case IR_CALLXS:
+ goto split_call;
+ case IR_PHI: {
+ IRRef hiref2;
+ if ((irref_isk(nir->op1) && irref_isk(nir->op2)) ||
+ nir->op1 == nir->op2)
+ J->cur.nins--; /* Drop useless PHIs. */
+ hiref2 = hisubst[ir->op2];
+ if (!((irref_isk(hiref) && irref_isk(hiref2)) || hiref == hiref2))
+ split_emit(J, IRTI(IR_PHI), hiref, hiref2);
+ break;
+ }
+ case IR_HIOP:
+ J->cur.nins--; /* Drop joining HIOP. */
+ ir->prev = nir->op1;
+ hi = nir->op2;
+ break;
+ default:
+ lj_assertJ(ir->o <= IR_NE, "bad IR op %d", ir->o); /* Comparisons. */
+ split_emit(J, IRTGI(IR_HIOP), hiref, hisubst[ir->op2]);
+ break;
+ }
+ } else
+#endif
+#if LJ_SOFTFP
+ if (ir->o == IR_SLOAD) {
+ if ((nir->op2 & IRSLOAD_CONVERT)) { /* Convert from number to int. */
+ nir->op2 &= ~IRSLOAD_CONVERT;
+ if (!(nir->op2 & IRSLOAD_TYPECHECK))
+ nir->t.irt = IRT_INT; /* Drop guard. */
+ split_emit(J, IRT(IR_HIOP, IRT_SOFTFP), nref, nref);
+ ir->prev = split_num2int(J, nref, nref+1, irt_isguard(ir->t));
+ }
+ } else if (ir->o == IR_TOBIT) {
+ IRRef tmp, op1 = ir->op1;
+ J->cur.nins--;
+#if LJ_LE
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), oir[op1].prev, hisubst[op1]);
+#else
+ tmp = split_emit(J, IRT(IR_CARG, IRT_NIL), hisubst[op1], oir[op1].prev);
+#endif
+ ir->prev = split_emit(J, IRTI(IR_CALLN), tmp, IRCALL_lj_vm_tobit);
+ } else if (ir->o == IR_TOSTR || ir->o == IR_TMPREF) {
+ if (hisubst[ir->op1]) {
+ if (irref_isk(ir->op1))
+ nir->op1 = ir->op1;
+ else
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), hisubst[ir->op1], nref);
+ }
+ } else if (ir->o == IR_HREF || ir->o == IR_NEWREF) {
+ if (irref_isk(ir->op2) && hisubst[ir->op2])
+ nir->op2 = ir->op2;
+ } else
+#endif
+ if (ir->o == IR_CONV) { /* See above, too. */
+ IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
+#if LJ_32 && LJ_HASFFI
+ if (st == IRT_I64 || st == IRT_U64) { /* Conversion from 64 bit int. */
+#if LJ_SOFTFP
+ if (irt_isfloat(ir->t)) {
+ split_call_l(J, hisubst, oir, ir,
+ st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+#else
+ if (irt_isfp(ir->t)) { /* 64 bit integer to FP conversion. */
+ ir->prev = split_emit(J, IRT(IR_HIOP, irt_type(ir->t)),
+ hisubst[ir->op1], nref);
+ }
+#endif
+ else { /* Truncate to lower 32 bits. */
+ fwdlo:
+ ir->prev = nir->op1; /* Forward loword. */
+ /* Replace with NOP to avoid messing up the snapshot logic. */
+ nir->ot = IRT(IR_NOP, IRT_NIL);
+ nir->op1 = nir->op2 = 0;
+ }
+ }
+#endif
+#if LJ_SOFTFP && LJ_32 && LJ_HASFFI
+ else if (irt_isfloat(ir->t)) {
+ if (st == IRT_NUM) {
+ split_call_l(J, hisubst, oir, ir, IRCALL_softfp_d2f);
+ J->cur.nins--; /* Drop unused HIOP. */
+ } else {
+ nir->o = IR_CALLN;
+ nir->op2 = st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f;
+ }
+ } else if (st == IRT_FLOAT) {
+ nir->o = IR_CALLN;
+ nir->op2 = irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui;
+ } else
+#endif
+#if LJ_SOFTFP
+ if (st == IRT_NUM || (LJ_32 && LJ_HASFFI && st == IRT_FLOAT)) {
+ if (irt_isguard(ir->t)) {
+ lj_assertJ(st == IRT_NUM && irt_isint(ir->t), "bad CONV types");
+ J->cur.nins--;
+ ir->prev = split_num2int(J, nir->op1, hisubst[ir->op1], 1);
+ } else {
+ split_call_l(J, hisubst, oir, ir,
+#if LJ_32 && LJ_HASFFI
+ st == IRT_NUM ?
+ (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
+ (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui)
+#else
+ IRCALL_softfp_d2i
+#endif
+ );
+ J->cur.nins--; /* Drop unused HIOP. */
+ }
+ }
+#endif
+ } else if (ir->o == IR_CALLXS) {
+ IRRef hiref;
+ split_call:
+ hiref = hisubst[ir->op1];
+ if (hiref) {
+ IROpT ot = nir->ot;
+ IRRef op2 = nir->op2;
+ nir->ot = IRT(IR_CARG, IRT_NIL);
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, ot, nref, op2);
+ }
+ if (LJ_SOFTFP ? irt_is64(ir->t) : irt_isint64(ir->t))
+ hi = split_emit(J,
+ IRT(IR_HIOP, (LJ_SOFTFP && irt_isnum(ir->t)) ? IRT_SOFTFP : IRT_INT),
+ nref, nref);
+ } else if (ir->o == IR_CARG) {
+ IRRef hiref = hisubst[ir->op1];
+ if (hiref) {
+ IRRef op2 = nir->op2;
+#if LJ_LE
+ nir->op2 = hiref;
+#else
+ nir->op2 = nir->op1; nir->op1 = hiref;
+#endif
+ ir->prev = nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+ hiref = hisubst[ir->op2];
+ if (hiref) {
+#if !LJ_TARGET_X86
+ int carg = 0;
+ IRIns *cir;
+ for (cir = IR(nir->op1); cir->o == IR_CARG; cir = IR(cir->op1))
+ carg++;
+ if ((carg & 1) == 0) { /* Align 64 bit arguments. */
+ IRRef op2 = nir->op2;
+ nir->op2 = REF_NIL;
+ nref = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, op2);
+ nir = IR(nref);
+ }
+#endif
+#if LJ_BE
+ { IRRef tmp = nir->op2; nir->op2 = hiref; hiref = tmp; }
+#endif
+ ir->prev = split_emit(J, IRT(IR_CARG, IRT_NIL), nref, hiref);
+ }
+ } else if (ir->o == IR_CNEWI) {
+ if (hisubst[ir->op2])
+ split_emit(J, IRT(IR_HIOP, IRT_NIL), nref, hisubst[ir->op2]);
+ } else if (ir->o == IR_LOOP) {
+ J->loopref = nref; /* Needed by assembler. */
+ }
+ hisubst[ref] = hi; /* Store hiword substitution. */
+ }
+ if (snref == nins) { /* Substitution for last snapshot. */
+ snap->ref = J->cur.nins;
+ split_subst_snap(J, snap, oir);
+ }
+
+ /* Add PHI marks. */
+ for (ref = J->cur.nins-1; ref >= REF_FIRST; ref--) {
+ IRIns *ir = IR(ref);
+ if (ir->o != IR_PHI) break;
+ if (!irref_isk(ir->op1)) irt_setphi(IR(ir->op1)->t);
+ if (ir->op2 > J->loopref) irt_setphi(IR(ir->op2)->t);
+ }
+}
+
+/* Protected callback for split pass. */
+static TValue *cpsplit(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ split_ir(J);
+ UNUSED(L); UNUSED(dummy);
+ return NULL;
+}
+
+#if defined(LUA_USE_ASSERT) || LJ_SOFTFP
+/* Slow, but sure way to check whether a SPLIT pass is needed. */
+static int split_needsplit(jit_State *J)
+{
+ IRIns *ir, *irend;
+ IRRef ref;
+ for (ir = IR(REF_FIRST), irend = IR(J->cur.nins); ir < irend; ir++)
+ if (LJ_SOFTFP ? irt_is64orfp(ir->t) : irt_isint64(ir->t))
+ return 1;
+ if (LJ_SOFTFP) {
+ for (ref = J->chain[IR_SLOAD]; ref; ref = IR(ref)->prev)
+ if ((IR(ref)->op2 & IRSLOAD_CONVERT))
+ return 1;
+ if (J->chain[IR_TOBIT])
+ return 1;
+ }
+ for (ref = J->chain[IR_CONV]; ref; ref = IR(ref)->prev) {
+ IRType st = (IR(ref)->op2 & IRCONV_SRCMASK);
+ if ((LJ_SOFTFP && (st == IRT_NUM || st == IRT_FLOAT)) ||
+ st == IRT_I64 || st == IRT_U64)
+ return 1;
+ }
+ return 0; /* Nope. */
+}
+#endif
+
+/* SPLIT pass. */
+void lj_opt_split(jit_State *J)
+{
+#if LJ_SOFTFP
+ if (!J->needsplit)
+ J->needsplit = split_needsplit(J);
+#else
+ lj_assertJ(J->needsplit >= split_needsplit(J), "bad SPLIT state");
+#endif
+ if (J->needsplit) {
+ int errcode = lj_vm_cpcall(J->L, NULL, J, cpsplit);
+ if (errcode) {
+ /* Completely reset the trace to avoid inconsistent dump on abort. */
+ J->cur.nins = J->cur.nk = REF_BASE;
+ J->cur.nsnap = 0;
+ lj_err_throw(J->L, errcode); /* Propagate errors. */
+ }
+ }
+}
+
+#undef IR
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_parse.c b/libs/luajit-cmake/luajit/src/lj_parse.c
new file mode 100644
index 0000000..9ddf60e
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_parse.c
@@ -0,0 +1,2747 @@
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_parse_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_strfmt.h"
+#include "lj_lex.h"
+#include "lj_parse.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+/* -- Parser structures and definitions ----------------------------------- */
+
+/* Expression kinds. */
+typedef enum {
+ /* Constant expressions must be first and in this order: */
+ VKNIL,
+ VKFALSE,
+ VKTRUE,
+ VKSTR, /* sval = string value */
+ VKNUM, /* nval = number value */
+ VKLAST = VKNUM,
+ VKCDATA, /* nval = cdata value, not treated as a constant expression */
+ /* Non-constant expressions follow: */
+ VLOCAL, /* info = local register, aux = vstack index */
+ VUPVAL, /* info = upvalue index, aux = vstack index */
+ VGLOBAL, /* sval = string value */
+ VINDEXED, /* info = table register, aux = index reg/byte/string const */
+ VJMP, /* info = instruction PC */
+ VRELOCABLE, /* info = instruction PC */
+ VNONRELOC, /* info = result register */
+ VCALL, /* info = instruction PC, aux = base */
+ VVOID
+} ExpKind;
+
+/* Expression descriptor. */
+typedef struct ExpDesc {
+ union {
+ struct {
+ uint32_t info; /* Primary info. */
+ uint32_t aux; /* Secondary info. */
+ } s;
+ TValue nval; /* Number value. */
+ GCstr *sval; /* String value. */
+ } u;
+ ExpKind k;
+ BCPos t; /* True condition jump list. */
+ BCPos f; /* False condition jump list. */
+} ExpDesc;
+
+/* Macros for expressions. */
+#define expr_hasjump(e) ((e)->t != (e)->f)
+
+#define expr_isk(e) ((e)->k <= VKLAST)
+#define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e))
+#define expr_isnumk(e) ((e)->k == VKNUM)
+#define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e))
+#define expr_isstrk(e) ((e)->k == VKSTR)
+
+#define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval)
+#define expr_numberV(e) numberVnum(expr_numtv((e)))
+
+/* Initialize expression. */
+static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info)
+{
+ e->k = k;
+ e->u.s.info = info;
+ e->f = e->t = NO_JMP;
+}
+
+/* Check number constant for +-0. */
+static int expr_numiszero(ExpDesc *e)
+{
+ TValue *o = expr_numtv(e);
+ return tvisint(o) ? (intV(o) == 0) : tviszero(o);
+}
+
+/* Per-function linked list of scope blocks. */
+typedef struct FuncScope {
+ struct FuncScope *prev; /* Link to outer scope. */
+ MSize vstart; /* Start of block-local variables. */
+ uint8_t nactvar; /* Number of active vars outside the scope. */
+ uint8_t flags; /* Scope flags. */
+} FuncScope;
+
+#define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */
+#define FSCOPE_BREAK 0x02 /* Break used in scope. */
+#define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */
+#define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */
+#define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */
+
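+/* Sentinel name for the break pseudo-goto. Compared by address only. */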
+#define NAME_BREAK ((GCstr *)(uintptr_t)1)
+
+/* Index into variable stack. */
+typedef uint16_t VarIndex;
+#define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL)
+
+/* Variable/goto/label info. */
+#define VSTACK_VAR_RW 0x01 /* R/W variable. */
+#define VSTACK_GOTO 0x02 /* Pending goto. */
+#define VSTACK_LABEL 0x04 /* Label. */
+
+/* Per-function state. */
+typedef struct FuncState {
+ GCtab *kt; /* Hash table for constants. */
+ LexState *ls; /* Lexer state. */
+ lua_State *L; /* Lua state. */
+ FuncScope *bl; /* Current scope. */
+ struct FuncState *prev; /* Enclosing function. */
+ BCPos pc; /* Next bytecode position. */
+ BCPos lasttarget; /* Bytecode position of last jump target. */
+ BCPos jpc; /* Pending jump list to next bytecode. */
+ BCReg freereg; /* First free register. */
+ BCReg nactvar; /* Number of active local variables. */
+ BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants. */
+ BCLine linedefined; /* First line of the function definition. */
+ BCInsLine *bcbase; /* Base of bytecode stack. */
+ BCPos bclim; /* Limit of bytecode stack. */
+ MSize vbase; /* Base of variable stack for this function. */
+ uint8_t flags; /* Prototype flags. */
+ uint8_t numparams; /* Number of parameters. */
+ uint8_t framesize; /* Fixed frame size. */
+ uint8_t nuv; /* Number of upvalues. */
+ VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */
+ VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */
+ VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */
+} FuncState;
+
+/* Binary and unary operators. ORDER OPR */
+typedef enum BinOpr {
+ OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */
+ OPR_CONCAT,
+ OPR_NE, OPR_EQ,
+ OPR_LT, OPR_GE, OPR_LE, OPR_GT,
+ OPR_AND, OPR_OR,
+ OPR_NOBINOPR
+} BinOpr;
+
+LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT);
+LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD);
+LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD);
+
+#ifdef LUA_USE_ASSERT
+#define lj_assertFS(c, ...) (lj_assertG_(G(fs->L), (c), __VA_ARGS__))
+#else
+#define lj_assertFS(c, ...) ((void)fs)
+#endif
+
+/* -- Error handling ------------------------------------------------------ */
+
+LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em)
+{
+ lj_lex_error(ls, ls->tok, em);
+}
+
+LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken tok)
+{
+ lj_lex_error(ls, ls->tok, LJ_ERR_XTOKEN, lj_lex_token2str(ls, tok));
+}
+
+LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what)
+{
+ if (fs->linedefined == 0)
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what);
+ else
+ lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what);
+}
+
+#define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m)
+#define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m)
+#define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); }
+
+/* -- Management of constants --------------------------------------------- */
+
+/* Return bytecode encoding for primitive constant. */
+#define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k)
+
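+/* The constant table maps each constant to its slot index, stored as a raw
+** u64 value; u32.hi == 0 distinguishes a slot from a fresh nil value. */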
+#define tvhaskslot(o) ((o)->u32.hi == 0)
+#define tvkslot(o) ((o)->u32.lo)
+
+/* Add a number constant. */
+static BCReg const_num(FuncState *fs, ExpDesc *e)
+{
+ lua_State *L = fs->L;
+ TValue *o;
+ lj_assertFS(expr_isnumk(e), "bad usage");
+ o = lj_tab_set(L, fs->kt, &e->u.nval);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkn;
+ return fs->nkn++;
+}
+
+/* Add a GC object constant. */
+static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
+{
+ lua_State *L = fs->L;
+ TValue key, *o;
+ setgcV(L, &key, gc, itype);
+ /* NOBARRIER: the key is new or kept alive. */
+ o = lj_tab_set(L, fs->kt, &key);
+ if (tvhaskslot(o))
+ return tvkslot(o);
+ o->u64 = fs->nkgc;
+ return fs->nkgc++;
+}
+
+/* Add a string constant. */
+static BCReg const_str(FuncState *fs, ExpDesc *e)
+{
+ lj_assertFS(expr_isstrk(e) || e->k == VGLOBAL, "bad usage");
+ return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
+}
+
+/* Anchor string constant to avoid GC. */
+GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ GCstr *s = lj_str_new(L, str, len);
+ TValue *tv = lj_tab_setstr(L, ls->fs->kt, s);
+ if (tvisnil(tv)) setboolV(tv, 1);
+ lj_gc_check(L);
+ return s;
+}
+
+#if LJ_HASFFI
+/* Anchor cdata to avoid GC. */
+void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd)
+{
+ /* NOBARRIER: the key is new or kept alive. */
+ lua_State *L = ls->L;
+ setcdataV(L, tv, cd);
+ setboolV(lj_tab_set(L, ls->fs->kt, tv), 1);
+}
+#endif
+
+/* -- Jump list handling -------------------------------------------------- */
+
+/* Get next element in jump list. */
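+/* Jump lists are threaded through the jump offsets of the instructions
+** themselves; a delta of NO_JMP marks the end of a list. */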
+static BCPos jmp_next(FuncState *fs, BCPos pc)
+{
+ ptrdiff_t delta = bc_j(fs->bcbase[pc].ins);
+ if ((BCPos)delta == NO_JMP)
+ return NO_JMP;
+ else
+ return (BCPos)(((ptrdiff_t)pc+1)+delta);
+}
+
+/* Check if any of the instructions on the jump list produce no value. */
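+/* Only BC_ISTC/BC_ISFC, which copy their operand, or a test with a still
+** unassigned (NO_REG) destination can be patched to produce a value. */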
+static int jmp_novalue(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list)) {
+ BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins;
+ if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG))
+ return 1;
+ }
+ return 0;
+}
+
+/* Patch register of test instructions. */
+static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg)
+{
+ BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc];
+ BCOp op = bc_op(ilp->ins);
+ if (op == BC_ISTC || op == BC_ISFC) {
+ if (reg != NO_REG && reg != bc_d(ilp->ins)) {
+ setbc_a(&ilp->ins, reg);
+ } else { /* Nothing to store or already in the right register. */
+ setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC));
+ setbc_a(&ilp->ins, 0);
+ }
+ } else if (bc_a(ilp->ins) == NO_REG) {
+ if (reg == NO_REG) {
+ ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0);
+ } else {
+ setbc_a(&ilp->ins, reg);
+ if (reg >= bc_a(ilp[1].ins))
+ setbc_a(&ilp[1].ins, reg+1);
+ }
+ } else {
+ return 0; /* Cannot patch other instructions. */
+ }
+ return 1;
+}
+
+/* Drop values for all instructions on jump list. */
+static void jmp_dropval(FuncState *fs, BCPos list)
+{
+ for (; list != NO_JMP; list = jmp_next(fs, list))
+ jmp_patchtestreg(fs, list, NO_REG);
+}
+
+/* Patch jump instruction to target. */
+static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
+{
+ BCIns *jmp = &fs->bcbase[pc].ins;
+ BCPos offset = dest-(pc+1)+BCBIAS_J;
+ lj_assertFS(dest != NO_JMP, "uninitialized jump target");
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XJUMP);
+ setbc_d(jmp, offset);
+}
+
+/* Append to jump list. */
+static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2)
+{
+ if (l2 == NO_JMP) {
+ return;
+ } else if (*l1 == NO_JMP) {
+ *l1 = l2;
+ } else {
+ BCPos list = *l1;
+ BCPos next;
+ while ((next = jmp_next(fs, list)) != NO_JMP) /* Find last element. */
+ list = next;
+ jmp_patchins(fs, list, l2);
+ }
+}
+
+/* Patch jump list and preserve produced values. */
+static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget,
+ BCReg reg, BCPos dtarget)
+{
+ while (list != NO_JMP) {
+ BCPos next = jmp_next(fs, list);
+ if (jmp_patchtestreg(fs, list, reg))
+ jmp_patchins(fs, list, vtarget); /* Jump to target with value. */
+ else
+ jmp_patchins(fs, list, dtarget); /* Jump to default target. */
+ list = next;
+ }
+}
+
+/* Jump to following instruction. Append to list of pending jumps. */
+static void jmp_tohere(FuncState *fs, BCPos list)
+{
+ fs->lasttarget = fs->pc;
+ jmp_append(fs, &fs->jpc, list);
+}
+
+/* Patch jump list to target. */
+static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
+{
+ if (target == fs->pc) {
+ jmp_tohere(fs, list);
+ } else {
+ lj_assertFS(target < fs->pc, "bad jump target");
+ jmp_patchval(fs, list, target, NO_REG, target);
+ }
+}
+
+/* -- Bytecode register allocator ----------------------------------------- */
+
+/* Bump frame size. */
+static void bcreg_bump(FuncState *fs, BCReg n)
+{
+ BCReg sz = fs->freereg + n;
+ if (sz > fs->framesize) {
+ if (sz >= LJ_MAX_SLOTS)
+ err_syntax(fs->ls, LJ_ERR_XSLOTS);
+ fs->framesize = (uint8_t)sz;
+ }
+}
+
+/* Reserve registers. */
+static void bcreg_reserve(FuncState *fs, BCReg n)
+{
+ bcreg_bump(fs, n);
+ fs->freereg += n;
+}
+
+/* Free register. */
+static void bcreg_free(FuncState *fs, BCReg reg)
+{
+ if (reg >= fs->nactvar) {
+ fs->freereg--;
+ lj_assertFS(reg == fs->freereg, "bad regfree");
+ }
+}
+
+/* Free register for expression. */
+static void expr_free(FuncState *fs, ExpDesc *e)
+{
+ if (e->k == VNONRELOC)
+ bcreg_free(fs, e->u.s.info);
+}
+
+/* -- Bytecode emitter ---------------------------------------------------- */
+
+/* Emit bytecode instruction. */
+static BCPos bcemit_INS(FuncState *fs, BCIns ins)
+{
+ BCPos pc = fs->pc;
+ LexState *ls = fs->ls;
+ jmp_patchval(fs, fs->jpc, pc, NO_REG, pc);
+ fs->jpc = NO_JMP;
+ if (LJ_UNLIKELY(pc >= fs->bclim)) {
+ ptrdiff_t base = fs->bcbase - ls->bcstack;
+ checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions");
+ lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine);
+ fs->bclim = (BCPos)(ls->sizebcstack - base);
+ fs->bcbase = ls->bcstack + base;
+ }
+ fs->bcbase[pc].ins = ins;
+ fs->bcbase[pc].line = ls->lastline;
+ fs->pc = pc+1;
+ return pc;
+}
+
+#define bcemit_ABC(fs, o, a, b, c) bcemit_INS(fs, BCINS_ABC(o, a, b, c))
+#define bcemit_AD(fs, o, a, d) bcemit_INS(fs, BCINS_AD(o, a, d))
+#define bcemit_AJ(fs, o, a, j) bcemit_INS(fs, BCINS_AJ(o, a, j))
+
+#define bcptr(fs, e) (&(fs)->bcbase[(e)->u.s.info].ins)
+
+/* -- Bytecode emitter for expressions ------------------------------------ */
+
+/* Discharge non-constant expression to any register. */
+static void expr_discharge(FuncState *fs, ExpDesc *e)
+{
+ BCIns ins;
+ if (e->k == VUPVAL) {
+ ins = BCINS_AD(BC_UGET, 0, e->u.s.info);
+ } else if (e->k == VGLOBAL) {
+ ins = BCINS_AD(BC_GGET, 0, const_str(fs, e));
+ } else if (e->k == VINDEXED) {
+ BCReg rc = e->u.s.aux;
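+ /* aux encoding (see expr_index): negative is ~(string const index),
+ ** above BCMAX_C is a biased byte constant, otherwise a register. */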
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1));
+ } else {
+ bcreg_free(fs, rc);
+ ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc);
+ }
+ bcreg_free(fs, e->u.s.info);
+ } else if (e->k == VCALL) {
+ e->u.s.info = e->u.s.aux;
+ e->k = VNONRELOC;
+ return;
+ } else if (e->k == VLOCAL) {
+ e->k = VNONRELOC;
+ return;
+ } else {
+ return;
+ }
+ e->u.s.info = bcemit_INS(fs, ins);
+ e->k = VRELOCABLE;
+}
+
+/* Emit bytecode to set a range of registers to nil. */
+static void bcemit_nil(FuncState *fs, BCReg from, BCReg n)
+{
+ if (fs->pc > fs->lasttarget) { /* No jumps to current position? */
+ BCIns *ip = &fs->bcbase[fs->pc-1].ins;
+ BCReg pto, pfrom = bc_a(*ip);
+ switch (bc_op(*ip)) { /* Try to merge with the previous instruction. */
+ case BC_KPRI:
+ if (bc_d(*ip) != ~LJ_TNIL) break;
+ if (from == pfrom) {
+ if (n == 1) return;
+ } else if (from == pfrom+1) {
+ from = pfrom;
+ n++;
+ } else {
+ break;
+ }
+ *ip = BCINS_AD(BC_KNIL, from, from+n-1); /* Replace KPRI. */
+ return;
+ case BC_KNIL:
+ pto = bc_d(*ip);
+ if (pfrom <= from && from <= pto+1) { /* Can we connect both ranges? */
+ if (from+n-1 > pto)
+ setbc_d(ip, from+n-1); /* Patch previous instruction range. */
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ /* Emit new instruction or replace old instruction. */
+ bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) :
+ BCINS_AD(BC_KNIL, from, from+n-1));
+}
+
+/* Discharge an expression to a specific register. Ignore branches. */
+static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ BCIns ins;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR) {
+ ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e));
+ } else if (e->k == VKNUM) {
+#if LJ_DUALNUM
+ cTValue *tv = expr_numtv(e);
+ if (tvisint(tv) && checki16(intV(tv)))
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv));
+ else
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checki16(k) && n == (lua_Number)k)
+ ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k);
+ else
+#endif
+ ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e));
+#if LJ_HASFFI
+ } else if (e->k == VKCDATA) {
+ fs->flags |= PROTO_FFI;
+ ins = BCINS_AD(BC_KCDATA, reg,
+ const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA));
+#endif
+ } else if (e->k == VRELOCABLE) {
+ setbc_a(bcptr(fs, e), reg);
+ goto noins;
+ } else if (e->k == VNONRELOC) {
+ if (reg == e->u.s.info)
+ goto noins;
+ ins = BCINS_AD(BC_MOV, reg, e->u.s.info);
+ } else if (e->k == VKNIL) {
+ bcemit_nil(fs, reg, 1);
+ goto noins;
+ } else if (e->k <= VKTRUE) {
+ ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
+ } else {
+ lj_assertFS(e->k == VVOID || e->k == VJMP, "bad expr type %d", e->k);
+ return;
+ }
+ bcemit_INS(fs, ins);
+noins:
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Forward declaration. */
+static BCPos bcemit_jmp(FuncState *fs);
+
+/* Discharge an expression to a specific register. */
+static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg)
+{
+ expr_toreg_nobranch(fs, e, reg);
+ if (e->k == VJMP)
+ jmp_append(fs, &e->t, e->u.s.info); /* Add it to the true jump list. */
+ if (expr_hasjump(e)) { /* Discharge expression with branches. */
+ BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP;
+ if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) {
+ BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs);
+ jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE);
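+ /* The JMP skips exactly one instruction: the KPRI true emitted next. */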
+ bcemit_AJ(fs, BC_JMP, fs->freereg, 1);
+ jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE);
+ jmp_tohere(fs, jval);
+ }
+ jend = fs->pc;
+ fs->lasttarget = jend;
+ jmp_patchval(fs, e->f, jend, reg, jfalse);
+ jmp_patchval(fs, e->t, jend, reg, jtrue);
+ }
+ e->f = e->t = NO_JMP;
+ e->u.s.info = reg;
+ e->k = VNONRELOC;
+}
+
+/* Discharge an expression to the next free register. */
+static void expr_tonextreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ expr_free(fs, e);
+ bcreg_reserve(fs, 1);
+ expr_toreg(fs, e, fs->freereg - 1);
+}
+
+/* Discharge an expression to any register. */
+static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e)
+{
+ expr_discharge(fs, e);
+ if (e->k == VNONRELOC) {
+ if (!expr_hasjump(e)) return e->u.s.info; /* Already in a register. */
+ if (e->u.s.info >= fs->nactvar) {
+ expr_toreg(fs, e, e->u.s.info); /* Discharge to temp. register. */
+ return e->u.s.info;
+ }
+ }
+ expr_tonextreg(fs, e); /* Discharge to next register. */
+ return e->u.s.info;
+}
+
+/* Partially discharge expression to a value. */
+static void expr_toval(FuncState *fs, ExpDesc *e)
+{
+ if (expr_hasjump(e))
+ expr_toanyreg(fs, e);
+ else
+ expr_discharge(fs, e);
+}
+
+/* Emit store for LHS expression. */
+static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e)
+{
+ BCIns ins;
+ if (var->k == VLOCAL) {
+ fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
+ expr_free(fs, e);
+ expr_toreg(fs, e, var->u.s.info);
+ return;
+ } else if (var->k == VUPVAL) {
+ fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW;
+ expr_toval(fs, e);
+ if (e->k <= VKTRUE)
+ ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e));
+ else if (e->k == VKSTR)
+ ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e));
+ else if (e->k == VKNUM)
+ ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e));
+ else
+ ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e));
+ } else if (var->k == VGLOBAL) {
+ BCReg ra = expr_toanyreg(fs, e);
+ ins = BCINS_AD(BC_GSET, ra, const_str(fs, var));
+ } else {
+ BCReg ra, rc;
+ lj_assertFS(var->k == VINDEXED, "bad expr type %d", var->k);
+ ra = expr_toanyreg(fs, e);
+ rc = var->u.s.aux;
+ if ((int32_t)rc < 0) {
+ ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc);
+ } else if (rc > BCMAX_C) {
+ ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1));
+ } else {
+#ifdef LUA_USE_ASSERT
+ /* Free the late-allocated key reg to avoid an assert on freeing the value reg. */
+ /* This can only happen when called from expr_table(). */
+ if (e->k == VNONRELOC && ra >= fs->nactvar && rc >= ra)
+ bcreg_free(fs, rc);
+#endif
+ ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc);
+ }
+ }
+ bcemit_INS(fs, ins);
+ expr_free(fs, e);
+}
+
+/* Emit method lookup expression. */
+static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key)
+{
+ BCReg idx, func, obj = expr_toanyreg(fs, e);
+ expr_free(fs, e);
+ func = fs->freereg;
+ bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. */
+ lj_assertFS(expr_isstrk(key), "bad usage");
+ idx = const_str(fs, key);
+ if (idx <= BCMAX_C) {
+ bcreg_reserve(fs, 2+LJ_FR2);
+ bcemit_ABC(fs, BC_TGETS, func, obj, idx);
+ } else {
+ bcreg_reserve(fs, 3+LJ_FR2);
+ bcemit_AD(fs, BC_KSTR, func+2+LJ_FR2, idx);
+ bcemit_ABC(fs, BC_TGETV, func, obj, func+2+LJ_FR2);
+ fs->freereg--;
+ }
+ e->u.s.info = func;
+ e->k = VNONRELOC;
+}
+
+/* -- Bytecode emitter for branches --------------------------------------- */
+
+/* Emit unconditional branch. */
+static BCPos bcemit_jmp(FuncState *fs)
+{
+ BCPos jpc = fs->jpc;
+ BCPos j = fs->pc - 1;
+ BCIns *ip = &fs->bcbase[j].ins;
+ fs->jpc = NO_JMP;
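+ /* An untargeted trailing UCLO doubles as the jump: just set its target. */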
+ if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO) {
+ setbc_j(ip, NO_JMP);
+ fs->lasttarget = j+1;
+ } else {
+ j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP);
+ }
+ jmp_append(fs, &j, jpc);
+ return j;
+}
+
+/* Invert branch condition of bytecode instruction. */
+static void invertcond(FuncState *fs, ExpDesc *e)
+{
+ BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins;
+ setbc_op(ip, bc_op(*ip)^1);
+}
+
+/* Emit conditional branch. */
+static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond)
+{
+ BCPos pc;
+ if (e->k == VRELOCABLE) {
+ BCIns *ip = bcptr(fs, e);
+ if (bc_op(*ip) == BC_NOT) {
+ *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip));
+ return bcemit_jmp(fs);
+ }
+ }
+ if (e->k != VNONRELOC) {
+ bcreg_reserve(fs, 1);
+ expr_toreg_nobranch(fs, e, fs->freereg-1);
+ }
+ bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info);
+ pc = bcemit_jmp(fs);
+ expr_free(fs, e);
+ return pc;
+}
+
+/* Emit branch on true condition. */
+static void bcemit_branch_t(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ invertcond(fs, e), pc = e->u.s.info;
+ else if (e->k == VKFALSE || e->k == VKNIL)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 0);
+ jmp_append(fs, &e->f, pc);
+ jmp_tohere(fs, e->t);
+ e->t = NO_JMP;
+}
+
+/* Emit branch on false condition. */
+static void bcemit_branch_f(FuncState *fs, ExpDesc *e)
+{
+ BCPos pc;
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE)
+ pc = NO_JMP; /* Never jump. */
+ else if (e->k == VJMP)
+ pc = e->u.s.info;
+ else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE)
+ expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs);
+ else
+ pc = bcemit_branch(fs, e, 1);
+ jmp_append(fs, &e->t, pc);
+ jmp_tohere(fs, e->f);
+ e->f = NO_JMP;
+}
+
+/* -- Bytecode emitter for operators -------------------------------------- */
+
+/* Try constant-folding of arithmetic operators. */
+static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ TValue o;
+ lua_Number n;
+ if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0;
+ n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD);
+ setnumV(&o, n);
+ if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */
+ if (LJ_DUALNUM) {
+ int32_t k = lj_num2int(n);
+ if ((lua_Number)k == n) {
+ setintV(&e1->u.nval, k);
+ return 1;
+ }
+ }
+ setnumV(&e1->u.nval, n);
+ return 1;
+}
+
+/* Emit arithmetic operator. */
+static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ BCReg rb, rc, t;
+ uint32_t op;
+ if (foldarith(opr, e1, e2))
+ return;
+ if (opr == OPR_POW) {
+ op = BC_POW;
+ rc = expr_toanyreg(fs, e2);
+ rb = expr_toanyreg(fs, e1);
+ } else {
+ op = opr-OPR_ADD+BC_ADDVV;
+ /* Must discharge 2nd operand first since VINDEXED might free regs. */
+ expr_toval(fs, e2);
+ if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C)
+ op -= BC_ADDVV-BC_ADDVN;
+ else
+ rc = expr_toanyreg(fs, e2);
+ /* 1st operand already discharged by bcemit_binop_left, but we still need KNUM/KSHORT. */
+ lj_assertFS(expr_isnumk(e1) || e1->k == VNONRELOC,
+ "bad expr type %d", e1->k);
+ expr_toval(fs, e1);
+ /* Avoid two consts to satisfy bytecode constraints. */
+ if (expr_isnumk(e1) && !expr_isnumk(e2) &&
+ (t = const_num(fs, e1)) <= BCMAX_B) {
+ rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV;
+ } else {
+ rb = expr_toanyreg(fs, e1);
+ }
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc);
+ e1->k = VRELOCABLE;
+}
+
+/* Emit comparison operator. */
+static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2)
+{
+ ExpDesc *eret = e1;
+ BCIns ins;
+ expr_toval(fs, e1);
+ if (opr == OPR_EQ || opr == OPR_NE) {
+ BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV;
+ BCReg ra;
+ if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */
+ ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. */
+ expr_toval(fs, e2);
+ switch (e2->k) {
+ case VKNIL: case VKFALSE: case VKTRUE:
+ ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2));
+ break;
+ case VKSTR:
+ ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2));
+ break;
+ case VKNUM:
+ ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2));
+ break;
+ default:
+ ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2));
+ break;
+ }
+ } else {
+ uint32_t op = opr-OPR_LT+BC_ISLT;
+ BCReg ra, rd;
+ if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */
+ e1 = e2; e2 = eret; /* Swap operands. */
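+ /* ORDER BC: ISLT ISGE ISLE ISGT, so XOR 3 maps GT to LT and GE to LE. */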
+ op = ((op-BC_ISLT)^3)+BC_ISLT;
+ expr_toval(fs, e1);
+ ra = expr_toanyreg(fs, e1);
+ rd = expr_toanyreg(fs, e2);
+ } else {
+ rd = expr_toanyreg(fs, e2);
+ ra = expr_toanyreg(fs, e1);
+ }
+ ins = BCINS_AD(op, ra, rd);
+ }
+ /* Using expr_free might cause asserts if the order is wrong. */
+ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--;
+ if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--;
+ bcemit_INS(fs, ins);
+ eret->u.s.info = bcemit_jmp(fs);
+ eret->k = VJMP;
+}
+
+/* Fixup left side of binary operator. */
+static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e)
+{
+ if (op == OPR_AND) {
+ bcemit_branch_t(fs, e);
+ } else if (op == OPR_OR) {
+ bcemit_branch_f(fs, e);
+ } else if (op == OPR_CONCAT) {
+ expr_tonextreg(fs, e);
+ } else if (op == OPR_EQ || op == OPR_NE) {
+ if (!expr_isk_nojump(e)) expr_toanyreg(fs, e);
+ } else {
+ if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e);
+ }
+}
+
+/* Emit binary operator. */
+static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2)
+{
+ if (op <= OPR_POW) {
+ bcemit_arith(fs, op, e1, e2);
+ } else if (op == OPR_AND) {
+ lj_assertFS(e1->t == NO_JMP, "jump list not closed");
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->f, e1->f);
+ *e1 = *e2;
+ } else if (op == OPR_OR) {
+ lj_assertFS(e1->f == NO_JMP, "jump list not closed");
+ expr_discharge(fs, e2);
+ jmp_append(fs, &e2->t, e1->t);
+ *e1 = *e2;
+ } else if (op == OPR_CONCAT) {
+ expr_toval(fs, e2);
+ if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) {
+ lj_assertFS(e1->u.s.info == bc_b(*bcptr(fs, e2))-1,
+ "bad CAT stack layout");
+ expr_free(fs, e1);
+ setbc_b(bcptr(fs, e2), e1->u.s.info);
+ e1->u.s.info = e2->u.s.info;
+ } else {
+ expr_tonextreg(fs, e2);
+ expr_free(fs, e2);
+ expr_free(fs, e1);
+ e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info);
+ }
+ e1->k = VRELOCABLE;
+ } else {
+ lj_assertFS(op == OPR_NE || op == OPR_EQ ||
+ op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT,
+ "bad binop %d", op);
+ bcemit_comp(fs, op, e1, e2);
+ }
+}
+
+/* Emit unary operator. */
+static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e)
+{
+ if (op == BC_NOT) {
+ /* Swap true and false lists. */
+ { BCPos temp = e->f; e->f = e->t; e->t = temp; }
+ jmp_dropval(fs, e->f);
+ jmp_dropval(fs, e->t);
+ expr_discharge(fs, e);
+ if (e->k == VKNIL || e->k == VKFALSE) {
+ e->k = VKTRUE;
+ return;
+ } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) {
+ e->k = VKFALSE;
+ return;
+ } else if (e->k == VJMP) {
+ invertcond(fs, e);
+ return;
+ } else if (e->k == VRELOCABLE) {
+ bcreg_reserve(fs, 1);
+ setbc_a(bcptr(fs, e), fs->freereg-1);
+ e->u.s.info = fs->freereg-1;
+ e->k = VNONRELOC;
+ } else {
+ lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k);
+ }
+ } else {
+ lj_assertFS(op == BC_UNM || op == BC_LEN, "bad unop %d", op);
+ if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */
+#if LJ_HASFFI
+ if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. */
+ GCcdata *cd = cdataV(&e->u.nval);
+ int64_t *p = (int64_t *)cdataptr(cd);
+ if (cd->ctypeid == CTID_COMPLEX_DOUBLE)
+ p[1] ^= (int64_t)U64x(80000000,00000000);
+ else
+ *p = -*p;
+ return;
+ } else
+#endif
+ if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */
+ TValue *o = expr_numtv(e);
+ if (tvisint(o)) {
+ int32_t k = intV(o);
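+ /* k == -k only for 0 (excluded above) and INT32_MIN, whose negation
+ ** overflows int32_t; represent -INT32_MIN as a number instead. */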
+ if (k == -k)
+ setnumV(o, -(lua_Number)k);
+ else
+ setintV(o, -k);
+ return;
+ } else {
+ o->u64 ^= U64x(80000000,00000000);
+ return;
+ }
+ }
+ }
+ expr_toanyreg(fs, e);
+ }
+ expr_free(fs, e);
+ e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info);
+ e->k = VRELOCABLE;
+}
+
+/* -- Lexer support ------------------------------------------------------- */
+
+/* Check and consume optional token. */
+static int lex_opt(LexState *ls, LexToken tok)
+{
+ if (ls->tok == tok) {
+ lj_lex_next(ls);
+ return 1;
+ }
+ return 0;
+}
+
+/* Check and consume token. */
+static void lex_check(LexState *ls, LexToken tok)
+{
+ if (ls->tok != tok)
+ err_token(ls, tok);
+ lj_lex_next(ls);
+}
+
+/* Check for matching token. */
+static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line)
+{
+ if (!lex_opt(ls, what)) {
+ if (line == ls->linenumber) {
+ err_token(ls, what);
+ } else {
+ const char *swhat = lj_lex_token2str(ls, what);
+ const char *swho = lj_lex_token2str(ls, who);
+ lj_lex_error(ls, ls->tok, LJ_ERR_XMATCH, swhat, swho, line);
+ }
+ }
+}
+
+/* Check for string token. */
+static GCstr *lex_str(LexState *ls)
+{
+ GCstr *s;
+ if (ls->tok != TK_name && (LJ_52 || ls->tok != TK_goto))
+ err_token(ls, TK_name);
+ s = strV(&ls->tokval);
+ lj_lex_next(ls);
+ return s;
+}
+
+/* -- Variable handling --------------------------------------------------- */
+
+#define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]])
+
+/* Define a new local variable. */
+static void var_new(LexState *ls, BCReg n, GCstr *name)
+{
+ FuncState *fs = ls->fs;
+ MSize vtop = ls->vtop;
+ checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables");
+ if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
+ if (ls->sizevstack >= LJ_MAX_VSTACK)
+ lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
+ lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
+ }
+ lj_assertFS((uintptr_t)name < VARNAME__MAX ||
+ lj_tab_getstr(fs->kt, name) != NULL,
+ "unanchored variable name");
+ /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
+ setgcref(ls->vstack[vtop].name, obj2gco(name));
+ fs->varmap[fs->nactvar+n] = (uint16_t)vtop;
+ ls->vtop = vtop+1;
+}
+
+#define var_new_lit(ls, n, v) \
+ var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1))
+
+#define var_new_fixed(ls, n, vn) \
+ var_new(ls, (n), (GCstr *)(uintptr_t)(vn))
+
+/* Add local variables. */
+static void var_add(LexState *ls, BCReg nvars)
+{
+ FuncState *fs = ls->fs;
+ BCReg nactvar = fs->nactvar;
+ while (nvars--) {
+ VarInfo *v = &var_get(ls, fs, nactvar);
+ v->startpc = fs->pc;
+ v->slot = nactvar++;
+ v->info = 0;
+ }
+ fs->nactvar = nactvar;
+}
+
+/* Remove local variables. */
+static void var_remove(LexState *ls, BCReg tolevel)
+{
+ FuncState *fs = ls->fs;
+ while (fs->nactvar > tolevel)
+ var_get(ls, fs, --fs->nactvar).endpc = fs->pc;
+}
+
+/* Lookup local variable name. */
+static BCReg var_lookup_local(FuncState *fs, GCstr *n)
+{
+ int i;
+ for (i = fs->nactvar-1; i >= 0; i--) {
+ if (n == strref(var_get(fs->ls, fs, i).name))
+ return (BCReg)i;
+ }
+ return (BCReg)-1; /* Not found. */
+}
+
+/* Lookup or add upvalue index. */
+static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e)
+{
+ MSize i, n = fs->nuv;
+ for (i = 0; i < n; i++)
+ if (fs->uvmap[i] == vidx)
+ return i; /* Already exists. */
+ /* Otherwise create a new one. */
+ checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues");
+ lj_assertFS(e->k == VLOCAL || e->k == VUPVAL, "bad expr type %d", e->k);
+ fs->uvmap[n] = (uint16_t)vidx;
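+ /* An upvalue of the enclosing function is encoded as LJ_MAX_VSTACK+info
+ ** and resolved later in fs_fixup_uv2(). */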
+ fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? vidx : LJ_MAX_VSTACK+e->u.s.info);
+ fs->nuv = n+1;
+ return n;
+}
+
+/* Forward declaration. */
+static void fscope_uvmark(FuncState *fs, BCReg level);
+
+/* Recursively lookup variables in enclosing functions. */
+static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first)
+{
+ if (fs) {
+ BCReg reg = var_lookup_local(fs, name);
+ if ((int32_t)reg >= 0) { /* Local in this function? */
+ expr_init(e, VLOCAL, reg);
+ if (!first)
+ fscope_uvmark(fs, reg); /* Scope now has an upvalue. */
+ return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]);
+ } else {
+ MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */
+ if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */
+ e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e);
+ e->k = VUPVAL;
+ return vidx;
+ }
+ }
+ } else { /* Not found in any function, must be a global. */
+ expr_init(e, VGLOBAL, 0);
+ e->u.sval = name;
+ }
+ return (MSize)-1; /* Global. */
+}
+
+/* Lookup variable name. */
+#define var_lookup(ls, e) \
+ var_lookup_((ls)->fs, lex_str(ls), (e), 1)
+
+/* -- Goto and label handling --------------------------------------------- */
+
+/* Add a new goto or label. */
+static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc)
+{
+ FuncState *fs = ls->fs;
+ MSize vtop = ls->vtop;
+ if (LJ_UNLIKELY(vtop >= ls->sizevstack)) {
+ if (ls->sizevstack >= LJ_MAX_VSTACK)
+ lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK);
+ lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo);
+ }
+ lj_assertFS(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL,
+ "unanchored label name");
+ /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */
+ setgcref(ls->vstack[vtop].name, obj2gco(name));
+ ls->vstack[vtop].startpc = pc;
+ ls->vstack[vtop].slot = (uint8_t)fs->nactvar;
+ ls->vstack[vtop].info = info;
+ ls->vtop = vtop+1;
+ return vtop;
+}
+
+#define gola_isgoto(v) ((v)->info & VSTACK_GOTO)
+#define gola_islabel(v) ((v)->info & VSTACK_LABEL)
+#define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL))
+
+/* Patch goto to jump to label. */
+static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl)
+{
+ FuncState *fs = ls->fs;
+ BCPos pc = vg->startpc;
+ setgcrefnull(vg->name); /* Invalidate pending goto. */
+ setbc_a(&fs->bcbase[pc].ins, vl->slot);
+ jmp_patch(fs, pc, vl->startpc);
+}
+
+/* Patch goto to close upvalues. */
+static void gola_close(LexState *ls, VarInfo *vg)
+{
+ FuncState *fs = ls->fs;
+ BCPos pc = vg->startpc;
+ BCIns *ip = &fs->bcbase[pc].ins;
+ lj_assertFS(gola_isgoto(vg), "expected goto");
+ lj_assertFS(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO,
+ "bad bytecode op %d", bc_op(*ip));
+ setbc_a(ip, vg->slot);
+ if (bc_op(*ip) == BC_JMP) {
+ BCPos next = jmp_next(fs, pc);
+ if (next != NO_JMP) jmp_patch(fs, next, pc); /* Jump to UCLO. */
+ setbc_op(ip, BC_UCLO); /* Turn into UCLO. */
+ setbc_j(ip, NO_JMP);
+ }
+}
+
+/* Resolve pending forward gotos for label. */
+static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx)
+{
+ VarInfo *vg = ls->vstack + bl->vstart;
+ VarInfo *vl = ls->vstack + idx;
+ for (; vg < vl; vg++)
+ if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) {
+ if (vg->slot < vl->slot) {
+ GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name);
+ lj_assertLS((uintptr_t)name >= VARNAME__MAX, "expected goto name");
+ ls->linenumber = ls->fs->bcbase[vg->startpc].line;
+ lj_assertLS(strref(vg->name) != NAME_BREAK, "unexpected break");
+ lj_lex_error(ls, 0, LJ_ERR_XGSCOPE,
+ strdata(strref(vg->name)), strdata(name));
+ }
+ gola_patch(ls, vg, vl);
+ }
+}
+
+/* Fixup remaining gotos and labels for scope. */
+static void gola_fixup(LexState *ls, FuncScope *bl)
+{
+ VarInfo *v = ls->vstack + bl->vstart;
+ VarInfo *ve = ls->vstack + ls->vtop;
+ for (; v < ve; v++) {
+ GCstr *name = strref(v->name);
+ if (name != NULL) { /* Only consider remaining valid gotos/labels. */
+ if (gola_islabel(v)) {
+ VarInfo *vg;
+ setgcrefnull(v->name); /* Invalidate label that goes out of scope. */
+ for (vg = v+1; vg < ve; vg++) /* Resolve pending backward gotos. */
+ if (strref(vg->name) == name && gola_isgoto(vg)) {
+ if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot)
+ gola_close(ls, vg);
+ gola_patch(ls, vg, v);
+ }
+ } else if (gola_isgoto(v)) {
+ if (bl->prev) { /* Propagate goto or break to outer scope. */
+ bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA;
+ v->slot = bl->nactvar;
+ if ((bl->flags & FSCOPE_UPVAL))
+ gola_close(ls, v);
+ } else { /* No outer scope: undefined goto label or no loop. */
+ ls->linenumber = ls->fs->bcbase[v->startpc].line;
+ if (name == NAME_BREAK)
+ lj_lex_error(ls, 0, LJ_ERR_XBREAK);
+ else
+ lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name));
+ }
+ }
+ }
+ }
+}
+
+/* Find existing label. */
+static VarInfo *gola_findlabel(LexState *ls, GCstr *name)
+{
+ VarInfo *v = ls->vstack + ls->fs->bl->vstart;
+ VarInfo *ve = ls->vstack + ls->vtop;
+ for (; v < ve; v++)
+ if (strref(v->name) == name && gola_islabel(v))
+ return v;
+ return NULL;
+}
+
+/* -- Scope handling ------------------------------------------------------ */
+
+/* Begin a scope. */
+static void fscope_begin(FuncState *fs, FuncScope *bl, int flags)
+{
+ bl->nactvar = (uint8_t)fs->nactvar;
+ bl->flags = flags;
+ bl->vstart = fs->ls->vtop;
+ bl->prev = fs->bl;
+ fs->bl = bl;
+ lj_assertFS(fs->freereg == fs->nactvar, "bad regalloc");
+}
+
+/* End a scope. */
+static void fscope_end(FuncState *fs)
+{
+ FuncScope *bl = fs->bl;
+ LexState *ls = fs->ls;
+ fs->bl = bl->prev;
+ var_remove(ls, bl->nactvar);
+ fs->freereg = fs->nactvar;
+ lj_assertFS(bl->nactvar == fs->nactvar, "bad regalloc");
+ if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL)
+ bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
+ if ((bl->flags & FSCOPE_BREAK)) {
+ if ((bl->flags & FSCOPE_LOOP)) {
+ MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc);
+ ls->vtop = idx; /* Drop break label immediately. */
+ gola_resolve(ls, bl, idx);
+ } else { /* Need the fixup step to propagate the breaks. */
+ gola_fixup(ls, bl);
+ return;
+ }
+ }
+ if ((bl->flags & FSCOPE_GOLA)) {
+ gola_fixup(ls, bl);
+ }
+}
+
+/* Mark scope as having an upvalue. */
+static void fscope_uvmark(FuncState *fs, BCReg level)
+{
+ FuncScope *bl;
+ for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev)
+ ;
+ if (bl)
+ bl->flags |= FSCOPE_UPVAL;
+}
+
+/* -- Function state management ------------------------------------------- */
+
+/* Fixup bytecode for prototype. */
+static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n)
+{
+ BCInsLine *base = fs->bcbase;
+ MSize i;
+ pt->sizebc = n;
+ bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
+ fs->framesize, 0);
+ for (i = 1; i < n; i++)
+ bc[i] = base[i].ins;
+}
+
+/* Fixup upvalues for child prototype, step #2. */
+static void fs_fixup_uv2(FuncState *fs, GCproto *pt)
+{
+ VarInfo *vstack = fs->ls->vstack;
+ uint16_t *uv = proto_uv(pt);
+ MSize i, n = pt->sizeuv;
+ for (i = 0; i < n; i++) {
+ VarIndex vidx = uv[i];
+ if (vidx >= LJ_MAX_VSTACK)
+ uv[i] = vidx - LJ_MAX_VSTACK;
+ else if ((vstack[vidx].info & VSTACK_VAR_RW))
+ uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL;
+ else
+ uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE;
+ }
+}
+
+/* Fixup constants for prototype. */
+static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
+{
+ GCtab *kt;
+ TValue *array;
+ Node *node;
+ MSize i, hmask;
+ checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants");
+ checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants");
+ setmref(pt->k, kptr);
+ pt->sizekn = fs->nkn;
+ pt->sizekgc = fs->nkgc;
+ kt = fs->kt;
+ array = tvref(kt->array);
+ for (i = 0; i < kt->asize; i++)
+ if (tvhaskslot(&array[i])) {
+ TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])];
+ if (LJ_DUALNUM)
+ setintV(tv, (int32_t)i);
+ else
+ setnumV(tv, (lua_Number)i);
+ }
+ node = noderef(kt->node);
+ hmask = kt->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (tvhaskslot(&n->val)) {
+ ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
+ lj_assertFS(!tvisint(&n->key), "unexpected integer key");
+ if (tvisnum(&n->key)) {
+ TValue *tv = &((TValue *)kptr)[kidx];
+ if (LJ_DUALNUM) {
+ lua_Number nn = numV(&n->key);
+ int32_t k = lj_num2int(nn);
+ lj_assertFS(!tvismzero(&n->key), "unexpected -0 key");
+ if ((lua_Number)k == nn)
+ setintV(tv, k);
+ else
+ *tv = n->key;
+ } else {
+ *tv = n->key;
+ }
+ } else {
+ GCobj *o = gcV(&n->key);
+ setgcref(((GCRef *)kptr)[~kidx], o);
+ lj_gc_objbarrier(fs->L, pt, o);
+ if (tvisproto(&n->key))
+ fs_fixup_uv2(fs, gco2pt(o));
+ }
+ }
+ }
+}
+
+/* Fixup upvalues for prototype, step #1. */
+static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv)
+{
+ setmref(pt->uv, uv);
+ pt->sizeuv = fs->nuv;
+ memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex));
+}
+
+#ifndef LUAJIT_DISABLE_DEBUGINFO
+/* Prepare lineinfo for prototype. */
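+/* Line deltas relative to the first line are stored in 1, 2 or 4 bytes
+** each, depending on the range of line numbers in the prototype. */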
+static size_t fs_prep_line(FuncState *fs, BCLine numline)
+{
+ return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
+}
+
+/* Fixup lineinfo for prototype. */
+static void fs_fixup_line(FuncState *fs, GCproto *pt,
+ void *lineinfo, BCLine numline)
+{
+ BCInsLine *base = fs->bcbase + 1;
+ BCLine first = fs->linedefined;
+ MSize i = 0, n = fs->pc-1;
+ pt->firstline = fs->linedefined;
+ pt->numline = numline;
+ setmref(pt->lineinfo, lineinfo);
+ if (LJ_LIKELY(numline < 256)) {
+ uint8_t *li = (uint8_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lj_assertFS(delta >= 0 && delta < 256, "bad line delta");
+ li[i] = (uint8_t)delta;
+ } while (++i < n);
+ } else if (LJ_LIKELY(numline < 65536)) {
+ uint16_t *li = (uint16_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lj_assertFS(delta >= 0 && delta < 65536, "bad line delta");
+ li[i] = (uint16_t)delta;
+ } while (++i < n);
+ } else {
+ uint32_t *li = (uint32_t *)lineinfo;
+ do {
+ BCLine delta = base[i].line - first;
+ lj_assertFS(delta >= 0, "bad line delta");
+ li[i] = (uint32_t)delta;
+ } while (++i < n);
+ }
+}
+
+/* Prepare variable info for prototype. */
+static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
+{
+ VarInfo *vs = ls->vstack, *ve;
+ MSize i, n;
+ BCPos lastpc;
+ lj_buf_reset(&ls->sb); /* Copy to temp. string buffer. */
+ /* Store upvalue names. */
+ for (i = 0, n = fs->nuv; i < n; i++) {
+ GCstr *s = strref(vs[fs->uvmap[i]].name);
+ MSize len = s->len+1;
+ char *p = lj_buf_more(&ls->sb, len);
+ p = lj_buf_wmem(p, strdata(s), len);
+ ls->sb.w = p;
+ }
+ *ofsvar = sbuflen(&ls->sb);
+ lastpc = 0;
+ /* Store local variable names and compressed ranges. */
+ for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) {
+ if (!gola_isgotolabel(vs)) {
+ GCstr *s = strref(vs->name);
+ BCPos startpc;
+ char *p;
+ if ((uintptr_t)s < VARNAME__MAX) {
+ p = lj_buf_more(&ls->sb, 1 + 2*5);
+ *p++ = (char)(uintptr_t)s;
+ } else {
+ MSize len = s->len+1;
+ p = lj_buf_more(&ls->sb, len + 2*5);
+ p = lj_buf_wmem(p, strdata(s), len);
+ }
+ startpc = vs->startpc;
+ p = lj_strfmt_wuleb128(p, startpc-lastpc);
+ p = lj_strfmt_wuleb128(p, vs->endpc-startpc);
+ ls->sb.w = p;
+ lastpc = startpc;
+ }
+ }
+ lj_buf_putb(&ls->sb, '\0'); /* Terminator for varinfo. */
+ return sbuflen(&ls->sb);
+}
+
+/* Fixup variable info for prototype. */
+static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
+{
+ setmref(pt->uvinfo, p);
+ setmref(pt->varinfo, (char *)p + ofsvar);
+ memcpy(p, ls->sb.b, sbuflen(&ls->sb)); /* Copy from temp. buffer. */
+}
+#else
+
+/* Initialize with empty debug info, if disabled. */
+#define fs_prep_line(fs, numline) (UNUSED(numline), 0)
+#define fs_fixup_line(fs, pt, li, numline) \
+ pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL)
+#define fs_prep_var(ls, fs, ofsvar) (UNUSED(ofsvar), 0)
+#define fs_fixup_var(ls, pt, p, ofsvar) \
+ setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL)
+
+#endif
+
+/* Check if bytecode op returns. */
+static int bcopisret(BCOp op)
+{
+ switch (op) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Fixup return instruction for prototype. */
+static void fs_fixup_ret(FuncState *fs)
+{
+ BCPos lastpc = fs->pc;
+ if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) {
+ if ((fs->bl->flags & FSCOPE_UPVAL))
+ bcemit_AJ(fs, BC_UCLO, 0, 0);
+ bcemit_AD(fs, BC_RET0, 0, 1); /* Need final return. */
+ }
+ fs->bl->flags |= FSCOPE_NOCLOSE; /* Handled above. */
+ fscope_end(fs);
+ lj_assertFS(fs->bl == NULL, "bad scope nesting");
+ /* May need to fixup returns encoded before first function was created. */
+ if (fs->flags & PROTO_FIXUP_RETURN) {
+ BCPos pc;
+ for (pc = 1; pc < lastpc; pc++) {
+ BCIns ins = fs->bcbase[pc].ins;
+ BCPos offset;
+ switch (bc_op(ins)) {
+ case BC_CALLMT: case BC_CALLT:
+ case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
+ offset = bcemit_INS(fs, ins); /* Copy original instruction. */
+ fs->bcbase[offset].line = fs->bcbase[pc].line;
+ offset = offset-(pc+1)+BCBIAS_J;
+ if (offset > BCMAX_D)
+ err_syntax(fs->ls, LJ_ERR_XFIXUP);
+ /* Replace with UCLO plus branch. */
+ fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset);
+ break;
+ case BC_FNEW:
+ return; /* We're done. */
+ default:
+ break;
+ }
+ }
+ }
+}
+
+/* Finish a FuncState and return the new prototype. */
+static GCproto *fs_finish(LexState *ls, BCLine line)
+{
+ lua_State *L = ls->L;
+ FuncState *fs = ls->fs;
+ BCLine numline = line - fs->linedefined;
+ size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar;
+ GCproto *pt;
+
+ /* Apply final fixups. */
+ fs_fixup_ret(fs);
+
+ /* Calculate total size of prototype including all colocated arrays. */
+ sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef);
+ sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1);
+ ofsk = sizept; sizept += fs->nkn*sizeof(TValue);
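+ /* Pad the 2 byte upvalue entries to an even count for 4 byte alignment. */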
+ ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2;
+ ofsli = sizept; sizept += fs_prep_line(fs, numline);
+ ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar);
+
+ /* Allocate prototype and initialize its fields. */
+ pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept);
+ pt->gct = ~LJ_TPROTO;
+ pt->sizept = (MSize)sizept;
+ pt->trace = 0;
+ pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN));
+ pt->numparams = fs->numparams;
+ pt->framesize = fs->framesize;
+ setgcref(pt->chunkname, obj2gco(ls->chunkname));
+
+ /* Close potentially uninitialized gap between bc and kgc. */
+ *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0;
+ fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc);
+ fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk));
+ fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv));
+ fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline);
+ fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar);
+
+ lj_vmevent_send(L, BC,
+ setprotoV(L, L->top++, pt);
+ );
+
+ L->top--; /* Pop table of constants. */
+ ls->vtop = fs->vbase; /* Reset variable stack. */
+ ls->fs = fs->prev;
+ lj_assertL(ls->fs != NULL || ls->tok == TK_eof, "bad parser state");
+ return pt;
+}
+
+/* Initialize a new FuncState. */
+static void fs_init(LexState *ls, FuncState *fs)
+{
+ lua_State *L = ls->L;
+ fs->prev = ls->fs; ls->fs = fs; /* Append to list. */
+ fs->ls = ls;
+ fs->vbase = ls->vtop;
+ fs->L = L;
+ fs->pc = 0;
+ fs->lasttarget = 0;
+ fs->jpc = NO_JMP;
+ fs->freereg = 0;
+ fs->nkgc = 0;
+ fs->nkn = 0;
+ fs->nactvar = 0;
+ fs->nuv = 0;
+ fs->bl = NULL;
+ fs->flags = 0;
+ fs->framesize = 1; /* Minimum frame size. */
+ fs->kt = lj_tab_new(L, 0, 0);
+ /* Anchor table of constants in stack to avoid being collected. */
+ settabV(L, L->top, fs->kt);
+ incr_top(L);
+}
+
+/* -- Expressions --------------------------------------------------------- */
+
+/* Forward declaration. */
+static void expr(LexState *ls, ExpDesc *v);
+
+/* Return string expression. */
+static void expr_str(LexState *ls, ExpDesc *e)
+{
+ expr_init(e, VKSTR, 0);
+ e->u.sval = lex_str(ls);
+}
+
+/* Return index expression. */
+static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e)
+{
+ /* Already called: expr_toval(fs, e). */
+ t->k = VINDEXED;
+ if (expr_isnumk(e)) {
+#if LJ_DUALNUM
+ if (tvisint(expr_numtv(e))) {
+ int32_t k = intV(expr_numtv(e));
+ if (checku8(k)) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+ }
+#else
+ lua_Number n = expr_numberV(e);
+ int32_t k = lj_num2int(n);
+ if (checku8(k) && n == (lua_Number)k) {
+ t->u.s.aux = BCMAX_C+1+(uint32_t)k; /* 256..511: const byte key */
+ return;
+ }
+#endif
+ } else if (expr_isstrk(e)) {
+ BCReg idx = const_str(fs, e);
+ if (idx <= BCMAX_C) {
+ t->u.s.aux = ~idx; /* -256..-1: const string key */
+ return;
+ }
+ }
+ t->u.s.aux = expr_toanyreg(fs, e); /* 0..255: register */
+}
+
+/* Parse index expression with named field. */
+static void expr_field(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ lj_lex_next(ls); /* Skip dot or colon. */
+ expr_str(ls, &key);
+ expr_index(fs, v, &key);
+}
+
+/* Parse index expression with brackets. */
+static void expr_bracket(LexState *ls, ExpDesc *v)
+{
+ lj_lex_next(ls); /* Skip '['. */
+ expr(ls, v);
+ expr_toval(ls->fs, v);
+ lex_check(ls, ']');
+}
+
+/* Get value of constant expression. */
+static void expr_kvalue(FuncState *fs, TValue *v, ExpDesc *e)
+{
+ UNUSED(fs);
+ if (e->k <= VKTRUE) {
+ setpriV(v, ~(uint32_t)e->k);
+ } else if (e->k == VKSTR) {
+ setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR);
+ } else {
+ lj_assertFS(tvisnumber(expr_numtv(e)), "bad number constant");
+ *v = *expr_numtv(e);
+ }
+}
+
+/* Parse table constructor expression. */
+static void expr_table(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ BCLine line = ls->linenumber;
+ GCtab *t = NULL;
+ int vcall = 0, needarr = 0, fixt = 0;
+ uint32_t narr = 1; /* First array index. */
+ uint32_t nhash = 0; /* Number of hash entries. */
+ BCReg freg = fs->freereg;
+ BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0);
+ expr_init(e, VNONRELOC, freg);
+ bcreg_reserve(fs, 1);
+ freg++;
+ lex_check(ls, '{');
+ while (ls->tok != '}') {
+ ExpDesc key, val;
+ vcall = 0;
+ if (ls->tok == '[') {
+ expr_bracket(ls, &key); /* Already calls expr_toval. */
+ if (!expr_isk(&key)) expr_index(fs, e, &key);
+ if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
+ lex_check(ls, '=');
+ } else if ((ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) &&
+ lj_lex_lookahead(ls) == '=') {
+ expr_str(ls, &key);
+ lex_check(ls, '=');
+ nhash++;
+ } else {
+ expr_init(&key, VKNUM, 0);
+ setintV(&key.u.nval, (int)narr);
+ narr++;
+ needarr = vcall = 1;
+ }
+ expr(ls, &val);
+ if (expr_isk(&key) && key.k != VKNIL &&
+ (key.k == VKSTR || expr_isk_nojump(&val))) {
+ TValue k, *v;
+ if (!t) { /* Create template table on demand. */
+ BCReg kidx;
+ t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash));
+ kidx = const_gc(fs, obj2gco(t), LJ_TTAB);
+ fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
+ }
+ vcall = 0;
+ expr_kvalue(fs, &k, &key);
+ v = lj_tab_set(fs->L, t, &k);
+ lj_gc_anybarriert(fs->L, t);
+ if (expr_isk_nojump(&val)) { /* Add const key/value to template table. */
+ expr_kvalue(fs, v, &val);
+ } else { /* Otherwise create dummy string key (avoids lj_tab_newkey). */
+ settabV(fs->L, v, t); /* Preserve key with table itself as value. */
+ fixt = 1; /* Fix this later, after all resizes. */
+ goto nonconst;
+ }
+ } else {
+ nonconst:
+ if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; }
+ if (expr_isk(&key)) expr_index(fs, e, &key);
+ bcemit_store(fs, e, &val);
+ }
+ fs->freereg = freg;
+ if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break;
+ }
+ lex_match(ls, '}', '{', line);
+ if (vcall) {
+ BCInsLine *ilp = &fs->bcbase[fs->pc-1];
+ ExpDesc en;
+ lj_assertFS(bc_a(ilp->ins) == freg &&
+ bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB),
+ "bad CALL code generation");
+ expr_init(&en, VKNUM, 0);
+ en.u.nval.u32.lo = narr-1;
+ en.u.nval.u32.hi = 0x43300000; /* Biased integer to avoid denormals. */
+ if (narr > 256) { fs->pc--; ilp--; }
+ ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en));
+ setbc_b(&ilp[-1].ins, 0);
+ }
+ if (pc == fs->pc-1) { /* Make expr relocable if possible. */
+ e->u.s.info = pc;
+ fs->freereg--;
+ e->k = VRELOCABLE;
+ } else {
+ e->k = VNONRELOC; /* May have been changed by expr_index. */
+ }
+ if (!t) { /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */
+ BCIns *ip = &fs->bcbase[pc].ins;
+ if (!needarr) narr = 0;
+ else if (narr < 3) narr = 3;
+ else if (narr > 0x7ff) narr = 0x7ff;
+ setbc_d(ip, narr|(hsize2hbits(nhash)<<11));
+ } else {
+ if (needarr && t->asize < narr)
+ lj_tab_reasize(fs->L, t, narr-1);
+ if (fixt) { /* Fix value for dummy keys in template table. */
+ Node *node = noderef(t->node);
+ uint32_t i, hmask = t->hmask;
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (tvistab(&n->val)) {
+ lj_assertFS(tabV(&n->val) == t, "bad dummy key in template table");
+ setnilV(&n->val); /* Turn value into nil. */
+ }
+ }
+ }
+ lj_gc_check(fs->L);
+ }
+}
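+
+/* Example: a mostly-constant constructor like '{1, 2, x = 3}' compiles
+** to a single BC_TDUP of a prebuilt template table instead of BC_TNEW
+** plus one store per entry. Constant keys with non-constant values still
+** get a store, but are pre-entered as dummy keys so the table is sized
+** correctly up front.
+*/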
+
+/* Parse function parameters. */
+static BCReg parse_params(LexState *ls, int needself)
+{
+ FuncState *fs = ls->fs;
+ BCReg nparams = 0;
+ lex_check(ls, '(');
+ if (needself)
+ var_new_lit(ls, nparams++, "self");
+ if (ls->tok != ')') {
+ do {
+ if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
+ var_new(ls, nparams++, lex_str(ls));
+ } else if (ls->tok == TK_dots) {
+ lj_lex_next(ls);
+ fs->flags |= PROTO_VARARG;
+ break;
+ } else {
+ err_syntax(ls, LJ_ERR_XPARAM);
+ }
+ } while (lex_opt(ls, ','));
+ }
+ var_add(ls, nparams);
+ lj_assertFS(fs->nactvar == nparams, "bad regalloc");
+ bcreg_reserve(fs, nparams);
+ lex_check(ls, ')');
+ return nparams;
+}
+
+/* Forward declaration. */
+static void parse_chunk(LexState *ls);
+
+/* Parse body of a function. */
+static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
+{
+ FuncState fs, *pfs = ls->fs;
+ FuncScope bl;
+ GCproto *pt;
+ ptrdiff_t oldbase = pfs->bcbase - ls->bcstack;
+ fs_init(ls, &fs);
+ fscope_begin(&fs, &bl, 0);
+ fs.linedefined = line;
+ fs.numparams = (uint8_t)parse_params(ls, needself);
+ fs.bcbase = pfs->bcbase + pfs->pc;
+ fs.bclim = pfs->bclim - pfs->pc;
+ bcemit_AD(&fs, BC_FUNCF, 0, 0); /* Placeholder. */
+ parse_chunk(ls);
+ if (ls->tok != TK_end) lex_match(ls, TK_end, TK_function, line);
+ pt = fs_finish(ls, (ls->lastline = ls->linenumber));
+ pfs->bcbase = ls->bcstack + oldbase; /* May have been reallocated. */
+ pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
+ /* Store new prototype in the constant array of the parent. */
+ expr_init(e, VRELOCABLE,
+ bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO)));
+#if LJ_HASFFI
+ pfs->flags |= (fs.flags & PROTO_FFI);
+#endif
+ if (!(pfs->flags & PROTO_CHILD)) {
+ if (pfs->flags & PROTO_HAS_RETURN)
+ pfs->flags |= PROTO_FIXUP_RETURN;
+ pfs->flags |= PROTO_CHILD;
+ }
+ lj_lex_next(ls);
+}
+
+/* Parse expression list. Last expression is left open. */
+static BCReg expr_list(LexState *ls, ExpDesc *v)
+{
+ BCReg n = 1;
+ expr(ls, v);
+ while (lex_opt(ls, ',')) {
+ expr_tonextreg(ls->fs, v);
+ expr(ls, v);
+ n++;
+ }
+ return n;
+}
+
+/* Parse function argument list. */
+static void parse_args(LexState *ls, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc args;
+ BCIns ins;
+ BCReg base;
+ BCLine line = ls->linenumber;
+ if (ls->tok == '(') {
+#if !LJ_52
+ if (line != ls->lastline)
+ err_syntax(ls, LJ_ERR_XAMBIG);
+#endif
+ lj_lex_next(ls);
+ if (ls->tok == ')') { /* f(). */
+ args.k = VVOID;
+ } else {
+ expr_list(ls, &args);
+ if (args.k == VCALL) /* f(a, b, g()) or f(a, b, ...). */
+ setbc_b(bcptr(fs, &args), 0); /* Pass on multiple results. */
+ }
+ lex_match(ls, ')', '(', line);
+ } else if (ls->tok == '{') {
+ expr_table(ls, &args);
+ } else if (ls->tok == TK_string) {
+ expr_init(&args, VKSTR, 0);
+ args.u.sval = strV(&ls->tokval);
+ lj_lex_next(ls);
+ } else {
+ err_syntax(ls, LJ_ERR_XFUNARG);
+ return; /* Silence compiler. */
+ }
+ lj_assertFS(e->k == VNONRELOC, "bad expr type %d", e->k);
+ base = e->u.s.info; /* Base register for call. */
+ if (args.k == VCALL) {
+ ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2);
+ } else {
+ if (args.k != VVOID)
+ expr_tonextreg(fs, &args);
+ ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base - LJ_FR2);
+ }
+ expr_init(e, VCALL, bcemit_INS(fs, ins));
+ e->u.s.aux = base;
+ fs->bcbase[fs->pc - 1].line = line;
+ fs->freereg = base+1; /* Leave one result by default. */
+}
+
+/* Parse primary expression. */
+static void expr_primary(LexState *ls, ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ /* Parse prefix expression. */
+ if (ls->tok == '(') {
+ BCLine line = ls->linenumber;
+ lj_lex_next(ls);
+ expr(ls, v);
+ lex_match(ls, ')', '(', line);
+ expr_discharge(ls->fs, v);
+ } else if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
+ var_lookup(ls, v);
+ } else {
+ err_syntax(ls, LJ_ERR_XSYMBOL);
+ }
+ for (;;) { /* Parse multiple expression suffixes. */
+ if (ls->tok == '.') {
+ expr_field(ls, v);
+ } else if (ls->tok == '[') {
+ ExpDesc key;
+ expr_toanyreg(fs, v);
+ expr_bracket(ls, &key);
+ expr_index(fs, v, &key);
+ } else if (ls->tok == ':') {
+ ExpDesc key;
+ lj_lex_next(ls);
+ expr_str(ls, &key);
+ bcemit_method(fs, v, &key);
+ parse_args(ls, v);
+ } else if (ls->tok == '(' || ls->tok == TK_string || ls->tok == '{') {
+ expr_tonextreg(fs, v);
+ if (LJ_FR2) bcreg_reserve(fs, 1);
+ parse_args(ls, v);
+ } else {
+ break;
+ }
+ }
+}
+
+/* Parse simple expression. */
+static void expr_simple(LexState *ls, ExpDesc *v)
+{
+ switch (ls->tok) {
+ case TK_number:
+ expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokval)) ? VKCDATA : VKNUM, 0);
+ copyTV(ls->L, &v->u.nval, &ls->tokval);
+ break;
+ case TK_string:
+ expr_init(v, VKSTR, 0);
+ v->u.sval = strV(&ls->tokval);
+ break;
+ case TK_nil:
+ expr_init(v, VKNIL, 0);
+ break;
+ case TK_true:
+ expr_init(v, VKTRUE, 0);
+ break;
+ case TK_false:
+ expr_init(v, VKFALSE, 0);
+ break;
+ case TK_dots: { /* Vararg. */
+ FuncState *fs = ls->fs;
+ BCReg base;
+ checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS);
+ bcreg_reserve(fs, 1);
+ base = fs->freereg-1;
+ expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams));
+ v->u.s.aux = base;
+ break;
+ }
+ case '{': /* Table constructor. */
+ expr_table(ls, v);
+ return;
+ case TK_function:
+ lj_lex_next(ls);
+ parse_body(ls, v, 0, ls->linenumber);
+ return;
+ default:
+ expr_primary(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+}
+
+/* Manage syntactic levels to avoid blowing up the stack. */
+static void synlevel_begin(LexState *ls)
+{
+ if (++ls->level >= LJ_MAX_XLEVEL)
+ lj_lex_error(ls, 0, LJ_ERR_XLEVELS);
+}
+
+#define synlevel_end(ls) ((ls)->level--)
+
+/* Convert token to binary operator. */
+static BinOpr token2binop(LexToken tok)
+{
+ switch (tok) {
+ case '+': return OPR_ADD;
+ case '-': return OPR_SUB;
+ case '*': return OPR_MUL;
+ case '/': return OPR_DIV;
+ case '%': return OPR_MOD;
+ case '^': return OPR_POW;
+ case TK_concat: return OPR_CONCAT;
+ case TK_ne: return OPR_NE;
+ case TK_eq: return OPR_EQ;
+ case '<': return OPR_LT;
+ case TK_le: return OPR_LE;
+ case '>': return OPR_GT;
+ case TK_ge: return OPR_GE;
+ case TK_and: return OPR_AND;
+ case TK_or: return OPR_OR;
+ default: return OPR_NOBINOPR;
+ }
+}
+
+/* Priorities for each binary operator. ORDER OPR. */
+static const struct {
+ uint8_t left; /* Left priority. */
+ uint8_t right; /* Right priority. */
+} priority[] = {
+ {6,6}, {6,6}, {7,7}, {7,7}, {7,7}, /* ADD SUB MUL DIV MOD */
+ {10,9}, {5,4}, /* POW CONCAT (right associative) */
+ {3,3}, {3,3}, /* EQ NE */
+ {3,3}, {3,3}, {3,3}, {3,3}, /* LT GE GT LE */
+ {2,2}, {1,1} /* AND OR */
+};
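+
+/* Example: an operator is right-associative when its right priority is
+** lower than its left one. For '2^3^4' the recursive RHS parse runs with
+** limit 9 (< POW's left priority 10), so it consumes the second '^'
+** first and yields 2^(3^4). For '1-2-3' (left 6, right 6) the recursion
+** stops at the second '-' and yields (1-2)-3.
+*/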
+
+#define UNARY_PRIORITY 8 /* Priority for unary operators. */
+
+/* Forward declaration. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);
+
+/* Parse unary expression. */
+static void expr_unop(LexState *ls, ExpDesc *v)
+{
+ BCOp op;
+ if (ls->tok == TK_not) {
+ op = BC_NOT;
+ } else if (ls->tok == '-') {
+ op = BC_UNM;
+ } else if (ls->tok == '#') {
+ op = BC_LEN;
+ } else {
+ expr_simple(ls, v);
+ return;
+ }
+ lj_lex_next(ls);
+ expr_binop(ls, v, UNARY_PRIORITY);
+ bcemit_unop(ls->fs, op, v);
+}
+
+/* Parse binary expressions with priority higher than the limit. */
+static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
+{
+ BinOpr op;
+ synlevel_begin(ls);
+ expr_unop(ls, v);
+ op = token2binop(ls->tok);
+ while (op != OPR_NOBINOPR && priority[op].left > limit) {
+ ExpDesc v2;
+ BinOpr nextop;
+ lj_lex_next(ls);
+ bcemit_binop_left(ls->fs, op, v);
+ /* Parse binary expression with higher priority. */
+ nextop = expr_binop(ls, &v2, priority[op].right);
+ bcemit_binop(ls->fs, op, v, &v2);
+ op = nextop;
+ }
+ synlevel_end(ls);
+ return op; /* Return unconsumed binary operator (if any). */
+}
+
+/* Parse expression. */
+static void expr(LexState *ls, ExpDesc *v)
+{
+ expr_binop(ls, v, 0); /* Priority 0: parse whole expression. */
+}
+
+/* Assign expression to the next register. */
+static void expr_next(LexState *ls)
+{
+ ExpDesc e;
+ expr(ls, &e);
+ expr_tonextreg(ls->fs, &e);
+}
+
+/* Parse conditional expression. */
+static BCPos expr_cond(LexState *ls)
+{
+ ExpDesc v;
+ expr(ls, &v);
+ if (v.k == VKNIL) v.k = VKFALSE;
+ bcemit_branch_t(ls->fs, &v);
+ return v.f;
+}
+
+/* -- Assignments --------------------------------------------------------- */
+
+/* List of LHS variables. */
+typedef struct LHSVarList {
+ ExpDesc v; /* LHS variable. */
+ struct LHSVarList *prev; /* Link to previous LHS variable. */
+} LHSVarList;
+
+/* Eliminate write-after-read hazards for local variable assignment. */
+static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v)
+{
+ FuncState *fs = ls->fs;
+ BCReg reg = v->u.s.info; /* Check against this variable. */
+ BCReg tmp = fs->freereg; /* Rename to this temp. register (if needed). */
+ int hazard = 0;
+ for (; lh; lh = lh->prev) {
+ if (lh->v.k == VINDEXED) {
+ if (lh->v.u.s.info == reg) { /* t[i], t = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.info = tmp;
+ }
+ if (lh->v.u.s.aux == reg) { /* t[i], i = 1, 2 */
+ hazard = 1;
+ lh->v.u.s.aux = tmp;
+ }
+ }
+ }
+ if (hazard) {
+ bcemit_AD(fs, BC_MOV, tmp, reg); /* Rename conflicting variable. */
+ bcreg_reserve(fs, 1);
+ }
+}
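+
+/* Example: in 't[i], t = 1, 2' the store to 't' is emitted before the
+** store to 't[i]' (assignments unwind right to left), so 't' in the
+** indexed LHS is renamed to a temporary register first and the index
+** store still sees the original table.
+*/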
+
+/* Adjust LHS/RHS of an assignment. */
+static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e)
+{
+ FuncState *fs = ls->fs;
+ int32_t extra = (int32_t)nvars - (int32_t)nexps;
+ if (e->k == VCALL) {
+ extra++; /* Compensate for the VCALL itself. */
+ if (extra < 0) extra = 0;
+ setbc_b(bcptr(fs, e), extra+1); /* Fixup call results. */
+ if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1);
+ } else {
+ if (e->k != VVOID)
+ expr_tonextreg(fs, e); /* Close last expression. */
+ if (extra > 0) { /* Leftover LHS are set to nil. */
+ BCReg reg = fs->freereg;
+ bcreg_reserve(fs, (BCReg)extra);
+ bcemit_nil(fs, reg, (BCReg)extra);
+ }
+ }
+ if (nexps > nvars)
+ ls->fs->freereg -= nexps - nvars; /* Drop leftover regs. */
+}
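+
+/* Examples: 'local a, b, c = f()' patches the call to return all three
+** results (extra = 2, so setbc_b(..., 4) requests 3 results). For
+** 'local a, b = 1' the leftover 'b' gets an explicit KNIL; for
+** 'local a = 1, 2' the surplus register is simply dropped again.
+*/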
+
+/* Recursively parse assignment statement. */
+static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars)
+{
+ ExpDesc e;
+ checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX);
+ if (lex_opt(ls, ',')) { /* Collect LHS list and recurse upwards. */
+ LHSVarList vl;
+ vl.prev = lh;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VLOCAL)
+ assign_hazard(ls, lh, &vl.v);
+ checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names");
+ parse_assignment(ls, &vl, nvars+1);
+ } else { /* Parse RHS. */
+ BCReg nexps;
+ lex_check(ls, '=');
+ nexps = expr_list(ls, &e);
+ if (nexps == nvars) {
+ if (e.k == VCALL) {
+ if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) { /* Vararg assignment. */
+ ls->fs->freereg--;
+ e.k = VRELOCABLE;
+ } else { /* Multiple call results. */
+ e.u.s.info = e.u.s.aux; /* Base of call is not relocatable. */
+ e.k = VNONRELOC;
+ }
+ }
+ bcemit_store(ls->fs, &lh->v, &e);
+ return;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ }
+ /* Assign RHS to LHS and recurse downwards. */
+ expr_init(&e, VNONRELOC, ls->fs->freereg-1);
+ bcemit_store(ls->fs, &lh->v, &e);
+}
+
+/* Parse call statement or assignment. */
+static void parse_call_assign(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ LHSVarList vl;
+ expr_primary(ls, &vl.v);
+ if (vl.v.k == VCALL) { /* Function call statement. */
+ setbc_b(bcptr(fs, &vl.v), 1); /* No results. */
+ } else { /* Start of an assignment. */
+ vl.prev = NULL;
+ parse_assignment(ls, &vl, 1);
+ }
+}
+
+/* Parse 'local' statement. */
+static void parse_local(LexState *ls)
+{
+ if (lex_opt(ls, TK_function)) { /* Local function declaration. */
+ ExpDesc v, b;
+ FuncState *fs = ls->fs;
+ var_new(ls, 0, lex_str(ls));
+ expr_init(&v, VLOCAL, fs->freereg);
+ v.u.s.aux = fs->varmap[fs->freereg];
+ bcreg_reserve(fs, 1);
+ var_add(ls, 1);
+ parse_body(ls, &b, 0, ls->linenumber);
+ /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */
+ expr_free(fs, &b);
+ expr_toreg(fs, &b, v.u.s.info);
+ /* The upvalue is in scope, but the local is only valid after the store. */
+ var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc;
+ } else { /* Local variable declaration. */
+ ExpDesc e;
+ BCReg nexps, nvars = 0;
+ do { /* Collect LHS. */
+ var_new(ls, nvars++, lex_str(ls));
+ } while (lex_opt(ls, ','));
+ if (lex_opt(ls, '=')) { /* Optional RHS. */
+ nexps = expr_list(ls, &e);
+ } else { /* Or implicitly set to nil. */
+ e.k = VVOID;
+ nexps = 0;
+ }
+ assign_adjust(ls, nvars, nexps, &e);
+ var_add(ls, nvars);
+ }
+}
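+
+/* Note the ordering: for 'local function f() ... end' the name 'f' is
+** added *before* the body is parsed, so the function can recurse via a
+** reference to itself. 'local f = function() ... end' adds the variable
+** only after the RHS, so 'f' inside the body refers to an outer scope.
+*/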
+
+/* Parse 'function' statement. */
+static void parse_func(LexState *ls, BCLine line)
+{
+ FuncState *fs;
+ ExpDesc v, b;
+ int needself = 0;
+ lj_lex_next(ls); /* Skip 'function'. */
+ /* Parse function name. */
+ var_lookup(ls, &v);
+ while (ls->tok == '.') /* Multiple dot-separated fields. */
+ expr_field(ls, &v);
+ if (ls->tok == ':') { /* Optional colon to signify method call. */
+ needself = 1;
+ expr_field(ls, &v);
+ }
+ parse_body(ls, &b, needself, line);
+ fs = ls->fs;
+ bcemit_store(fs, &v, &b);
+ fs->bcbase[fs->pc - 1].line = line; /* Set line for the store. */
+}
+
+/* -- Control transfer statements ----------------------------------------- */
+
+/* Check for end of block. */
+static int parse_isend(LexToken tok)
+{
+ switch (tok) {
+ case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Parse 'return' statement. */
+static void parse_return(LexState *ls)
+{
+ BCIns ins;
+ FuncState *fs = ls->fs;
+ lj_lex_next(ls); /* Skip 'return'. */
+ fs->flags |= PROTO_HAS_RETURN;
+ if (parse_isend(ls->tok) || ls->tok == ';') { /* Bare return. */
+ ins = BCINS_AD(BC_RET0, 0, 1);
+ } else { /* Return with one or more values. */
+ ExpDesc e; /* Receives the _last_ expression in the list. */
+ BCReg nret = expr_list(ls, &e);
+ if (nret == 1) { /* Return one result. */
+ if (e.k == VCALL) { /* Check for tail call. */
+ BCIns *ip = bcptr(fs, &e);
+ /* It doesn't pay off to add BC_VARGT just for 'return ...'. */
+ if (bc_op(*ip) == BC_VARG) goto notailcall;
+ fs->pc--;
+ ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip));
+ } else { /* Can return the result from any register. */
+ ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2);
+ }
+ } else {
+ if (e.k == VCALL) { /* Append all results from a call. */
+ notailcall:
+ setbc_b(bcptr(fs, &e), 0);
+ ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar);
+ } else {
+ expr_tonextreg(fs, &e); /* Force contiguous registers. */
+ ins = BCINS_AD(BC_RET, fs->nactvar, nret+1);
+ }
+ }
+ }
+ if (fs->flags & PROTO_CHILD)
+ bcemit_AJ(fs, BC_UCLO, 0, 0); /* May need to close upvalues first. */
+ bcemit_INS(fs, ins);
+}
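+
+/* Example: 'return f(x)' pops the just-emitted BC_CALL and re-emits it
+** as BC_CALLT (same op delta turns BC_CALLM into BC_CALLMT), i.e. a tail
+** call that reuses the current frame. 'return ...' keeps BC_VARG and
+** emits BC_RETM instead, since a vararg tail call wouldn't pay off.
+*/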
+
+/* Parse 'break' statement. */
+static void parse_break(LexState *ls)
+{
+ ls->fs->bl->flags |= FSCOPE_BREAK;
+ gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs));
+}
+
+/* Parse 'goto' statement. */
+static void parse_goto(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ GCstr *name = lex_str(ls);
+ VarInfo *vl = gola_findlabel(ls, name);
+ if (vl) /* Treat backwards goto within same scope like a loop. */
+ bcemit_AJ(fs, BC_LOOP, vl->slot, -1); /* No BC range check. */
+ fs->bl->flags |= FSCOPE_GOLA;
+ gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs));
+}
+
+/* Parse label. */
+static void parse_label(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ GCstr *name;
+ MSize idx;
+ fs->lasttarget = fs->pc;
+ fs->bl->flags |= FSCOPE_GOLA;
+ lj_lex_next(ls); /* Skip '::'. */
+ name = lex_str(ls);
+ if (gola_findlabel(ls, name))
+ lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name));
+ idx = gola_new(ls, name, VSTACK_LABEL, fs->pc);
+ lex_check(ls, TK_label);
+ /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */
+ for (;;) {
+ if (ls->tok == TK_label) {
+ synlevel_begin(ls);
+ parse_label(ls);
+ synlevel_end(ls);
+ } else if (LJ_52 && ls->tok == ';') {
+ lj_lex_next(ls);
+ } else {
+ break;
+ }
+ }
+ /* Trailing label is considered to be outside of scope. */
+ if (parse_isend(ls->tok) && ls->tok != TK_until)
+ ls->vstack[idx].slot = fs->bl->nactvar;
+ gola_resolve(ls, fs->bl, idx);
+}
+
+/* -- Blocks, loops and conditional statements ---------------------------- */
+
+/* Parse a block. */
+static void parse_block(LexState *ls)
+{
+ FuncState *fs = ls->fs;
+ FuncScope bl;
+ fscope_begin(fs, &bl, 0);
+ parse_chunk(ls);
+ fscope_end(fs);
+}
+
+/* Parse 'while' statement. */
+static void parse_while(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos start, loop, condexit;
+ FuncScope bl;
+ lj_lex_next(ls); /* Skip 'while'. */
+ start = fs->lasttarget = fs->pc;
+ condexit = expr_cond(ls);
+ fscope_begin(fs, &bl, FSCOPE_LOOP);
+ lex_check(ls, TK_do);
+ loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_block(ls);
+ jmp_patch(fs, bcemit_jmp(fs), start);
+ lex_match(ls, TK_end, TK_while, line);
+ fscope_end(fs);
+ jmp_tohere(fs, condexit);
+ jmp_patchins(fs, loop, fs->pc);
+}
+
+/* Parse 'repeat' statement. */
+static void parse_repeat(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos loop = fs->lasttarget = fs->pc;
+ BCPos condexit;
+ FuncScope bl1, bl2;
+ fscope_begin(fs, &bl1, FSCOPE_LOOP); /* Breakable loop scope. */
+ fscope_begin(fs, &bl2, 0); /* Inner scope. */
+ lj_lex_next(ls); /* Skip 'repeat'. */
+ bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
+ parse_chunk(ls);
+ lex_match(ls, TK_until, TK_repeat, line);
+ condexit = expr_cond(ls); /* Parse condition (still inside inner scope). */
+ if (!(bl2.flags & FSCOPE_UPVAL)) { /* No upvalues? Just end inner scope. */
+ fscope_end(fs);
+ } else { /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */
+ parse_break(ls); /* Break from loop and close upvalues. */
+ jmp_tohere(fs, condexit);
+ fscope_end(fs); /* End inner scope and close upvalues. */
+ condexit = bcemit_jmp(fs);
+ }
+ jmp_patch(fs, condexit, loop); /* Jump backwards if !cond. */
+ jmp_patchins(fs, loop, fs->pc);
+ fscope_end(fs); /* End loop scope. */
+}
+
+/* Parse numeric 'for'. */
+static void parse_for_num(LexState *ls, GCstr *varname, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCReg base = fs->freereg;
+ FuncScope bl;
+ BCPos loop, loopend;
+ /* Hidden control variables. */
+ var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX);
+ var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP);
+ var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP);
+ /* Visible copy of index variable. */
+ var_new(ls, FORL_EXT, varname);
+ lex_check(ls, '=');
+ expr_next(ls);
+ lex_check(ls, ',');
+ expr_next(ls);
+ if (lex_opt(ls, ',')) {
+ expr_next(ls);
+ } else {
+ bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */
+ bcreg_reserve(fs, 1);
+ }
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP);
+ fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, 1);
+ bcreg_reserve(fs, 1);
+ parse_block(ls);
+ fscope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP);
+ fs->bcbase[loopend].line = line; /* Fix line for control ins. */
+ jmp_patchins(fs, loopend, loop+1);
+ jmp_patchins(fs, loop, fs->pc);
+}
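+
+/* Resulting layout for 'for i=start,stop,step do body end':
+**
+**   FORI base, ->exit    ; entry check: skip loop if not taken
+**   body
+**   FORL base, ->body    ; increment and branch back while in range
+**   exit:
+**
+** The conditional backwards branch at the end (loop inversion) leaves a
+** single branch per iteration on the hot path.
+*/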
+
+/* Try to predict whether the iterator is next() and specialize the bytecode.
+** Detecting next() and pairs() by name is simplistic, but quite effective.
+** The interpreter backs off if the check for the closure fails at runtime.
+*/
+static int predict_next(LexState *ls, FuncState *fs, BCPos pc)
+{
+ BCIns ins = fs->bcbase[pc].ins;
+ GCstr *name;
+ cTValue *o;
+ switch (bc_op(ins)) {
+ case BC_MOV:
+ name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name));
+ break;
+ case BC_UGET:
+ name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name));
+ break;
+ case BC_GGET:
+ /* There's no inverse index (yet), so lookup the strings. */
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next"));
+ if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins))
+ return 1;
+ return 0;
+ default:
+ return 0;
+ }
+ return (name->len == 5 && !strcmp(strdata(name), "pairs")) ||
+ (name->len == 4 && !strcmp(strdata(name), "next"));
+}
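+
+/* Example: 'for k, v in pairs(t) do ... end' is predicted here, so
+** parse_for_iter emits BC_ISNEXT/BC_ITERN instead of the generic
+** BC_JMP/BC_ITERC pair; the interpreter despecializes the loop again if
+** the closure turns out not to be next().
+*/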
+
+/* Parse 'for' iterator. */
+static void parse_for_iter(LexState *ls, GCstr *indexname)
+{
+ FuncState *fs = ls->fs;
+ ExpDesc e;
+ BCReg nvars = 0;
+ BCLine line;
+ BCReg base = fs->freereg + 3;
+ BCPos loop, loopend, exprpc = fs->pc;
+ FuncScope bl;
+ int isnext;
+ /* Hidden control variables. */
+ var_new_fixed(ls, nvars++, VARNAME_FOR_GEN);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_STATE);
+ var_new_fixed(ls, nvars++, VARNAME_FOR_CTL);
+ /* Visible variables returned from iterator. */
+ var_new(ls, nvars++, indexname);
+ while (lex_opt(ls, ','))
+ var_new(ls, nvars++, lex_str(ls));
+ lex_check(ls, TK_in);
+ line = ls->linenumber;
+ assign_adjust(ls, 3, expr_list(ls, &e), &e);
+  /* The iterator needs another 3 slots (4 with LJ_FR2): func [pc] state ctl. */
+ bcreg_bump(fs, 3+LJ_FR2);
+ isnext = (nvars <= 5 && predict_next(ls, fs, exprpc));
+ var_add(ls, 3); /* Hidden control variables. */
+ lex_check(ls, TK_do);
+ loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP);
+ fscope_begin(fs, &bl, 0); /* Scope for visible variables. */
+ var_add(ls, nvars-3);
+ bcreg_reserve(fs, nvars-3);
+ parse_block(ls);
+ fscope_end(fs);
+ /* Perform loop inversion. Loop control instructions are at the end. */
+ jmp_patchins(fs, loop, fs->pc);
+ bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1);
+ loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP);
+ fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */
+ fs->bcbase[loopend].line = line;
+ jmp_patchins(fs, loopend, loop+1);
+}
+
+/* Parse 'for' statement. */
+static void parse_for(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ GCstr *varname;
+ FuncScope bl;
+ fscope_begin(fs, &bl, FSCOPE_LOOP);
+ lj_lex_next(ls); /* Skip 'for'. */
+ varname = lex_str(ls); /* Get first variable name. */
+ if (ls->tok == '=')
+ parse_for_num(ls, varname, line);
+ else if (ls->tok == ',' || ls->tok == TK_in)
+ parse_for_iter(ls, varname);
+ else
+ err_syntax(ls, LJ_ERR_XFOR);
+ lex_match(ls, TK_end, TK_for, line);
+ fscope_end(fs); /* Resolve break list. */
+}
+
+/* Parse condition and 'then' block. */
+static BCPos parse_then(LexState *ls)
+{
+ BCPos condexit;
+ lj_lex_next(ls); /* Skip 'if' or 'elseif'. */
+ condexit = expr_cond(ls);
+ lex_check(ls, TK_then);
+ parse_block(ls);
+ return condexit;
+}
+
+/* Parse 'if' statement. */
+static void parse_if(LexState *ls, BCLine line)
+{
+ FuncState *fs = ls->fs;
+ BCPos flist;
+ BCPos escapelist = NO_JMP;
+ flist = parse_then(ls);
+ while (ls->tok == TK_elseif) { /* Parse multiple 'elseif' blocks. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ flist = parse_then(ls);
+ }
+ if (ls->tok == TK_else) { /* Parse optional 'else' block. */
+ jmp_append(fs, &escapelist, bcemit_jmp(fs));
+ jmp_tohere(fs, flist);
+ lj_lex_next(ls); /* Skip 'else'. */
+ parse_block(ls);
+ } else {
+ jmp_append(fs, &escapelist, flist);
+ }
+ jmp_tohere(fs, escapelist);
+ lex_match(ls, TK_end, TK_if, line);
+}
+
+/* -- Parse statements ---------------------------------------------------- */
+
+/* Parse a statement. Returns 1 if it must be the last one in a chunk. */
+static int parse_stmt(LexState *ls)
+{
+ BCLine line = ls->linenumber;
+ switch (ls->tok) {
+ case TK_if:
+ parse_if(ls, line);
+ break;
+ case TK_while:
+ parse_while(ls, line);
+ break;
+ case TK_do:
+ lj_lex_next(ls);
+ parse_block(ls);
+ lex_match(ls, TK_end, TK_do, line);
+ break;
+ case TK_for:
+ parse_for(ls, line);
+ break;
+ case TK_repeat:
+ parse_repeat(ls, line);
+ break;
+ case TK_function:
+ parse_func(ls, line);
+ break;
+ case TK_local:
+ lj_lex_next(ls);
+ parse_local(ls);
+ break;
+ case TK_return:
+ parse_return(ls);
+ return 1; /* Must be last. */
+ case TK_break:
+ lj_lex_next(ls);
+ parse_break(ls);
+ return !LJ_52; /* Must be last in Lua 5.1. */
+#if LJ_52
+ case ';':
+ lj_lex_next(ls);
+ break;
+#endif
+ case TK_label:
+ parse_label(ls);
+ break;
+ case TK_goto:
+ if (LJ_52 || lj_lex_lookahead(ls) == TK_name) {
+ lj_lex_next(ls);
+ parse_goto(ls);
+ break;
+ }
+ /* fallthrough */
+ default:
+ parse_call_assign(ls);
+ break;
+ }
+ return 0;
+}
+
+/* A chunk is a list of statements optionally separated by semicolons. */
+static void parse_chunk(LexState *ls)
+{
+ int islast = 0;
+ synlevel_begin(ls);
+ while (!islast && !parse_isend(ls->tok)) {
+ islast = parse_stmt(ls);
+ lex_opt(ls, ';');
+ lj_assertLS(ls->fs->framesize >= ls->fs->freereg &&
+ ls->fs->freereg >= ls->fs->nactvar,
+ "bad regalloc");
+ ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */
+ }
+ synlevel_end(ls);
+}
+
+/* Entry point of bytecode parser. */
+GCproto *lj_parse(LexState *ls)
+{
+ FuncState fs;
+ FuncScope bl;
+ GCproto *pt;
+ lua_State *L = ls->L;
+#ifdef LUAJIT_DISABLE_DEBUGINFO
+ ls->chunkname = lj_str_newlit(L, "=");
+#else
+ ls->chunkname = lj_str_newz(L, ls->chunkarg);
+#endif
+ setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */
+ incr_top(L);
+ ls->level = 0;
+ fs_init(ls, &fs);
+ fs.linedefined = 0;
+ fs.numparams = 0;
+ fs.bcbase = NULL;
+ fs.bclim = 0;
+ fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */
+ fscope_begin(&fs, &bl, 0);
+ bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */
+ lj_lex_next(ls); /* Read-ahead first token. */
+ parse_chunk(ls);
+ if (ls->tok != TK_eof)
+ err_token(ls, TK_eof);
+ pt = fs_finish(ls, ls->linenumber);
+ L->top--; /* Drop chunkname. */
+ lj_assertL(fs.prev == NULL && ls->fs == NULL, "mismatched frame nesting");
+ lj_assertL(pt->sizeuv == 0, "toplevel proto has upvalues");
+ return pt;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_parse.h b/libs/luajit-cmake/luajit/src/lj_parse.h
new file mode 100644
index 0000000..4206f00
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_parse.h
@@ -0,0 +1,18 @@
+/*
+** Lua parser (source code -> bytecode).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_PARSE_H
+#define _LJ_PARSE_H
+
+#include "lj_obj.h"
+#include "lj_lex.h"
+
+LJ_FUNC GCproto *lj_parse(LexState *ls);
+LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l);
+#if LJ_HASFFI
+LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_prng.c b/libs/luajit-cmake/luajit/src/lj_prng.c
new file mode 100644
index 0000000..01935e5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_prng.c
@@ -0,0 +1,259 @@
+/*
+** Pseudo-random number generation.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_prng_c
+#define LUA_CORE
+
+/* To get the syscall prototype. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#include "lj_def.h"
+#include "lj_arch.h"
+#include "lj_prng.h"
+
+/* -- PRNG step function -------------------------------------------------- */
+
+/* This implements a Tausworthe PRNG with period 2^223. Based on:
+** Tables of maximally-equidistributed combined LFSR generators,
+** Pierre L'Ecuyer, 1999, table 3, 1st entry.
+** Full-period ME-CF generator with L=64, J=4, k=223, N1=49.
+**
+** Important note: This PRNG is NOT suitable for cryptographic use!
+**
+** But it works fine for math.random(), which has an API that's not
+** suitable for cryptography, anyway.
+**
+** When used as a securely seeded global PRNG, it substantially raises
+** the difficulty for various attacks on the VM.
+*/
+
+/* Update generator i and compute a running xor of all states. */
+#define TW223_GEN(rs, z, r, i, k, q, s) \
+ z = rs->u[i]; \
+ z = (((z<<q)^z) >> (k-s)) ^ ((z&((uint64_t)(int64_t)-1 << (64-k)))<<s); \
+ r ^= z; rs->u[i] = z;
+
+#define TW223_STEP(rs, z, r) \
+ TW223_GEN(rs, z, r, 0, 63, 31, 18) \
+ TW223_GEN(rs, z, r, 1, 58, 19, 28) \
+ TW223_GEN(rs, z, r, 2, 55, 24, 7) \
+ TW223_GEN(rs, z, r, 3, 47, 21, 8)
+
+/* PRNG step function with uint64_t result. */
+LJ_NOINLINE uint64_t LJ_FASTCALL lj_prng_u64(PRNGState *rs)
+{
+ uint64_t z, r = 0;
+ TW223_STEP(rs, z, r)
+ return r;
+}
+
+/* PRNG step function with double in uint64_t result. */
+LJ_NOINLINE uint64_t LJ_FASTCALL lj_prng_u64d(PRNGState *rs)
+{
+ uint64_t z, r = 0;
+ TW223_STEP(rs, z, r)
+ /* Returns a double bit pattern in the range 1.0 <= d < 2.0. */
+ return (r & U64x(000fffff,ffffffff)) | U64x(3ff00000,00000000);
+}
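+
+/* Consumers reinterpret the result as a double and subtract 1.0 to get a
+** uniform value in [0, 1): the 1.0 <= d < 2.0 bit pattern is built
+** directly in the mantissa, avoiding an integer-to-double conversion.
+*/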
+
+/* Condition seed: ensure k[i] MSB of u[i] are non-zero. */
+static LJ_AINLINE void lj_prng_condition(PRNGState *rs)
+{
+ if (rs->u[0] < (1u << 1)) rs->u[0] += (1u << 1);
+ if (rs->u[1] < (1u << 6)) rs->u[1] += (1u << 6);
+ if (rs->u[2] < (1u << 9)) rs->u[2] += (1u << 9);
+ if (rs->u[3] < (1u << 17)) rs->u[3] += (1u << 17);
+}
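+
+/* Generator i only uses the top k[i] bits of its state word (k = 63, 58,
+** 55, 47); if those bits were all zero, that LFSR would remain stuck at
+** zero. The checks above force at least one of the top k bits to be set.
+*/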
+
+/* -- PRNG seeding from OS ------------------------------------------------ */
+
+#if LUAJIT_SECURITY_PRNG == 0
+
+/* Nothing to define. */
+
+#elif LJ_TARGET_XBOX360
+
+extern int XNetRandom(void *buf, unsigned int len);
+
+#elif LJ_TARGET_PS3
+
+extern int sys_get_random_number(void *buf, uint64_t len);
+
+#elif LJ_TARGET_PS4 || LJ_TARGET_PS5 || LJ_TARGET_PSVITA
+
+extern int sceRandomGetRandomNumber(void *buf, size_t len);
+
+#elif LJ_TARGET_NX
+
+#include <unistd.h>
+
+#elif LJ_TARGET_WINDOWS || LJ_TARGET_XBOXONE
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#if LJ_TARGET_UWP || LJ_TARGET_XBOXONE
+/* Must use BCryptGenRandom. */
+#include <bcrypt.h>
+#pragma comment(lib, "bcrypt.lib")
+#else
+/* If you wonder about this mess, then search online for RtlGenRandom. */
+typedef BOOLEAN (WINAPI *PRGR)(void *buf, ULONG len);
+static PRGR libfunc_rgr;
+#endif
+
+#elif LJ_TARGET_POSIX
+
+#if LJ_TARGET_LINUX
+/* Avoid a dependency on glibc 2.25+ and use the getrandom syscall instead. */
+#include <sys/syscall.h>
+#else
+
+#if LJ_TARGET_OSX && !LJ_TARGET_IOS
+/*
+** In their infinite wisdom Apple decided to disallow getentropy() in the
+** iOS App Store. Even though the call is common to all BSD-ish OS, it's
+** recommended by Apple in their own security-related docs, and, to top
+** off the foolery, /dev/urandom is handled by the same kernel code,
+** yet accessing it is actually permitted (but less efficient).
+*/
+#include <Availability.h>
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200
+#define LJ_TARGET_HAS_GETENTROPY 1
+#endif
+#elif (LJ_TARGET_BSD && !defined(__NetBSD__)) || LJ_TARGET_SOLARIS || LJ_TARGET_CYGWIN || LJ_TARGET_QNX
+#define LJ_TARGET_HAS_GETENTROPY 1
+#endif
+
+#if LJ_TARGET_HAS_GETENTROPY
+extern int getentropy(void *buf, size_t len)
+#ifdef __ELF__
+ __attribute__((weak))
+#endif
+;
+#endif
+
+#endif
+
+/* For the /dev/urandom fallback. */
+#include <fcntl.h>
+#include <unistd.h>
+
+#endif
+
+#if LUAJIT_SECURITY_PRNG == 0
+
+/* If you really don't care about security, then define
+** LUAJIT_SECURITY_PRNG=0. This yields a predictable seed
+** and provides NO SECURITY against various attacks on the VM.
+**
+** BTW: This is NOT the way to get predictable table iteration,
+** predictable trace generation, predictable bytecode generation, etc.
+*/
+int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs)
+{
+ lj_prng_seed_fixed(rs); /* The fixed seed is already conditioned. */
+ return 1;
+}
+
+#else
+
+/* Securely seed PRNG from system entropy. Returns 0 on failure. */
+int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs)
+{
+#if LJ_TARGET_XBOX360
+
+ if (XNetRandom(rs->u, (unsigned int)sizeof(rs->u)) == 0)
+ goto ok;
+
+#elif LJ_TARGET_PS3
+
+ if (sys_get_random_number(rs->u, sizeof(rs->u)) == 0)
+ goto ok;
+
+#elif LJ_TARGET_PS4 || LJ_TARGET_PS5 || LJ_TARGET_PSVITA
+
+ if (sceRandomGetRandomNumber(rs->u, sizeof(rs->u)) == 0)
+ goto ok;
+
+#elif LJ_TARGET_NX
+
+ if (getentropy(rs->u, sizeof(rs->u)) == 0)
+ goto ok;
+
+#elif LJ_TARGET_UWP || LJ_TARGET_XBOXONE
+
+ if (BCryptGenRandom(NULL, (PUCHAR)(rs->u), (ULONG)sizeof(rs->u),
+ BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0)
+ goto ok;
+
+#elif LJ_TARGET_WINDOWS
+
+ /* Keep the library loaded in case multiple VMs are started. */
+ if (!libfunc_rgr) {
+ HMODULE lib = LJ_WIN_LOADLIBA("advapi32.dll");
+ if (!lib) return 0;
+ libfunc_rgr = (PRGR)GetProcAddress(lib, "SystemFunction036");
+ if (!libfunc_rgr) return 0;
+ }
+ if (libfunc_rgr(rs->u, (ULONG)sizeof(rs->u)))
+ goto ok;
+
+#elif LJ_TARGET_POSIX
+
+#if LJ_TARGET_LINUX && defined(SYS_getrandom)
+
+ if (syscall(SYS_getrandom, rs->u, sizeof(rs->u), 0) == (long)sizeof(rs->u))
+ goto ok;
+
+#elif LJ_TARGET_HAS_GETENTROPY
+
+#ifdef __ELF__
+ if (&getentropy && getentropy(rs->u, sizeof(rs->u)) == 0)
+ goto ok;
+#else
+ if (getentropy(rs->u, sizeof(rs->u)) == 0)
+ goto ok;
+#endif
+
+#endif
+
+  /* Fallback to /dev/urandom. This may fail if the device does not
+  ** exist or is not accessible in a chroot or container, or if the
+  ** process or the OS ran out of file descriptors.
+ */
+ {
+ int fd = open("/dev/urandom", O_RDONLY|O_CLOEXEC);
+ if (fd != -1) {
+ ssize_t n = read(fd, rs->u, sizeof(rs->u));
+ (void)close(fd);
+ if (n == (ssize_t)sizeof(rs->u))
+ goto ok;
+ }
+ }
+
+#else
+
+ /* Add an elif above for your OS with a secure PRNG seed.
+ ** Note that fiddling around with rand(), getpid(), time() or coercing
+ ** ASLR to yield a few bits of randomness is not helpful.
+ ** If you don't want any security, then don't pretend you have any
+ ** and simply define LUAJIT_SECURITY_PRNG=0 for the build.
+ */
+#error "Missing secure PRNG seed for this OS"
+
+#endif
+ return 0; /* Fail. */
+
+ok:
+ lj_prng_condition(rs);
+ (void)lj_prng_u64(rs);
+ return 1; /* Success. */
+}
+
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lj_prng.h b/libs/luajit-cmake/luajit/src/lj_prng.h
new file mode 100644
index 0000000..bdc958a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_prng.h
@@ -0,0 +1,24 @@
+/*
+** Pseudo-random number generation.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_PRNG_H
+#define _LJ_PRNG_H
+
+#include "lj_def.h"
+
+LJ_FUNC int LJ_FASTCALL lj_prng_seed_secure(PRNGState *rs);
+LJ_FUNC uint64_t LJ_FASTCALL lj_prng_u64(PRNGState *rs);
+LJ_FUNC uint64_t LJ_FASTCALL lj_prng_u64d(PRNGState *rs);
+
+/* This is just the precomputed result of lib_math.c:random_seed(rs, 0.0). */
+static LJ_AINLINE void lj_prng_seed_fixed(PRNGState *rs)
+{
+ rs->u[0] = U64x(a0d27757,0a345b8c);
+ rs->u[1] = U64x(764a296c,5d4aa64f);
+ rs->u[2] = U64x(51220704,070adeaa);
+ rs->u[3] = U64x(2a2717b5,a7b7b927);
+}
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_profile.c b/libs/luajit-cmake/luajit/src/lj_profile.c
new file mode 100644
index 0000000..4a13537
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_profile.c
@@ -0,0 +1,371 @@
+/*
+** Low-overhead profiling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_profile_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASPROFILE
+
+#include "lj_buf.h"
+#include "lj_frame.h"
+#include "lj_debug.h"
+#include "lj_dispatch.h"
+#if LJ_HASJIT
+#include "lj_jit.h"
+#include "lj_trace.h"
+#endif
+#include "lj_profile.h"
+
+#include "luajit.h"
+
+#if LJ_PROFILE_SIGPROF
+
+#include <sys/time.h>
+#include <signal.h>
+#define profile_lock(ps) UNUSED(ps)
+#define profile_unlock(ps) UNUSED(ps)
+
+#elif LJ_PROFILE_PTHREAD
+
+#include <pthread.h>
+#include <time.h>
+#if LJ_TARGET_PS3
+#include <sys/timer.h>
+#endif
+#define profile_lock(ps) pthread_mutex_lock(&ps->lock)
+#define profile_unlock(ps) pthread_mutex_unlock(&ps->lock)
+
+#elif LJ_PROFILE_WTHREAD
+
+#define WIN32_LEAN_AND_MEAN
+#if LJ_TARGET_XBOX360
+#include <xtl.h>
+#include <xbox.h>
+#else
+#include <windows.h>
+#endif
+typedef unsigned int (WINAPI *WMM_TPFUNC)(unsigned int);
+#define profile_lock(ps) EnterCriticalSection(&ps->lock)
+#define profile_unlock(ps) LeaveCriticalSection(&ps->lock)
+
+#endif
+
+/* Profiler state. */
+typedef struct ProfileState {
+ global_State *g; /* VM state that started the profiler. */
+ luaJIT_profile_callback cb; /* Profiler callback. */
+ void *data; /* Profiler callback data. */
+ SBuf sb; /* String buffer for stack dumps. */
+ int interval; /* Sample interval in milliseconds. */
+ int samples; /* Number of samples for next callback. */
+ int vmstate; /* VM state when profile timer triggered. */
+#if LJ_PROFILE_SIGPROF
+ struct sigaction oldsa; /* Previous SIGPROF state. */
+#elif LJ_PROFILE_PTHREAD
+ pthread_mutex_t lock; /* g->hookmask update lock. */
+ pthread_t thread; /* Timer thread. */
+ int abort; /* Abort timer thread. */
+#elif LJ_PROFILE_WTHREAD
+#if LJ_TARGET_WINDOWS
+ HINSTANCE wmm; /* WinMM library handle. */
+ WMM_TPFUNC wmm_tbp; /* WinMM timeBeginPeriod function. */
+ WMM_TPFUNC wmm_tep; /* WinMM timeEndPeriod function. */
+#endif
+ CRITICAL_SECTION lock; /* g->hookmask update lock. */
+ HANDLE thread; /* Timer thread. */
+ int abort; /* Abort timer thread. */
+#endif
+} ProfileState;
+
+/* Sadly, we have to use a static profiler state.
+**
+** The SIGPROF variant needs a static pointer to the global state, anyway.
+** And it would be hard to extend for multiple threads. You can still use
+** multiple VMs in multiple threads, but only profile one at a time.
+*/
+static ProfileState profile_state;
+
+/* Default sample interval in milliseconds. */
+#define LJ_PROFILE_INTERVAL_DEFAULT 10
+
+/* -- Profiler/hook interaction ------------------------------------------- */
+
+#if !LJ_PROFILE_SIGPROF
+void LJ_FASTCALL lj_profile_hook_enter(global_State *g)
+{
+ ProfileState *ps = &profile_state;
+ if (ps->g) {
+ profile_lock(ps);
+ hook_enter(g);
+ profile_unlock(ps);
+ } else {
+ hook_enter(g);
+ }
+}
+
+void LJ_FASTCALL lj_profile_hook_leave(global_State *g)
+{
+ ProfileState *ps = &profile_state;
+ if (ps->g) {
+ profile_lock(ps);
+ hook_leave(g);
+ profile_unlock(ps);
+ } else {
+ hook_leave(g);
+ }
+}
+#endif
+
+/* -- Profile callbacks --------------------------------------------------- */
+
+/* Callback from profile hook (HOOK_PROFILE already cleared). */
+void LJ_FASTCALL lj_profile_interpreter(lua_State *L)
+{
+ ProfileState *ps = &profile_state;
+ global_State *g = G(L);
+ uint8_t mask;
+ profile_lock(ps);
+ mask = (g->hookmask & ~HOOK_PROFILE);
+ if (!(mask & HOOK_VMEVENT)) {
+ int samples = ps->samples;
+ ps->samples = 0;
+ g->hookmask = HOOK_VMEVENT;
+ lj_dispatch_update(g);
+ profile_unlock(ps);
+ ps->cb(ps->data, L, samples, ps->vmstate); /* Invoke user callback. */
+ profile_lock(ps);
+ mask |= (g->hookmask & HOOK_PROFILE);
+ }
+ g->hookmask = mask;
+ lj_dispatch_update(g);
+ profile_unlock(ps);
+}
+
+/* Trigger profile hook. Asynchronous call from OS-specific profile timer. */
+static void profile_trigger(ProfileState *ps)
+{
+ global_State *g = ps->g;
+ uint8_t mask;
+ profile_lock(ps);
+ ps->samples++; /* Always increment number of samples. */
+ mask = g->hookmask;
+ if (!(mask & (HOOK_PROFILE|HOOK_VMEVENT|HOOK_GC))) { /* Set profile hook. */
+ int st = g->vmstate;
+ ps->vmstate = st >= 0 ? 'N' :
+ st == ~LJ_VMST_INTERP ? 'I' :
+ st == ~LJ_VMST_C ? 'C' :
+ st == ~LJ_VMST_GC ? 'G' : 'J';
+ g->hookmask = (mask | HOOK_PROFILE);
+ lj_dispatch_update(g);
+ }
+ profile_unlock(ps);
+}
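+
+/* The vmstate character passed on to the callback thus encodes where the
+** sample hit: 'N' compiled (native) code, 'I' interpreter, 'C' C code,
+** 'G' garbage collector, 'J' JIT compiler.
+*/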
+
+/* -- OS-specific profile timer handling ---------------------------------- */
+
+#if LJ_PROFILE_SIGPROF
+
+/* SIGPROF handler. */
+static void profile_signal(int sig)
+{
+ UNUSED(sig);
+ profile_trigger(&profile_state);
+}
+
+/* Start profiling timer. */
+static void profile_timer_start(ProfileState *ps)
+{
+ int interval = ps->interval;
+ struct itimerval tm;
+ struct sigaction sa;
+ tm.it_value.tv_sec = tm.it_interval.tv_sec = interval / 1000;
+ tm.it_value.tv_usec = tm.it_interval.tv_usec = (interval % 1000) * 1000;
+ setitimer(ITIMER_PROF, &tm, NULL);
+#if LJ_TARGET_QNX
+ sa.sa_flags = 0;
+#else
+ sa.sa_flags = SA_RESTART;
+#endif
+ sa.sa_handler = profile_signal;
+ sigemptyset(&sa.sa_mask);
+ sigaction(SIGPROF, &sa, &ps->oldsa);
+}
+
+/* Stop profiling timer. */
+static void profile_timer_stop(ProfileState *ps)
+{
+ struct itimerval tm;
+ tm.it_value.tv_sec = tm.it_interval.tv_sec = 0;
+ tm.it_value.tv_usec = tm.it_interval.tv_usec = 0;
+ setitimer(ITIMER_PROF, &tm, NULL);
+ sigaction(SIGPROF, &ps->oldsa, NULL);
+}
+
+#elif LJ_PROFILE_PTHREAD
+
+/* POSIX timer thread. */
+static void *profile_thread(ProfileState *ps)
+{
+ int interval = ps->interval;
+#if !LJ_TARGET_PS3
+ struct timespec ts;
+ ts.tv_sec = interval / 1000;
+ ts.tv_nsec = (interval % 1000) * 1000000;
+#endif
+ while (1) {
+#if LJ_TARGET_PS3
+ sys_timer_usleep(interval * 1000);
+#else
+ nanosleep(&ts, NULL);
+#endif
+ if (ps->abort) break;
+ profile_trigger(ps);
+ }
+ return NULL;
+}
+
+/* Start profiling timer thread. */
+static void profile_timer_start(ProfileState *ps)
+{
+ pthread_mutex_init(&ps->lock, 0);
+ ps->abort = 0;
+ pthread_create(&ps->thread, NULL, (void *(*)(void *))profile_thread, ps);
+}
+
+/* Stop profiling timer thread. */
+static void profile_timer_stop(ProfileState *ps)
+{
+ ps->abort = 1;
+ pthread_join(ps->thread, NULL);
+ pthread_mutex_destroy(&ps->lock);
+}
+
+#elif LJ_PROFILE_WTHREAD
+
+/* Windows timer thread. */
+static DWORD WINAPI profile_thread(void *psx)
+{
+ ProfileState *ps = (ProfileState *)psx;
+ int interval = ps->interval;
+#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
+ ps->wmm_tbp(interval);
+#endif
+ while (1) {
+ Sleep(interval);
+ if (ps->abort) break;
+ profile_trigger(ps);
+ }
+#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
+ ps->wmm_tep(interval);
+#endif
+ return 0;
+}
+
+/* Start profiling timer thread. */
+static void profile_timer_start(ProfileState *ps)
+{
+#if LJ_TARGET_WINDOWS && !LJ_TARGET_UWP
+ if (!ps->wmm) { /* Load WinMM library on-demand. */
+ ps->wmm = LJ_WIN_LOADLIBA("winmm.dll");
+ if (ps->wmm) {
+ ps->wmm_tbp = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeBeginPeriod");
+ ps->wmm_tep = (WMM_TPFUNC)GetProcAddress(ps->wmm, "timeEndPeriod");
+ if (!ps->wmm_tbp || !ps->wmm_tep) {
+ ps->wmm = NULL;
+ return;
+ }
+ }
+ }
+#endif
+ InitializeCriticalSection(&ps->lock);
+ ps->abort = 0;
+ ps->thread = CreateThread(NULL, 0, profile_thread, ps, 0, NULL);
+}
+
+/* Stop profiling timer thread. */
+static void profile_timer_stop(ProfileState *ps)
+{
+ ps->abort = 1;
+ WaitForSingleObject(ps->thread, INFINITE);
+ DeleteCriticalSection(&ps->lock);
+}
+
+#endif
+
+/* -- Public profiling API ------------------------------------------------ */
+
+/* Start profiling. */
+LUA_API void luaJIT_profile_start(lua_State *L, const char *mode,
+ luaJIT_profile_callback cb, void *data)
+{
+ ProfileState *ps = &profile_state;
+ int interval = LJ_PROFILE_INTERVAL_DEFAULT;
+ while (*mode) {
+ int m = *mode++;
+ switch (m) {
+ case 'i':
+ interval = 0;
+ while (*mode >= '0' && *mode <= '9')
+ interval = interval * 10 + (*mode++ - '0');
+ if (interval <= 0) interval = 1;
+ break;
+#if LJ_HASJIT
+ case 'l': case 'f':
+ L2J(L)->prof_mode = m;
+ lj_trace_flushall(L);
+ break;
+#endif
+ default: /* Ignore unknown mode chars. */
+ break;
+ }
+ }
+ if (ps->g) {
+ luaJIT_profile_stop(L);
+ if (ps->g) return; /* Profiler in use by another VM. */
+ }
+ ps->g = G(L);
+ ps->interval = interval;
+ ps->cb = cb;
+ ps->data = data;
+ ps->samples = 0;
+ lj_buf_init(L, &ps->sb);
+ profile_timer_start(ps);
+}
+
+/* Stop profiling. */
+LUA_API void luaJIT_profile_stop(lua_State *L)
+{
+ ProfileState *ps = &profile_state;
+ global_State *g = ps->g;
+ if (G(L) == g) { /* Only stop profiler if started by this VM. */
+ profile_timer_stop(ps);
+ g->hookmask &= ~HOOK_PROFILE;
+ lj_dispatch_update(g);
+#if LJ_HASJIT
+ G2J(g)->prof_mode = 0;
+ lj_trace_flushall(L);
+#endif
+ lj_buf_free(g, &ps->sb);
+ ps->sb.w = ps->sb.e = NULL;
+ ps->g = NULL;
+ }
+}
+
+/* Return a compact stack dump. */
+LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt,
+ int depth, size_t *len)
+{
+ ProfileState *ps = &profile_state;
+ SBuf *sb = &ps->sb;
+ setsbufL(sb, L);
+ lj_buf_reset(sb);
+ lj_debug_dumpstack(L, sb, fmt, depth);
+ *len = (size_t)sbuflen(sb);
+ return sb->b;
+}
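+
+/* Sketch of a profiler callback using this API (record_sample() is a
+** placeholder for user code; see ext_profiler.html for the dump format
+** characters):
+**
+**   static void cb(void *data, lua_State *L, int samples, int vmstate)
+**   {
+**     size_t len;
+**     const char *stack = luaJIT_profile_dumpstack(L, "pl\n", 10, &len);
+**     record_sample(data, stack, len, samples, vmstate);
+**   }
+**
+** The returned buffer belongs to the profiler state and is only valid
+** until the next dump.
+*/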
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_profile.h b/libs/luajit-cmake/luajit/src/lj_profile.h
new file mode 100644
index 0000000..3969f8e
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_profile.h
@@ -0,0 +1,21 @@
+/*
+** Low-overhead profiling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_PROFILE_H
+#define _LJ_PROFILE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASPROFILE
+
+LJ_FUNC void LJ_FASTCALL lj_profile_interpreter(lua_State *L);
+#if !LJ_PROFILE_SIGPROF
+LJ_FUNC void LJ_FASTCALL lj_profile_hook_enter(global_State *g);
+LJ_FUNC void LJ_FASTCALL lj_profile_hook_leave(global_State *g);
+#endif
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_record.c b/libs/luajit-cmake/luajit/src/lj_record.c
new file mode 100644
index 0000000..bfd4123
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_record.c
@@ -0,0 +1,2838 @@
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_record_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_meta.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_bc.h"
+#include "lj_ff.h"
+#if LJ_HASPROFILE
+#include "lj_debug.h"
+#endif
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_ircall.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_record.h"
+#include "lj_ffrecord.h"
+#include "lj_snap.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_prng.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Sanity checks ------------------------------------------------------- */
+
+#ifdef LUA_USE_ASSERT
+/* Sanity check the whole IR -- sloooow. */
+static void rec_check_ir(jit_State *J)
+{
+ IRRef i, nins = J->cur.nins, nk = J->cur.nk;
+ lj_assertJ(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536,
+ "inconsistent IR layout");
+ for (i = nk; i < nins; i++) {
+ IRIns *ir = IR(i);
+ uint32_t mode = lj_ir_mode[ir->o];
+ IRRef op1 = ir->op1;
+ IRRef op2 = ir->op2;
+ const char *err = NULL;
+ switch (irm_op1(mode)) {
+ case IRMnone:
+ if (op1 != 0) err = "IRMnone op1 used";
+ break;
+ case IRMref:
+ if (op1 < nk || (i >= REF_BIAS ? op1 >= i : op1 <= i))
+ err = "IRMref op1 out of range";
+ break;
+ case IRMlit: break;
+ case IRMcst:
+ if (i >= REF_BIAS) { err = "constant in IR range"; break; }
+ if (irt_is64(ir->t) && ir->o != IR_KNULL)
+ i++;
+ continue;
+ }
+ switch (irm_op2(mode)) {
+ case IRMnone:
+ if (op2) err = "IRMnone op2 used";
+ break;
+ case IRMref:
+ if (op2 < nk || (i >= REF_BIAS ? op2 >= i : op2 <= i))
+ err = "IRMref op2 out of range";
+ break;
+ case IRMlit: break;
+ case IRMcst: err = "IRMcst op2"; break;
+ }
+ if (!err && ir->prev) {
+ if (ir->prev < nk || (i >= REF_BIAS ? ir->prev >= i : ir->prev <= i))
+ err = "chain out of range";
+ else if (ir->o != IR_NOP && IR(ir->prev)->o != ir->o)
+ err = "chain to different op";
+ }
+ lj_assertJ(!err, "bad IR %04d op %d(%04d,%04d): %s",
+ i-REF_BIAS,
+ ir->o,
+ irm_op1(mode) == IRMref ? op1-REF_BIAS : op1,
+ irm_op2(mode) == IRMref ? op2-REF_BIAS : op2,
+ err);
+ }
+}
+
+/* Compare stack slots and frames of the recorder and the VM. */
+static void rec_check_slots(jit_State *J)
+{
+ BCReg s, nslots = J->baseslot + J->maxslot;
+ int32_t depth = 0;
+ cTValue *base = J->L->base - J->baseslot;
+ lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot");
+ lj_assertJ(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME),
+ "baseslot does not point to frame");
+ lj_assertJ(nslots <= LJ_MAX_JSLOTS, "slot overflow");
+ for (s = 0; s < nslots; s++) {
+ TRef tr = J->slot[s];
+ if (tr) {
+ cTValue *tv = &base[s];
+ IRRef ref = tref_ref(tr);
+ IRIns *ir = NULL; /* Silence compiler. */
+ if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) {
+ lj_assertJ(ref >= J->cur.nk && ref < J->cur.nins,
+ "slot %d ref %04d out of range", s, ref - REF_BIAS);
+ ir = IR(ref);
+ lj_assertJ(irt_t(ir->t) == tref_t(tr), "slot %d IR type mismatch", s);
+ }
+ if (s == 0) {
+ lj_assertJ(tref_isfunc(tr), "frame slot 0 is not a function");
+#if LJ_FR2
+ } else if (s == 1) {
+ lj_assertJ((tr & ~TREF_FRAME) == 0, "bad frame slot 1");
+#endif
+ } else if ((tr & TREF_FRAME)) {
+ GCfunc *fn = gco2func(frame_gc(tv));
+ BCReg delta = (BCReg)(tv - frame_prev(tv));
+#if LJ_FR2
+ lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
+ "frame slot %d PC mismatch", s);
+ tr = J->slot[s-1];
+ ir = IR(tref_ref(tr));
+#endif
+ lj_assertJ(tref_isfunc(tr),
+ "frame slot %d is not a function", s-LJ_FR2);
+ lj_assertJ(!tref_isk(tr) || fn == ir_kfunc(ir),
+ "frame slot %d function mismatch", s-LJ_FR2);
+ lj_assertJ(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME)
+ : (s == delta + LJ_FR2),
+ "frame slot %d broken chain", s-LJ_FR2);
+ depth++;
+ } else if ((tr & TREF_CONT)) {
+#if LJ_FR2
+ lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
+ "cont slot %d continuation mismatch", s);
+#else
+ lj_assertJ(ir_kptr(ir) == gcrefp(tv->gcr, void),
+ "cont slot %d continuation mismatch", s);
+#endif
+ lj_assertJ((J->slot[s+1+LJ_FR2] & TREF_FRAME),
+ "cont slot %d not followed by frame", s);
+ depth++;
+ } else if ((tr & TREF_KEYINDEX)) {
+ lj_assertJ(tref_isint(tr), "keyindex slot %d bad type %d",
+ s, tref_type(tr));
+ } else {
+ /* Number repr. may differ, but other types must be the same. */
+ lj_assertJ(tvisnumber(tv) ? tref_isnumber(tr) :
+ itype2irt(tv) == tref_type(tr),
+ "slot %d type mismatch: stack type %d vs IR type %d",
+ s, itypemap(tv), tref_type(tr));
+ if (tref_isk(tr)) { /* Compare constants. */
+ TValue tvk;
+ lj_ir_kvalue(J->L, &tvk, ir);
+ lj_assertJ((tvisnum(&tvk) && tvisnan(&tvk)) ?
+ (tvisnum(tv) && tvisnan(tv)) :
+ lj_obj_equal(tv, &tvk),
+ "slot %d const mismatch: stack %016llx vs IR %016llx",
+ s, tv->u64, tvk.u64);
+ }
+ }
+ }
+ }
+ lj_assertJ(J->framedepth == depth,
+ "frame depth mismatch %d vs %d", J->framedepth, depth);
+}
+#endif
+
+/* -- Type handling and specialization ------------------------------------ */
+
+/* Note: these functions return tagged references (TRef). */
+
+/* Specialize a slot to a specific type. Note: slot can be negative! */
+static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
+{
+ /* Caller may set IRT_GUARD in t. */
+ TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Specialize a slot to the runtime type. Note: slot can be negative! */
+static TRef sload(jit_State *J, int32_t slot)
+{
+ IRType t = itype2irt(&J->L->base[slot]);
+ TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
+ IRSLOAD_TYPECHECK);
+ if (irtype_ispri(t)) ref = TREF_PRI(t); /* Canonicalize primitive refs. */
+ J->base[slot] = ref;
+ return ref;
+}
+
+/* Get TRef from slot. Load slot and specialize if not done already. */
+#define getslot(J, s) (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))
+
+/* Get TRef for current function. */
+static TRef getcurrf(jit_State *J)
+{
+ if (J->base[-1-LJ_FR2])
+ return J->base[-1-LJ_FR2];
+ /* Non-base frame functions ought to be loaded already. */
+ lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot");
+ return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY);
+}
+
+/* Compare for raw object equality.
+** Returns 0 if the objects are the same.
+** Returns 1 if they are different, but the same type.
+** Returns 2 for two different types.
+** Comparisons between primitives always return 1 -- no caller cares about it.
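+**
+** Example (illustrative): for `a == b` where slot a holds the integer 1
+** and slot b holds the number 1.0, the int side is widened via IR_CONV to
+** IRT_NUM; the values compare equal, so an IR_EQ guard is emitted and 0 is
+** returned.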
+*/
+int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
+{
+ int diff = !lj_obj_equal(av, bv);
+ if (!tref_isk2(a, b)) { /* Shortcut, also handles primitives. */
+ IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
+ IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
+ if (ta != tb) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tb == IRT_NUM) {
+ a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tb == IRT_INT) {
+ b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
+ } else {
+ return 2; /* Two different types are never equal. */
+ }
+ }
+ emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
+ }
+ return diff;
+}
+
+/* Constify a value. Returns 0 for non-representable object types. */
+TRef lj_record_constify(jit_State *J, cTValue *o)
+{
+ if (tvisgcv(o))
+ return lj_ir_kgc(J, gcV(o), itype2irt(o));
+ else if (tvisint(o))
+ return lj_ir_kint(J, intV(o));
+ else if (tvisnum(o))
+ return lj_ir_knumint(J, numV(o));
+ else if (tvisbool(o))
+ return TREF_PRI(itype2irt(o));
+ else
+ return 0; /* Can't represent lightuserdata (pointless). */
+}
+
+/* Emit a VLOAD with the correct type. */
+TRef lj_record_vload(jit_State *J, TRef ref, MSize idx, IRType t)
+{
+ TRef tr = emitir(IRTG(IR_VLOAD, t), ref, idx);
+ if (irtype_ispri(t)) tr = TREF_PRI(t); /* Canonicalize primitives. */
+ return tr;
+}
+
+/* -- Record loop ops ----------------------------------------------------- */
+
+/* Loop event. */
+typedef enum {
+ LOOPEV_LEAVE, /* Loop is left or not entered. */
+ LOOPEV_ENTERLO, /* Loop is entered with a low iteration count left. */
+ LOOPEV_ENTER /* Loop is entered. */
+} LoopEvent;
+
+/* Canonicalize slots: convert integers to numbers. */
+static void canonicalize_slots(jit_State *J)
+{
+ BCReg s;
+ if (LJ_DUALNUM) return;
+ for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
+ TRef tr = J->slot[s];
+ if (tref_isinteger(tr) && !(tr & TREF_KEYINDEX)) {
+ IRIns *ir = IR(tref_ref(tr));
+ if (!(ir->o == IR_SLOAD && (ir->op2 & (IRSLOAD_READONLY))))
+ J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
+ }
+ }
+}
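+
+/* Sketch of the effect (assuming a non-DUALNUM build): a slot narrowed to
+** IRT_INT, e.g. a FORL index, is converted back to IRT_NUM here so the
+** recorded slots present the canonical number type when the trace links.
+*/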
+
+/* Stop recording. */
+void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
+{
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ if (J->retryrec)
+ lj_trace_err(J, LJ_TRERR_RETRY);
+#endif
+ lj_trace_end(J);
+ J->cur.linktype = (uint8_t)linktype;
+ J->cur.link = (uint16_t)lnk;
+ /* Looping back at the same stack level? */
+ if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
+ if ((J->flags & JIT_F_OPT_LOOP)) /* Shall we try to create a loop? */
+ goto nocanon; /* Do not canonicalize or we lose the narrowing. */
+ if (J->cur.root) /* Otherwise ensure we always link to the root trace. */
+ J->cur.link = J->cur.root;
+ }
+ canonicalize_slots(J);
+nocanon:
+ /* Note: all loop ops must set J->pc to the following instruction! */
+ lj_snap_add(J); /* Add loop snapshot. */
+ J->needsnap = 0;
+ J->mergesnap = 1; /* In case recording continues. */
+}
+
+/* Search bytecode backwards for an int/num constant slot initializer. */
+static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
+{
+ /* This algorithm is rather simplistic and assumes quite a bit about
+ ** how the bytecode is generated. It works fine for FORI initializers,
+ ** but it won't necessarily work in other cases (e.g. iterator arguments).
+ ** It doesn't do anything fancy, either (like backpropagating MOVs).
+ */
+ const BCIns *pc, *startpc = proto_bc(J->pt);
+ for (pc = endpc-1; pc > startpc; pc--) {
+ BCIns ins = *pc;
+ BCOp op = bc_op(ins);
+ /* First try to find the last instruction that stores to this slot. */
+ if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
+ return 0; /* Multiple results, e.g. from a CALL or KNIL. */
+ } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
+ if (op == BC_KSHORT || op == BC_KNUM) { /* Found const. initializer. */
+ /* Now try to verify there's no forward jump across it. */
+ const BCIns *kpc = pc;
+ for (; pc > startpc; pc--)
+ if (bc_op(*pc) == BC_JMP) {
+ const BCIns *target = pc+bc_j(*pc)+1;
+ if (target > kpc && target <= endpc)
+ return 0; /* Conditional assignment. */
+ }
+ if (op == BC_KSHORT) {
+ int32_t k = (int32_t)(int16_t)bc_d(ins);
+ return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
+ } else {
+ cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
+ if (t == IRT_INT) {
+ int32_t k = numberVint(tv);
+ if (tvisint(tv) || numV(tv) == (lua_Number)k) /* -0 is ok here. */
+ return lj_ir_kint(J, k);
+ return 0; /* Type mismatch. */
+ } else {
+ return lj_ir_knum(J, numberVnum(tv));
+ }
+ }
+ }
+ return 0; /* Non-constant initializer. */
+ }
+ }
+ return 0; /* No assignment to this slot found? */
+}
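+
+/* Example (illustrative): for `for i=1,100 do ... end` the front end emits
+** KSHORT stores that initialize the FORI stop/step slots right before the
+** loop, so the search above typically yields constant IR refs and avoids
+** emitting SLOADs for them.
+*/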
+
+/* Load and optionally convert a FORI argument from a slot. */
+static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
+{
+ int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
+ return sloadt(J, (int32_t)slot,
+ t + (((mode & IRSLOAD_TYPECHECK) ||
+ (conv && t == IRT_INT && !(mode >> 16))) ?
+ IRT_GUARD : 0),
+ mode + conv);
+}
+
+/* Peek before FORI to find a const initializer. Otherwise load from slot. */
+static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
+ IRType t, int mode)
+{
+ TRef tr = J->base[slot];
+ if (!tr) {
+ tr = find_kinit(J, fori, slot, t);
+ if (!tr)
+ tr = fori_load(J, slot, t, mode);
+ }
+ return tr;
+}
+
+/* Return the direction of the FOR loop iterator.
+** It's important to exactly reproduce the semantics of the interpreter.
+*/
+static int rec_for_direction(cTValue *o)
+{
+ return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
+}
+
+/* Simulate the runtime behavior of the FOR loop iterator. */
+static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
+{
+ lua_Number stopv = numberVnum(&o[FORL_STOP]);
+ lua_Number idxv = numberVnum(&o[FORL_IDX]);
+ lua_Number stepv = numberVnum(&o[FORL_STEP]);
+ if (isforl)
+ idxv += stepv;
+ if (rec_for_direction(&o[FORL_STEP])) {
+ if (idxv <= stopv) {
+ *op = IR_LE;
+ return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_GT; return LOOPEV_LEAVE;
+ } else {
+ if (stopv <= idxv) {
+ *op = IR_GE;
+ return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
+ }
+ *op = IR_LT; return LOOPEV_LEAVE;
+ }
+}
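+
+/* Worked example (illustrative): in a FORL with idx=3, step=1, stop=4, the
+** new idxv is 4 and 4 <= 4 holds, so *op is set to IR_LE; since 4 + 2*1 > 4,
+** LOOPEV_ENTERLO signals that only a low iteration count remains.
+*/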
+
+/* Record checks for FOR loop overflow and step direction. */
+static void rec_for_check(jit_State *J, IRType t, int dir,
+ TRef stop, TRef step, int init)
+{
+ if (!tref_isk(step)) {
+ /* Non-constant step: need a guard for the direction. */
+ TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
+ emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
+ /* Add hoistable overflow checks for a narrowed FORL index. */
+ if (init && t == IRT_INT) {
+ if (tref_isk(stop)) {
+ /* Constant stop: optimize check away or to a range check for step. */
+ int32_t k = IR(tref_ref(stop))->i;
+ if (dir) {
+ if (k > 0)
+ emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
+ } else {
+ if (k < 0)
+ emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
+ }
+ } else {
+ /* Stop+step variable: need full overflow check. */
+ TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
+ emitir(IRTI(IR_USE), tr, 0); /* ADDOV is weak. Avoid dead result. */
+ }
+ }
+ } else if (init && t == IRT_INT && !tref_isk(stop)) {
+ /* Constant step: optimize overflow check to a range check for stop. */
+ int32_t k = IR(tref_ref(step))->i;
+ k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
+ emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
+ }
+}
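+
+/* Worked example (illustrative): for a narrowed int loop with constant
+** step 1 and a variable stop, the overflow check above reduces to the
+** loop-invariant (hoistable) range check stop <= 0x7fffffff - 1.
+*/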
+
+/* Record a FORL instruction. */
+static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
+ int init)
+{
+ BCReg ra = bc_a(*fori);
+ cTValue *tv = &J->L->base[ra];
+ TRef idx = J->base[ra+FORL_IDX];
+ IRType t = idx ? tref_type(idx) :
+ (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
+ int mode = IRSLOAD_INHERIT +
+ ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
+ TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
+ TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
+ int tc, dir = rec_for_direction(&tv[FORL_STEP]);
+ lj_assertJ(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI,
+ "bad bytecode %d instead of FORI/JFORI", bc_op(*fori));
+ scev->t.irt = t;
+ scev->dir = dir;
+ scev->stop = tref_ref(stop);
+ scev->step = tref_ref(step);
+ rec_for_check(J, t, dir, stop, step, init);
+ scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
+ tc = (LJ_DUALNUM &&
+ !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
+ tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
+ IRSLOAD_TYPECHECK : 0;
+ if (tc) {
+ J->base[ra+FORL_STOP] = stop;
+ J->base[ra+FORL_STEP] = step;
+ }
+ if (!idx)
+ idx = fori_load(J, ra+FORL_IDX, t,
+ IRSLOAD_INHERIT + tc + (J->scev.start << 16));
+ if (!init)
+ J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
+ J->base[ra+FORL_EXT] = idx;
+ scev->idx = tref_ref(idx);
+ setmref(scev->pc, fori);
+ J->maxslot = ra+FORL_EXT+1;
+}
+
+/* Record FORL/JFORL or FORI/JFORI. */
+static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
+{
+ BCReg ra = bc_a(*fori);
+ TValue *tv = &J->L->base[ra];
+ TRef *tr = &J->base[ra];
+ IROp op;
+ LoopEvent ev;
+ TRef stop;
+ IRType t;
+ if (isforl) { /* Handle FORL/JFORL opcodes. */
+ TRef idx = tr[FORL_IDX];
+ if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) {
+ t = J->scev.t.irt;
+ stop = J->scev.stop;
+ idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
+ tr[FORL_EXT] = tr[FORL_IDX] = idx;
+ } else {
+ ScEvEntry scev;
+ rec_for_loop(J, fori, &scev, 0);
+ t = scev.t.irt;
+ stop = scev.stop;
+ }
+ } else { /* Handle FORI/JFORI opcodes. */
+ BCReg i;
+ lj_meta_for(J->L, tv);
+ t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
+ IRT_NUM;
+ for (i = FORL_IDX; i <= FORL_STEP; i++) {
+ if (!tr[i]) sload(J, ra+i);
+ lj_assertJ(tref_isnumber_str(tr[i]), "bad FORI argument type");
+ if (tref_isstr(tr[i]))
+ tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
+ if (t == IRT_INT) {
+ if (!tref_isinteger(tr[i]))
+ tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
+ } else {
+ if (!tref_isnum(tr[i]))
+ tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
+ }
+ }
+ tr[FORL_EXT] = tr[FORL_IDX];
+ stop = tr[FORL_STOP];
+ rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
+ stop, tr[FORL_STEP], 1);
+ }
+
+ ev = rec_for_iter(&op, tv, isforl);
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ } else {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ }
+ lj_snap_add(J);
+
+ emitir(IRTG(op, t), tr[FORL_IDX], stop);
+
+ if (ev == LOOPEV_LEAVE) {
+ J->maxslot = ra;
+ J->pc = fori+bc_j(*fori)+1;
+ } else {
+ J->maxslot = ra+FORL_EXT+1;
+ J->pc = fori+1;
+ }
+ J->needsnap = 1;
+ return ev;
+}
+
+/* Record ITERL/JITERL. */
+static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
+{
+ BCReg ra = bc_a(iterins);
+ if (!tref_isnil(getslot(J, ra))) { /* Looping back? */
+ J->base[ra-1] = J->base[ra]; /* Copy result of ITERC to control var. */
+ J->maxslot = ra-1+bc_b(J->pc[-1]);
+ J->pc += bc_j(iterins)+1;
+ return LOOPEV_ENTER;
+ } else {
+ J->maxslot = ra-3;
+ J->pc++;
+ return LOOPEV_LEAVE;
+ }
+}
+
+/* Record LOOP/JLOOP. Now, that was easy. */
+static LoopEvent rec_loop(jit_State *J, BCReg ra, int skip)
+{
+ if (ra < J->maxslot) J->maxslot = ra;
+ J->pc += skip;
+ return LOOPEV_ENTER;
+}
+
+/* Check if a loop repeatedly failed to trace because it didn't loop back. */
+static int innerloopleft(jit_State *J, const BCIns *pc)
+{
+ ptrdiff_t i;
+ for (i = 0; i < PENALTY_SLOTS; i++)
+ if (mref(J->penalty[i].pc, const BCIns) == pc) {
+ if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
+ J->penalty[i].reason == LJ_TRERR_LINNER) &&
+ J->penalty[i].val >= 2*PENALTY_MIN)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/* Handle the case when an interpreted loop op is hit. */
+static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
+{
+ if (J->parent == 0 && J->exitno == 0) {
+ if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
+ if (bc_op(J->cur.startins) == BC_ITERN) return; /* See rec_itern(). */
+ /* Same loop? */
+ if (ev == LOOPEV_LEAVE) /* Must loop back to form a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping trace. */
+ } else if (ev != LOOPEV_LEAVE) { /* Entering inner loop? */
+ /* It's usually better to abort here and wait until the inner loop
+ ** is traced. But if the inner loop repeatedly didn't loop back,
+ ** this indicates a low trip count. In this case try unrolling
+ ** an inner loop even in a root trace. But it's better to be a bit
+ ** more conservative here and only do it for very short loops.
+ */
+ if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
+ lj_trace_err(J, LJ_TRERR_LINNER); /* Root trace hit an inner loop. */
+ if ((ev != LOOPEV_ENTERLO &&
+ J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ J->loopref = J->cur.nins;
+ }
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters an inner loop. */
+ J->loopref = J->cur.nins;
+ if (--J->loopunroll < 0)
+ lj_trace_err(J, LJ_TRERR_LUNROLL); /* Limit loop unrolling. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
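+
+/* Example (illustrative): in `for i=1,N do for j=1,2 do ... end end` the
+** short inner loop keeps failing to loop back (LJ_TRERR_LLEAVE); once its
+** penalty is high enough, innerloopleft() lets the root trace unroll it.
+*/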
+
+/* Handle the case when an already compiled loop op is hit. */
+static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
+{
+ if (J->parent == 0 && J->exitno == 0) { /* Root trace hit an inner loop. */
+ /* Better let the inner loop spawn a side trace back here. */
+ lj_trace_err(J, LJ_TRERR_LINNER);
+ } else if (ev != LOOPEV_LEAVE) { /* Side trace enters a compiled loop. */
+ J->instunroll = 0; /* Cannot continue across a compiled loop op. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Form extra loop. */
+ else
+ lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the loop. */
+ } /* Side trace continues across a loop that's left or not entered. */
+}
+
+/* Record ITERN. */
+static LoopEvent rec_itern(jit_State *J, BCReg ra, BCReg rb)
+{
+#if LJ_BE
+ /* YAGNI: Disabled on big-endian due to issues with lj_vm_next,
+ ** IR_HIOP, RID_RETLO/RID_RETHI and ra_destpair.
+ */
+ UNUSED(ra); UNUSED(rb);
+ setintV(&J->errinfo, (int32_t)BC_ITERN);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+#else
+ RecordIndex ix;
+ /* Since ITERN is recorded at the start, we need our own loop detection. */
+ if (J->pc == J->startpc &&
+ J->framedepth + J->retdepth == 0 && J->parent == 0 && J->exitno == 0) {
+ IRRef ref = REF_FIRST + LJ_HASPROFILE;
+#ifdef LUAJIT_ENABLE_CHECKHOOK
+ ref += 3;
+#endif
+ if (J->cur.nins > ref ||
+ (LJ_HASPROFILE && J->cur.nins == ref && J->cur.ir[ref-1].o != IR_PROF)) {
+ J->instunroll = 0; /* Cannot continue unrolling across an ITERN. */
+ lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno); /* Looping trace. */
+ return LOOPEV_ENTER;
+ }
+ }
+ J->maxslot = ra;
+ lj_snap_add(J); /* Required to make JLOOP the first ins in a side-trace. */
+ ix.tab = getslot(J, ra-2);
+ ix.key = J->base[ra-1] ? J->base[ra-1] :
+ sloadt(J, (int32_t)(ra-1), IRT_GUARD|IRT_INT,
+ IRSLOAD_TYPECHECK|IRSLOAD_KEYINDEX);
+ copyTV(J->L, &ix.tabv, &J->L->base[ra-2]);
+ copyTV(J->L, &ix.keyv, &J->L->base[ra-1]);
+ ix.idxchain = (rb < 3); /* Omit value type check, if unused. */
+ ix.mobj = 1; /* We need the next index, too. */
+ J->maxslot = ra + lj_record_next(J, &ix);
+ J->needsnap = 1;
+ if (!tref_isnil(ix.key)) { /* Looping back? */
+ J->base[ra-1] = ix.mobj | TREF_KEYINDEX; /* Control var has next index. */
+ J->base[ra] = ix.key;
+ J->base[ra+1] = ix.val;
+ J->pc += bc_j(J->pc[1])+2;
+ return LOOPEV_ENTER;
+ } else {
+ J->maxslot = ra-3;
+ J->pc += 2;
+ return LOOPEV_LEAVE;
+ }
+#endif
+}
+
+/* Record ISNEXT. */
+static void rec_isnext(jit_State *J, BCReg ra)
+{
+ cTValue *b = &J->L->base[ra-3];
+ if (tvisfunc(b) && funcV(b)->c.ffid == FF_next &&
+ tvistab(b+1) && tvisnil(b+2)) {
+ /* These checks are folded away for a compiled pairs(). */
+ TRef func = getslot(J, ra-3);
+ TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), func, IRFL_FUNC_FFID);
+ emitir(IRTGI(IR_EQ), trid, lj_ir_kint(J, FF_next));
+ (void)getslot(J, ra-2); /* Type check for table. */
+ (void)getslot(J, ra-1); /* Type check for nil key. */
+ J->base[ra-1] = lj_ir_kint(J, 0) | TREF_KEYINDEX;
+ J->maxslot = ra;
+ } else { /* Abort trace. Interpreter will despecialize bytecode. */
+ lj_trace_err(J, LJ_TRERR_RECERR);
+ }
+}
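+
+/* Example (illustrative): `for k,v in pairs(t) do ... end` with the plain
+** built-in next() takes the fast path above: the trace guards on FF_next
+** and the control var starts out as key index 0.
+*/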
+
+/* -- Record profiler hook checks ----------------------------------------- */
+
+#if LJ_HASPROFILE
+
+/* Need to insert profiler hook check? */
+static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc)
+{
+ GCproto *ppt;
+ lj_assertJ(J->prof_mode == 'f' || J->prof_mode == 'l',
+ "bad profiler mode %c", J->prof_mode);
+ if (!pt)
+ return 0;
+ ppt = J->prev_pt;
+ J->prev_pt = pt;
+ if (pt != ppt && ppt) {
+ J->prev_line = -1;
+ return 1;
+ }
+ if (J->prof_mode == 'l') {
+ BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc));
+ BCLine pline = J->prev_line;
+ J->prev_line = line;
+ if (pline != line)
+ return 1;
+ }
+ return 0;
+}
+
+static void rec_profile_ins(jit_State *J, const BCIns *pc)
+{
+ if (J->prof_mode && rec_profile_need(J, J->pt, pc)) {
+ emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
+ lj_snap_add(J);
+ }
+}
+
+static void rec_profile_ret(jit_State *J)
+{
+ if (J->prof_mode == 'f') {
+ emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
+ J->prev_pt = NULL;
+ lj_snap_add(J);
+ }
+}
+
+#endif
+
+/* -- Record calls and returns -------------------------------------------- */
+
+/* Specialize to the runtime value of the called function or its prototype. */
+static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
+{
+ TRef kfunc;
+ if (isluafunc(fn)) {
+ GCproto *pt = funcproto(fn);
+ /* Too many closures created? Probably not a monomorphic function. */
+ if (pt->flags >= PROTO_CLC_POLY) { /* Specialize to prototype instead. */
+ TRef trpt = emitir(IRT(IR_FLOAD, IRT_PGC), tr, IRFL_FUNC_PC);
+ emitir(IRTG(IR_EQ, IRT_PGC), trpt, lj_ir_kptr(J, proto_bc(pt)));
+ (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO); /* Prevent GC of proto. */
+ return tr;
+ }
+ } else {
+ /* Don't specialize to non-monomorphic builtins. */
+ switch (fn->c.ffid) {
+ case FF_coroutine_wrap_aux:
+ case FF_string_gmatch_aux:
+ /* NYI: io_file_iter doesn't have an ffid, yet. */
+ { /* Specialize to the ffid. */
+ TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID);
+ emitir(IRTGI(IR_EQ), trid, lj_ir_kint(J, fn->c.ffid));
+ }
+ return tr;
+ default:
+ /* NYI: don't specialize to non-monomorphic C functions. */
+ break;
+ }
+ }
+ /* Otherwise specialize to the function (closure) value itself. */
+ kfunc = lj_ir_kfunc(J, fn);
+ emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
+ return kfunc;
+}
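+
+/* Example (illustrative): a closure factory such as
+**   local function make(x) return function() return x end end
+** creates many closures from one prototype; once PROTO_CLC_POLY is reached,
+** the recorder guards on the prototype (via IRFL_FUNC_PC) rather than on
+** each individual closure value.
+*/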
+
+/* Record call setup. */
+static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ RecordIndex ix;
+ TValue *functv = &J->L->base[func];
+ TRef kfunc, *fbase = &J->base[func];
+ ptrdiff_t i;
+ (void)getslot(J, func); /* Ensure func has a reference. */
+ for (i = 1; i <= nargs; i++)
+ (void)getslot(J, func+LJ_FR2+i); /* Ensure all args have a reference. */
+ if (!tref_isfunc(fbase[0])) { /* Resolve __call metamethod. */
+ ix.tab = fbase[0];
+ copyTV(J->L, &ix.tabv, functv);
+ if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ for (i = ++nargs; i > LJ_FR2; i--) /* Shift arguments up. */
+ fbase[i+LJ_FR2] = fbase[i+LJ_FR2-1];
+#if LJ_FR2
+ fbase[2] = fbase[0];
+#endif
+ fbase[0] = ix.mobj; /* Replace function. */
+ functv = &ix.mobjv;
+ }
+ kfunc = rec_call_specialize(J, funcV(functv), fbase[0]);
+#if LJ_FR2
+ fbase[0] = kfunc;
+ fbase[1] = TREF_FRAME;
+#else
+ fbase[0] = kfunc | TREF_FRAME;
+#endif
+ J->maxslot = (BCReg)nargs;
+}
+
+/* Record call. */
+void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ /* Bump frame. */
+ J->framedepth++;
+ J->base += func+1+LJ_FR2;
+ J->baseslot += func+1+LJ_FR2;
+ if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+}
+
+/* Record tail call. */
+void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
+{
+ rec_call_setup(J, func, nargs);
+ if (frame_isvarg(J->L->base - 1)) {
+ BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
+ if (--J->framedepth < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ func += cbase;
+ }
+ /* Move func + args down. */
+ if (LJ_FR2 && J->baseslot == 2)
+ J->base[func+1] = TREF_FRAME;
+ memmove(&J->base[-1-LJ_FR2], &J->base[func], sizeof(TRef)*(J->maxslot+1+LJ_FR2));
+ /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
+ /* Tailcalls can form a loop, so count towards the loop unroll limit. */
+ if (++J->tailcalled > J->loopunroll)
+ lj_trace_err(J, LJ_TRERR_LUNROLL);
+}
+
+/* Check unroll limits for down-recursion. */
+static int check_downrec_unroll(jit_State *J, GCproto *pt)
+{
+ IRRef ptref;
+ for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
+ if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
+ int count = 0;
+ IRRef ref;
+ for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
+ if (IR(ref)->op1 == ptref)
+ count++;
+ if (count) {
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll])
+ return 1;
+ } else {
+ lj_trace_err(J, LJ_TRERR_DOWNREC);
+ }
+ }
+ }
+ return 0;
+}
+
+static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot);
+
+/* Record return. */
+void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
+{
+ TValue *frame = J->L->base - 1;
+ ptrdiff_t i;
+ for (i = 0; i < gotresults; i++)
+ (void)getslot(J, rbase+i); /* Ensure all results have a reference. */
+ while (frame_ispcall(frame)) { /* Immediately resolve pcall() returns. */
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth <= 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
+ gotresults++;
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->base[--rbase] = TREF_TRUE; /* Prepend true to results. */
+ frame = frame_prevd(frame);
+ J->needsnap = 1; /* Stop catching on-trace errors. */
+ }
+ /* Return to lower frame via interpreter for unhandled cases. */
+ if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
+ (!frame_islua(frame) ||
+ (J->parent == 0 && J->exitno == 0 &&
+ !bc_isret(bc_op(J->cur.startins))))) {
+ /* NYI: specialize to frame type and return directly, not via RET*. */
+ for (i = 0; i < (ptrdiff_t)rbase; i++)
+ J->base[i] = 0; /* Purge dead slots. */
+ J->maxslot = rbase + (BCReg)gotresults;
+ lj_record_stop(J, LJ_TRLINK_RETURN, 0); /* Return to interpreter. */
+ return;
+ }
+ if (frame_isvarg(frame)) {
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if (--J->framedepth < 0) /* NYI: return of vararg func to lower frame. */
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
+ rbase += cbase;
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ frame = frame_prevd(frame);
+ }
+ if (frame_islua(frame)) { /* Return to Lua frame. */
+ BCIns callins = *(frame_pc(frame)-1);
+ ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 : gotresults;
+ BCReg cbase = bc_a(callins);
+ GCproto *pt = funcproto(frame_func(frame - (cbase+1+LJ_FR2)));
+ if ((pt->flags & PROTO_NOJIT))
+ lj_trace_err(J, LJ_TRERR_CJITOFF);
+ if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
+ if (check_downrec_unroll(J, pt)) {
+ J->maxslot = (BCReg)(rbase + gotresults);
+ lj_snap_purge(J);
+ lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno); /* Down-rec. */
+ return;
+ }
+ lj_snap_add(J);
+ }
+ for (i = 0; i < nresults; i++) /* Adjust results. */
+ J->base[i-1-LJ_FR2] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
+ J->maxslot = cbase+(BCReg)nresults;
+ if (J->framedepth > 0) { /* Return to a frame that is part of the trace. */
+ J->framedepth--;
+ lj_assertJ(J->baseslot > cbase+1+LJ_FR2, "bad baseslot for return");
+ J->baseslot -= cbase+1+LJ_FR2;
+ J->base -= cbase+1+LJ_FR2;
+ } else if (J->parent == 0 && J->exitno == 0 &&
+ !bc_isret(bc_op(J->cur.startins))) {
+ /* Return to lower frame would leave the loop in a root trace. */
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+ } else if (J->needsnap) { /* Tailcalled to ff with side-effects. */
+ lj_trace_err(J, LJ_TRERR_NYIRETL); /* No way to insert snapshot here. */
+ } else { /* Return to lower frame. Guard for the target we return to. */
+ TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
+ TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
+ emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc);
+ J->retdepth++;
+ J->needsnap = 1;
+ lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot for return");
+ /* Shift result slots up and clear the slots of the new frame below. */
+ memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults);
+ memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2));
+ }
+ } else if (frame_iscont(frame)) { /* Return to continuation frame. */
+ ASMFunction cont = frame_contf(frame);
+ BCReg cbase = (BCReg)frame_delta(frame);
+ if ((J->framedepth -= 2) < 0)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->baseslot -= (BCReg)cbase;
+ J->base -= cbase;
+ J->maxslot = cbase-(2<<LJ_FR2);
+ if (cont == lj_cont_ra) {
+ /* Copy result to destination slot. */
+ BCReg dst = bc_a(*(frame_contpc(frame)-1));
+ J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
+ if (dst >= J->maxslot) {
+ J->maxslot = dst+1;
+ }
+ } else if (cont == lj_cont_nop) {
+ /* Nothing to do here. */
+ } else if (cont == lj_cont_cat) {
+ BCReg bslot = bc_b(*(frame_contpc(frame)-1));
+ TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL;
+ if (bslot != J->maxslot) { /* Concatenate the remainder. */
+ TValue *b = J->L->base, save; /* Simulate lower frame and result. */
+ /* Can't handle MM_concat + CALLT + fast func side-effects. */
+ if (J->postproc != LJ_POST_NONE)
+ lj_trace_err(J, LJ_TRERR_NYIRETL);
+ J->base[J->maxslot] = tr;
+ copyTV(J->L, &save, b-(2<<LJ_FR2));
+ if (gotresults)
+ copyTV(J->L, b-(2<<LJ_FR2), b+rbase);
+ else
+ setnilV(b-(2<<LJ_FR2));
+ J->L->base = b - cbase;
+ tr = rec_cat(J, bslot, cbase-(2<<LJ_FR2));
+ b = J->L->base + cbase; /* Undo. */
+ J->L->base = b;
+ copyTV(J->L, b-(2<<LJ_FR2), &save);
+ }
+ if (tr) { /* Store final result. */
+ BCReg dst = bc_a(*(frame_contpc(frame)-1));
+ J->base[dst] = tr;
+ if (dst >= J->maxslot) {
+ J->maxslot = dst+1;
+ }
+ } /* Otherwise continue with another __concat call. */
+ } else {
+ /* Result type already specialized. */
+ lj_assertJ(cont == lj_cont_condf || cont == lj_cont_condt,
+ "bad continuation type");
+ }
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYIRETL); /* NYI: handle return to C frame. */
+ }
+ lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot for return");
+}
+
+/* -- Metamethod handling ------------------------------------------------- */
+
+/* Prepare to record call to metamethod. */
+static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
+{
+ BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize;
+#if LJ_FR2
+ J->base[top] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
+ J->base[top+1] = TREF_CONT;
+#else
+ J->base[top] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
+#endif
+ J->framedepth++;
+ for (s = J->maxslot; s < top; s++)
+ J->base[s] = 0; /* Clear frame gap to avoid resurrecting previous refs. */
+ return top+1+LJ_FR2;
+}
+
+/* Record metamethod lookup. */
+int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ RecordIndex mix;
+ GCtab *mt;
+ if (tref_istab(ix->tab)) {
+ mt = tabref(tabV(&ix->tabv)->metatable);
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ } else if (tref_isudata(ix->tab)) {
+ int udtype = udataV(&ix->tabv)->udtype;
+ mt = tabref(udataV(&ix->tabv)->metatable);
+ /* The metatables of special userdata objects are treated as immutable. */
+ if (udtype != UDTYPE_USERDATA) {
+ cTValue *mo;
+ if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
+ /* Specialize to the C library namespace object. */
+ emitir(IRTG(IR_EQ, IRT_PGC), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
+ } else {
+ /* Specialize to the type of userdata. */
+ TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
+ }
+ immutable_mt:
+ mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
+ if (!mo || tvisnil(mo))
+ return 0; /* No metamethod. */
+ /* Treat metamethod or index table as immutable, too. */
+ if (!(tvisfunc(mo) || tvistab(mo)))
+ lj_trace_err(J, LJ_TRERR_BADTYPE);
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
+ ix->mtv = mt;
+ ix->mt = TREF_NIL; /* Dummy value for comparison semantics. */
+ return 1; /* Got metamethod or index table. */
+ }
+ mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
+ } else {
+ /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
+ mt = tabref(basemt_obj(J2G(J), &ix->tabv));
+ if (mt == NULL) {
+ ix->mt = TREF_NIL;
+ return 0; /* No metamethod. */
+ }
+ /* The cdata metatable is treated as immutable. */
+ if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
+ ix->mt = mix.tab = lj_ir_ggfload(J, IRT_TAB,
+ GG_OFS(g.gcroot[GCROOT_BASEMT+itypemap(&ix->tabv)]));
+ goto nocheck;
+ }
+ ix->mt = mt ? mix.tab : TREF_NIL;
+ emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
+nocheck:
+ if (mt) {
+ GCstr *mmstr = mmname_str(J2G(J), mm);
+ cTValue *mo = lj_tab_getstr(mt, mmstr);
+ if (mo && !tvisnil(mo))
+ copyTV(J->L, &ix->mobjv, mo);
+ ix->mtv = mt;
+ settabV(J->L, &mix.tabv, mt);
+ setstrV(J->L, &mix.keyv, mmstr);
+ mix.key = lj_ir_kstr(J, mmstr);
+ mix.val = 0;
+ mix.idxchain = 0;
+ ix->mobj = lj_record_idx(J, &mix);
+ return !tref_isnil(ix->mobj); /* 1 if metamethod found, 0 if not. */
+ }
+ return 0; /* No metamethod. */
+}
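+
+/* Note (illustrative): method calls on strings or numbers, e.g. s:sub(1),
+** take the base-metatable branch above, which is why lua_setmetatable() on
+** a base metatable must flush compiled machine code.
+*/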
+
+/* Record call to arithmetic or concat metamethod. */
+static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
+{
+ /* Set up metamethod call first to save ix->tab and ix->tabv. */
+ BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[1+LJ_FR2] = ix->tab; base[2+LJ_FR2] = ix->key;
+ copyTV(J->L, basev+1+LJ_FR2, &ix->tabv);
+ copyTV(J->L, basev+2+LJ_FR2, &ix->keyv);
+ if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ if (mm != MM_unm) {
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ if (lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
+ goto ok;
+ }
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ok:
+ base[0] = ix->mobj;
+#if LJ_FR2
+ base[1] = 0;
+#endif
+ copyTV(J->L, basev+0, &ix->mobjv);
+ lj_record_call(J, func, 2);
+ return 0; /* No result yet. */
+}
+
+/* Record call to __len metamethod. */
+static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
+{
+ RecordIndex ix;
+ ix.tab = tr;
+ copyTV(J->L, &ix.tabv, tv);
+ if (lj_record_mm_lookup(J, &ix, MM_len)) {
+ BCReg func = rec_mm_prep(J, lj_cont_ra);
+ TRef *base = J->base + func;
+ TValue *basev = J->L->base + func;
+ base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
+ base += LJ_FR2;
+ basev += LJ_FR2;
+ base[1] = tr; copyTV(J->L, basev+1, tv);
+#if LJ_52
+ base[2] = tr; copyTV(J->L, basev+2, tv);
+#else
+ base[2] = TREF_NIL; setnilV(basev+2);
+#endif
+ lj_record_call(J, func, 2);
+ } else {
+ if (LJ_52 && tref_istab(tr))
+ return emitir(IRTI(IR_ALEN), tr, TREF_NIL);
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ }
+ return 0; /* No result yet. */
+}
+
+/* Call a comparison metamethod. */
+static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
+{
+ BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
+ TRef *base = J->base + func + LJ_FR2;
+ TValue *tv = J->L->base + func + LJ_FR2;
+ base[-LJ_FR2] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
+ copyTV(J->L, tv-LJ_FR2, &ix->mobjv);
+ copyTV(J->L, tv+1, &ix->valv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ lj_record_call(J, func, 2);
+}
+
+/* Record call to equality comparison metamethod (for tab and udata only). */
+static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ if (lj_record_mm_lookup(J, ix, MM_eq)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, MM_eq) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ return;
+ }
+ rec_mm_callcomp(J, ix, op);
+ }
+}
+
+/* Record call to ordered comparison metamethods (for arbitrary objects). */
+static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
+{
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ while (1) {
+ MMS mm = (op & 2) ? MM_le : MM_lt; /* Try __le + __lt or only __lt. */
+#if LJ_52
+ if (!lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ if (!lj_record_mm_lookup(J, ix, mm)) /* Lookup mm on 2nd operand. */
+ goto nomatch;
+ }
+ rec_mm_callcomp(J, ix, op);
+ return;
+#else
+ if (lj_record_mm_lookup(J, ix, mm)) { /* Lookup mm on 1st operand. */
+ cTValue *bv;
+ TRef mo1 = ix->mobj;
+ TValue mo1v;
+ copyTV(J->L, &mo1v, &ix->mobjv);
+ /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
+ bv = &ix->keyv;
+ if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
+ TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
+ } else { /* Lookup metamethod on 2nd operand and compare both. */
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, bv);
+ if (!lj_record_mm_lookup(J, ix, mm) ||
+ lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
+ goto nomatch;
+ }
+ rec_mm_callcomp(J, ix, op);
+ return;
+ }
+#endif
+ nomatch:
+ /* Lookup failed. Retry with __lt and swapped operands. */
+ if (!(op & 2)) break; /* Already at __lt. Interpreter will throw. */
+ ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ copyTV(J->L, &ix->keyv, &ix->valv);
+ copyTV(J->L, &ix->valv, &ix->tabv);
+ op ^= 3;
+ }
+}
+
+#if LJ_HASFFI
+/* Set up call to cdata comparison metamethod. */
+static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
+{
+ lj_snap_add(J);
+ if (tref_iscdata(ix->val)) {
+ ix->tab = ix->val;
+ copyTV(J->L, &ix->tabv, &ix->valv);
+ } else {
+ lj_assertJ(tref_iscdata(ix->key), "cdata expected");
+ ix->tab = ix->key;
+ copyTV(J->L, &ix->tabv, &ix->keyv);
+ }
+ lj_record_mm_lookup(J, ix, mm);
+ rec_mm_callcomp(J, ix, op);
+}
+#endif
+
+/* -- Indexed access ------------------------------------------------------ */
+
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+/* Bump table allocations in bytecode when they grow during recording. */
+static void rec_idx_bump(jit_State *J, RecordIndex *ix)
+{
+ RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))];
+ if (tref_ref(ix->tab) == rbc->ref) {
+ const BCIns *pc = mref(rbc->pc, const BCIns);
+ GCtab *tb = tabV(&ix->tabv);
+ uint32_t nhbits;
+ IRIns *ir;
+ if (!tvisnil(&ix->keyv))
+ (void)lj_tab_set(J->L, tb, &ix->keyv); /* Grow table right now. */
+ nhbits = tb->hmask > 0 ? lj_fls(tb->hmask)+1 : 0;
+ ir = IR(tref_ref(ix->tab));
+ if (ir->o == IR_TNEW) {
+ uint32_t ah = bc_d(*pc);
+ uint32_t asize = ah & 0x7ff, hbits = ah >> 11;
+ if (nhbits > hbits) hbits = nhbits;
+ if (tb->asize > asize) {
+ asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff;
+ }
+ if ((asize | (hbits<<11)) != ah) { /* Has the size changed? */
+ /* Patch bytecode, but continue recording (for more patching). */
+ setbc_d(pc, (asize | (hbits<<11)));
+ /* Patching TNEW operands is only safe if the trace is aborted. */
+ ir->op1 = asize; ir->op2 = hbits;
+ J->retryrec = 1; /* Abort the trace at the end of recording. */
+ }
+ } else if (ir->o == IR_TDUP) {
+ GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc)));
+ /* Grow template table, but preserve keys with nil values. */
+ if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) ||
+ (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) {
+ Node *node = noderef(tpl->node);
+ uint32_t i, hmask = tpl->hmask, asize;
+ TValue *array;
+ for (i = 0; i <= hmask; i++) {
+ if (!tvisnil(&node[i].key) && tvisnil(&node[i].val))
+ settabV(J->L, &node[i].val, tpl);
+ }
+ if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) {
+ TValue *o = lj_tab_set(J->L, tpl, &ix->keyv);
+ if (tvisnil(o)) settabV(J->L, o, tpl);
+ }
+ lj_tab_resize(J->L, tpl, tb->asize, nhbits);
+ node = noderef(tpl->node);
+ hmask = tpl->hmask;
+ for (i = 0; i <= hmask; i++) {
+ /* This is safe, since template tables only hold immutable values. */
+ if (tvistab(&node[i].val))
+ setnilV(&node[i].val);
+ }
+ /* The shape of the table may have changed. Clean up array part, too. */
+ asize = tpl->asize;
+ array = tvref(tpl->array);
+ for (i = 0; i < asize; i++) {
+ if (tvistab(&array[i]))
+ setnilV(&array[i]);
+ }
+ J->retryrec = 1; /* Abort the trace at the end of recording. */
+ }
+ }
+ }
+}
+#endif
+
+/* Record array bounds check (ABC). */
+static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
+{
+ /* Try to emit invariant bounds checks. */
+ if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
+ (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
+ IRRef ref = tref_ref(ikey);
+ IRIns *ir = IR(ref);
+ int32_t ofs = 0;
+ IRRef ofsref = 0;
+ /* Handle constant offsets. */
+ if (ir->o == IR_ADD && irref_isk(ir->op2)) {
+ ofsref = ir->op2;
+ ofs = IR(ofsref)->i;
+ ref = ir->op1;
+ ir = IR(ref);
+ }
+ /* Got scalar evolution analysis results for this reference? */
+ if (ref == J->scev.idx) {
+ int32_t stop;
+ lj_assertJ(irt_isint(J->scev.t) && ir->o == IR_SLOAD,
+ "only int SCEV supported");
+ stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
+ /* Runtime value for stop of loop is within bounds? */
+ if ((uint64_t)stop + ofs < (uint64_t)asize) {
+ /* Emit invariant bounds check for stop. */
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
+ emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
+ /* Emit invariant bounds check for start, if not const or negative. */
+ if (!(J->scev.dir && J->scev.start &&
+ (int64_t)IR(J->scev.start)->i + ofs >= 0))
+ emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
+ return;
+ }
+ }
+ }
+ emitir(IRTGI(IR_ABC), asizeref, ikey); /* Emit regular bounds check. */
+}
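+
+/* Worked example (illustrative): in `for i=1,n do s=s+t[i] end`, if the
+** runtime value of n lies within t's array size, the scalar-evolution
+** branch emits one hoistable IR_ABC against the loop stop value instead of
+** a bounds check on every iteration.
+*/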
+
+/* Record indexed key lookup. */
+static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref,
+ IRType1 *rbguard)
+{
+ TRef key;
+ GCtab *t = tabV(&ix->tabv);
+ ix->oldv = lj_tab_get(J->L, t, &ix->keyv); /* Lookup previous value. */
+ *rbref = 0;
+ rbguard->irt = 0;
+
+ /* Integer keys are looked up in the array part first. */
+ key = ix->key;
+ if (tref_isnumber(key)) {
+ int32_t k = numberVint(&ix->keyv);
+ if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
+ k = LJ_MAX_ASIZE;
+ if ((MSize)k < LJ_MAX_ASIZE) { /* Potential array key? */
+ TRef ikey = lj_opt_narrow_index(J, key);
+ TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ if ((MSize)k < t->asize) { /* Currently an array key? */
+ TRef arrayref;
+ rec_idx_abc(J, asizeref, ikey, t->asize);
+ arrayref = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_ARRAY);
+ return emitir(IRT(IR_AREF, IRT_PGC), arrayref, ikey);
+ } else { /* Currently not in array (may be an array extension)? */
+ emitir(IRTGI(IR_ULE), asizeref, ikey); /* Inv. bounds check. */
+ if (k == 0 && tref_isk(key))
+ key = lj_ir_knum_zero(J); /* Canonicalize 0 or +-0.0 to +0.0. */
+ /* And continue with the hash lookup. */
+ }
+ } else if (!tref_isk(key)) {
+ /* We can rule out const numbers which failed the integerness test
+ ** above. But all other numbers are potential array keys.
+ */
+ if (t->asize == 0) { /* True sparse tables have an empty array part. */
+ /* Guard that the array part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ } else {
+ lj_trace_err(J, LJ_TRERR_NYITMIX);
+ }
+ }
+ }
+
+ /* Otherwise the key is located in the hash part. */
+ if (t->hmask == 0) { /* Shortcut for empty hash part. */
+ /* Guard that the hash part stays empty. */
+ TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
+ return lj_ir_kkptr(J, niltvg(J2G(J)));
+ }
+ if (tref_isinteger(key)) /* Hash keys are based on numbers, not ints. */
+ key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ if (tref_isk(key)) {
+ /* Optimize lookup of constant hash keys. */
+ GCSize hslot = (GCSize)((char *)ix->oldv-(char *)&noderef(t->node)[0].val);
+ if (hslot <= t->hmask*(GCSize)sizeof(Node) &&
+ hslot <= 65535*(GCSize)sizeof(Node)) {
+ TRef node, kslot, hm;
+ *rbref = J->cur.nins; /* Mark possible rollback point. */
+ *rbguard = J->guardemit;
+ hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
+ emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
+ node = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_NODE);
+ kslot = lj_ir_kslot(J, key, (IRRef)(hslot / sizeof(Node)));
+ return emitir(IRTG(IR_HREFK, IRT_PGC), node, kslot);
+ }
+ }
+ /* Fall back to a regular hash lookup. */
+ return emitir(IRT(IR_HREF, IRT_PGC), ix->tab, key);
+}
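+
+/* Example (illustrative): a constant key lookup like t.name on a table
+** whose hash part is unchanged takes the HREFK path above: the hash mask
+** is guarded once and the node is addressed directly via a key slot
+** constant, instead of a full IR_HREF hash lookup.
+*/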
+
+/* Determine whether a key is NOT one of the fast metamethod names. */
+static int nommstr(jit_State *J, TRef key)
+{
+ if (tref_isstr(key)) {
+ if (tref_isk(key)) {
+ GCstr *str = ir_kstr(IR(tref_ref(key)));
+ uint32_t mm;
+ for (mm = 0; mm <= MM_FAST; mm++)
+ if (mmname_str(J2G(J), mm) == str)
+ return 0; /* MUST be one of the fast metamethod names. */
+ } else {
+ return 0; /* Variable string key MAY be a metamethod name. */
+ }
+ }
+ return 1; /* CANNOT be a metamethod name. */
+}
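+
+/* Example (illustrative): a store like t.__index = f may define a fast
+** metamethod, so nommstr() returns 0 and lj_record_idx() below invalidates
+** the negative metamethod cache; a store to t.foo needs no invalidation.
+*/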
+
+/* Record indexed load/store. */
+TRef lj_record_idx(jit_State *J, RecordIndex *ix)
+{
+ TRef xref;
+ IROp xrefop, loadop;
+ IRRef rbref;
+ IRType1 rbguard;
+ cTValue *oldv;
+
+ while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
+ /* Never call raw lj_record_idx() on non-table. */
+ lj_assertJ(ix->idxchain != 0, "bad usage");
+ if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
+ lj_trace_err(J, LJ_TRERR_NOMM);
+ handlemm:
+ if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
+ BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
+ TRef *base = J->base + func + LJ_FR2;
+ TValue *tv = J->L->base + func + LJ_FR2;
+ base[-LJ_FR2] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
+ setfuncV(J->L, tv-LJ_FR2, funcV(&ix->mobjv));
+ copyTV(J->L, tv+1, &ix->tabv);
+ copyTV(J->L, tv+2, &ix->keyv);
+ if (ix->val) {
+ base[3] = ix->val;
+ copyTV(J->L, tv+3, &ix->valv);
+ lj_record_call(J, func, 3); /* mobj(tab, key, val) */
+ return 0;
+ } else {
+ lj_record_call(J, func, 2); /* res = mobj(tab, key) */
+ return 0; /* No result yet. */
+ }
+ }
+#if LJ_HASBUFFER
+ /* The index table of buffer objects is treated as immutable. */
+ if (ix->mt == TREF_NIL && !ix->val &&
+ tref_isudata(ix->tab) && udataV(&ix->tabv)->udtype == UDTYPE_BUFFER &&
+ tref_istab(ix->mobj) && tref_isstr(ix->key) && tref_isk(ix->key)) {
+ cTValue *val = lj_tab_getstr(tabV(&ix->mobjv), strV(&ix->keyv));
+ TRef tr = lj_record_constify(J, val);
+ if (tr) return tr; /* Specialize to the value, i.e. a method. */
+ }
+#endif
+ /* Otherwise retry lookup with metaobject. */
+ ix->tab = ix->mobj;
+ copyTV(J->L, &ix->tabv, &ix->mobjv);
+ if (--ix->idxchain == 0)
+ lj_trace_err(J, LJ_TRERR_IDXLOOP);
+ }
+
+ /* First catch nil and NaN keys for tables. */
+ if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
+ if (ix->val) /* Better fail early. */
+ lj_trace_err(J, LJ_TRERR_STORENN);
+ if (tref_isk(ix->key)) {
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ return TREF_NIL;
+ }
+ }
+
+ /* Record the key lookup. */
+ xref = rec_idx_key(J, ix, &rbref, &rbguard);
+ xrefop = IR(tref_ref(xref))->o;
+ loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
+ /* The lj_meta_tset() inconsistency is gone, but better to play it safe. */
+ oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
+
+ if (ix->val == 0) { /* Indexed load. */
+ IRType t = itype2irt(oldv);
+ TRef res;
+ if (oldv == niltvg(J2G(J))) {
+ emitir(IRTG(IR_EQ, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ res = TREF_NIL;
+ } else {
+ res = emitir(IRTG(loadop, t), xref, 0);
+ }
+ if (tref_ref(res) < rbref) { /* HREFK + load forwarded? */
+ lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */
+ J->guardemit = rbguard;
+ }
+ if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
+ goto handlemm;
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitives. */
+ return res;
+ } else { /* Indexed store. */
+ GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
+ int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
+ if (tref_ref(xref) < rbref) { /* HREFK forwarded? */
+ lj_ir_rollback(J, rbref); /* Rollback to eliminate hmask guard. */
+ J->guardemit = rbguard;
+ }
+ if (tvisnil(oldv)) { /* Previous value was nil? */
+ /* Need to duplicate the hasmm check for the early guards. */
+ int hasmm = 0;
+ if (ix->idxchain && mt) {
+ cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
+ hasmm = mo && !tvisnil(mo);
+ }
+ if (hasmm)
+ emitir(IRTG(loadop, IRT_NIL), xref, 0); /* Guard for nil value. */
+ else if (xrefop == IR_HREF)
+ emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC),
+ xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
+ lj_assertJ(hasmm, "inconsistent metamethod handling");
+ goto handlemm;
+ }
+ lj_assertJ(!hasmm, "inconsistent metamethod handling");
+ if (oldv == niltvg(J2G(J))) { /* Need to insert a new key. */
+ TRef key = ix->key;
+ if (tref_isinteger(key)) /* NEWREF needs a TValue as a key. */
+ key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
+ xref = emitir(IRT(IR_NEWREF, IRT_PGC), ix->tab, key);
+ keybarrier = 0; /* NEWREF already takes care of the key barrier. */
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ if ((J->flags & JIT_F_OPT_SINK)) /* Avoid a separate flag. */
+ rec_idx_bump(J, ix);
+#endif
+ }
+ } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
+ /* Cannot derive that the previous value was non-nil, must do checks. */
+ if (xrefop == IR_HREF) /* Guard against store to niltv. */
+ emitir(IRTG(IR_NE, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
+ if (ix->idxchain) { /* Metamethod lookup required? */
+ /* A check for NULL metatable is cheaper (hoistable) than a load. */
+ if (!mt) {
+ TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
+ emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
+ } else {
+ IRType t = itype2irt(oldv);
+ emitir(IRTG(loadop, t), xref, 0); /* Guard for non-nil value. */
+ }
+ }
+ } else {
+ keybarrier = 0; /* Previous non-nil value kept the key alive. */
+ }
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(ix->val))
+ ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
+ emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
+ if (keybarrier || tref_isgcv(ix->val))
+ emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
+ /* Invalidate neg. metamethod cache for stores with certain string keys. */
+ if (!nommstr(J, ix->key)) {
+ TRef fref = emitir(IRT(IR_FREF, IRT_PGC), ix->tab, IRFL_TAB_NOMM);
+ emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
+ }
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* Determine result type of table traversal. */
+static IRType rec_next_types(GCtab *t, uint32_t idx)
+{
+ for (; idx < t->asize; idx++) {
+ cTValue *a = arrayslot(t, idx);
+ if (LJ_LIKELY(!tvisnil(a)))
+ return (LJ_DUALNUM ? IRT_INT : IRT_NUM) + (itype2irt(a) << 8);
+ }
+ idx -= t->asize;
+ for (; idx <= t->hmask; idx++) {
+ Node *n = &noderef(t->node)[idx];
+ if (!tvisnil(&n->val))
+ return itype2irt(&n->key) + (itype2irt(&n->val) << 8);
+ }
+ return IRT_NIL + (IRT_NIL << 8);
+}
+
+/* Record a table traversal step aka next(). */
+int lj_record_next(jit_State *J, RecordIndex *ix)
+{
+ IRType t, tkey, tval;
+ TRef trvk;
+ t = rec_next_types(tabV(&ix->tabv), ix->keyv.u32.lo);
+ tkey = (t & 0xff); tval = (t >> 8);
+ trvk = lj_ir_call(J, IRCALL_lj_vm_next, ix->tab, ix->key);
+ if (ix->mobj || tkey == IRT_NIL) {
+ TRef idx = emitir(IRTI(IR_HIOP), trvk, trvk);
+ /* For a nil result, always check for an invalid key from next(). */
+ if (!ix->mobj) emitir(IRTGI(IR_NE), idx, lj_ir_kint(J, -1));
+ ix->mobj = idx;
+ }
+ ix->key = lj_record_vload(J, trvk, 1, tkey);
+ if (tkey == IRT_NIL || ix->idxchain) { /* Omit value type check. */
+ ix->val = TREF_NIL;
+ return 1;
+ } else { /* Need value. */
+ ix->val = lj_record_vload(J, trvk, 0, tval);
+ return 2;
+ }
+}
+
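+/* Record a TSETM bytecode: store multiple values from consecutive slots
+** into a table at integer keys starting from index i.
+*/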
+static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i)
+{
+ RecordIndex ix;
+ cTValue *basev = J->L->base;
+ GCtab *t = tabV(&basev[ra-1]);
+ settabV(J->L, &ix.tabv, t);
+ ix.tab = getslot(J, ra-1);
+ ix.idxchain = 0;
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ if ((J->flags & JIT_F_OPT_SINK)) {
+ if (t->asize < i+rn-ra)
+ lj_tab_reasize(J->L, t, i+rn-ra);
+ setnilV(&ix.keyv);
+ rec_idx_bump(J, &ix);
+ }
+#endif
+ for (; ra < rn; i++, ra++) {
+ setintV(&ix.keyv, i);
+ ix.key = lj_ir_kint(J, i);
+ copyTV(J->L, &ix.valv, &basev[ra]);
+ ix.val = getslot(J, ra);
+ lj_record_idx(J, &ix);
+ }
+}
+
+/* -- Upvalue access ------------------------------------------------------ */
+
+/* Check whether upvalue is immutable and ok to constify. */
+static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
+{
+ if (uvp->immutable) {
+ cTValue *o = uvval(uvp);
+ /* Don't constify objects that may retain large amounts of memory. */
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) {
+ CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
+ if (!ctype_hassize(ct->info) || ct->size <= 16)
+ return 1;
+ }
+ return 0;
+ }
+#else
+ UNUSED(J);
+#endif
+ if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))
+ return 1;
+ }
+ return 0;
+}
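+
+/* Example (illustrative): in
+**   local PI = 3.14159
+**   return function() return PI end
+** the upvalue PI is immutable, so reads of it are constified to an IR
+** constant instead of going through UREF/ULOAD.
+*/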
+
+/* Record upvalue load/store. */
+static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
+{
+ GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
+ TRef fn = getcurrf(J);
+ IRRef uref;
+ int needbarrier = 0;
+ if (rec_upvalue_constify(J, uvp)) { /* Try to constify immutable upvalue. */
+ TRef tr, kfunc;
+ lj_assertJ(val == 0, "bad usage");
+ if (!tref_isk(fn)) { /* Late specialization of current function. */
+ if (J->pt->flags >= PROTO_CLC_POLY)
+ goto noconstify;
+ kfunc = lj_ir_kfunc(J, J->fn);
+ emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
+#if LJ_FR2
+ J->base[-2] = kfunc;
+#else
+ J->base[-1] = kfunc | TREF_FRAME;
+#endif
+ fn = kfunc;
+ }
+ tr = lj_record_constify(J, uvval(uvp));
+ if (tr)
+ return tr;
+ }
+noconstify:
+ /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
+ uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
+ if (!uvp->closed) {
+ uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_PGC), fn, uv));
+ /* In current stack? */
+ if (uvval(uvp) >= tvref(J->L->stack) &&
+ uvval(uvp) < tvref(J->L->maxstack)) {
+ int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
+ if (slot >= 0) { /* Aliases an SSA slot? */
+ emitir(IRTG(IR_EQ, IRT_PGC),
+ REF_BASE,
+ emitir(IRT(IR_ADD, IRT_PGC), uref,
+ lj_ir_kint(J, (slot - 1 - LJ_FR2) * -8)));
+ slot -= (int32_t)J->baseslot; /* Note: slot number may be negative! */
+ if (val == 0) {
+ return getslot(J, slot);
+ } else {
+ J->base[slot] = val;
+ if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
+ return 0;
+ }
+ }
+ }
+ emitir(IRTG(IR_UGT, IRT_PGC),
+ emitir(IRT(IR_SUB, IRT_PGC), uref, REF_BASE),
+ lj_ir_kint(J, (J->baseslot + J->maxslot) * 8));
+ } else {
+ needbarrier = 1;
+ uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_PGC), fn, uv));
+ }
+ if (val == 0) { /* Upvalue load. */
+ IRType t = itype2irt(uvval(uvp));
+ TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
+ if (irtype_ispri(t)) res = TREF_PRI(t); /* Canonicalize primitive refs. */
+ return res;
+ } else { /* Upvalue store. */
+ /* Convert int to number before storing. */
+ if (!LJ_DUALNUM && tref_isinteger(val))
+ val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
+ emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
+ if (needbarrier && tref_isgcv(val))
+ emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
+ J->needsnap = 1;
+ return 0;
+ }
+}
+
+/* -- Record calls to Lua functions --------------------------------------- */
+
+/* Check unroll limits for calls. */
+static void check_call_unroll(jit_State *J, TraceNo lnk)
+{
+ cTValue *frame = J->L->base - 1;
+ void *pc = mref(frame_func(frame)->l.pc, void);
+ int32_t depth = J->framedepth;
+ int32_t count = 0;
+ if ((J->pt->flags & PROTO_VARARG)) depth--; /* Vararg frame still missing. */
+ for (; depth > 0; depth--) { /* Count frames with same prototype. */
+ if (frame_iscont(frame)) depth--;
+ frame = frame_prev(frame);
+ if (mref(frame_func(frame)->l.pc, void) == pc)
+ count++;
+ }
+ if (J->pc == J->startpc) {
+ if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
+ J->pc++;
+ if (J->framedepth + J->retdepth == 0)
+ lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Tail-rec. */
+ else
+ lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno); /* Up-recursion. */
+ }
+ } else {
+ if (count > J->param[JIT_P_callunroll]) {
+ if (lnk) { /* Possible tail- or up-recursion. */
+ lj_trace_flush(J, lnk); /* Flush trace that only returns. */
+ /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
+ hotcount_set(J2GG(J), J->pc+1, lj_prng_u64(&J2G(J)->prng) & 15u);
+ }
+ lj_trace_err(J, LJ_TRERR_CUNROLL);
+ }
+ }
+}
+
+/* Record Lua function setup. */
+static void rec_func_setup(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, numparams = pt->numparams;
+ if ((pt->flags & PROTO_NOJIT))
+ lj_trace_err(J, LJ_TRERR_CJITOFF);
+ if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ /* Fill up missing parameters with nil. */
+ for (s = J->maxslot; s < numparams; s++)
+ J->base[s] = TREF_NIL;
+ /* The remaining slots should never be read before they are written. */
+ J->maxslot = numparams;
+}
+
+/* Record Lua vararg function setup. */
+static void rec_func_vararg(jit_State *J)
+{
+ GCproto *pt = J->pt;
+ BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2;
+ lj_assertJ((pt->flags & PROTO_VARARG), "FUNCV in non-vararg function");
+ if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2]; /* Copy function up. */
+#if LJ_FR2
+ J->base[vframe-1] = TREF_FRAME;
+#endif
+ /* Copy fixarg slots up and set their original slots to nil. */
+ fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
+ for (s = 0; s < fixargs; s++) {
+ J->base[vframe+s] = J->base[s];
+ J->base[s] = TREF_NIL;
+ }
+ J->maxslot = fixargs;
+ J->framedepth++;
+ J->base += vframe;
+ J->baseslot += vframe;
+}
+
+/* Record entry to a Lua function. */
+static void rec_func_lua(jit_State *J)
+{
+ rec_func_setup(J);
+ check_call_unroll(J, 0);
+}
+
+/* Record entry to an already compiled function. */
+static void rec_func_jit(jit_State *J, TraceNo lnk)
+{
+ GCtrace *T;
+ rec_func_setup(J);
+ T = traceref(J, lnk);
+ if (T->linktype == LJ_TRLINK_RETURN) { /* Trace returns to interpreter? */
+ check_call_unroll(J, lnk);
+ /* Temporarily unpatch JFUNC* to continue recording across function. */
+ J->patchins = *J->pc;
+ J->patchpc = (BCIns *)J->pc;
+ *J->patchpc = T->startins;
+ return;
+ }
+ J->instunroll = 0; /* Cannot continue across a compiled function. */
+ if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
+ lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno); /* Extra tail-rec. */
+ else
+ lj_record_stop(J, LJ_TRLINK_ROOT, lnk); /* Link to the function. */
+}
+
+/* -- Vararg handling ----------------------------------------------------- */
+
+/* Detect y = select(x, ...) idiom. */
+static int select_detect(jit_State *J)
+{
+ BCIns ins = J->pc[1];
+ if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
+ cTValue *func = &J->L->base[bc_a(ins)];
+ if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) {
+ TRef kfunc = lj_ir_kfunc(J, funcV(func));
+ emitir(IRTG(IR_EQ, IRT_FUNC), getslot(J, bc_a(ins)), kfunc);
+ return 1;
+ }
+ }
+ return 0;
+}
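+/* A CALLM with B = 2 and C = 1 calls with one fixed argument plus all
+** varargs and yields exactly one result, i.e. the shape of select(x, ...).
+** The guard emitted above pins the callee to the select() fast function.
+*/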
+
+/* Record vararg instruction. */
+static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
+{
+ int32_t numparams = J->pt->numparams;
+ ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2;
+ lj_assertJ(frame_isvarg(J->L->base-1), "VARG in non-vararg frame");
+ if (LJ_FR2 && dst > J->maxslot)
+ J->base[dst-1] = 0; /* Prevent resurrection of unrelated slot. */
+ if (J->framedepth > 0) { /* Simple case: varargs defined on-trace. */
+ ptrdiff_t i;
+ if (nvararg < 0) nvararg = 0;
+ if (nresults == -1) {
+ nresults = nvararg;
+ J->maxslot = dst + (BCReg)nvararg;
+ } else if (dst + nresults > J->maxslot) {
+ J->maxslot = dst + (BCReg)nresults;
+ }
+ for (i = 0; i < nresults; i++)
+ J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1 - LJ_FR2) : TREF_NIL;
+ } else { /* Unknown number of varargs passed to trace. */
+ TRef fr = emitir(IRTI(IR_SLOAD), LJ_FR2, IRSLOAD_READONLY|IRSLOAD_FRAME);
+ int32_t frofs = 8*(1+LJ_FR2+numparams)+FRAME_VARG;
+ if (nresults >= 0) { /* Known fixed number of results. */
+ ptrdiff_t i;
+ if (nvararg > 0) {
+ ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
+ TRef vbase;
+ if (nvararg >= nresults)
+ emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
+ else
+ emitir(IRTGI(IR_EQ), fr,
+ lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1)));
+ vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase, lj_ir_kint(J, frofs-8*(1+LJ_FR2)));
+ for (i = 0; i < nload; i++) {
+ IRType t = itype2irt(&J->L->base[i-1-LJ_FR2-nvararg]);
+ J->base[dst+i] = lj_record_vload(J, vbase, (MSize)i, t);
+ }
+ } else {
+ emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
+ nvararg = 0;
+ }
+ for (i = nvararg; i < nresults; i++)
+ J->base[dst+i] = TREF_NIL;
+ if (dst + (BCReg)nresults > J->maxslot)
+ J->maxslot = dst + (BCReg)nresults;
+ } else if (select_detect(J)) { /* y = select(x, ...) */
+ TRef tridx = J->base[dst-1];
+ TRef tr = TREF_NIL;
+ ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
+ if (idx < 0) goto nyivarg;
+ if (idx != 0 && !tref_isinteger(tridx))
+ tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
+ if (idx != 0 && tref_isk(tridx)) {
+ emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
+ fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
+ frofs -= 8; /* Bias for 1-based index. */
+ } else if (idx <= nvararg) { /* Compute size. */
+ TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
+ if (numparams)
+ emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
+ tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
+ if (idx != 0) {
+ tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
+ rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
+ }
+ } else {
+ TRef tmp = lj_ir_kint(J, frofs);
+ if (idx != 0) {
+ TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
+ tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
+ } else {
+ tr = lj_ir_kint(J, 0);
+ }
+ emitir(IRTGI(IR_LT), fr, tmp);
+ }
+ if (idx != 0 && idx <= nvararg) {
+ IRType t;
+ TRef aref, vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
+ vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase,
+ lj_ir_kint(J, frofs-(8<<LJ_FR2)));
+ t = itype2irt(&J->L->base[idx-2-LJ_FR2-nvararg]);
+ aref = emitir(IRT(IR_AREF, IRT_PGC), vbase, tridx);
+ tr = lj_record_vload(J, aref, 0, t);
+ }
+ J->base[dst-2-LJ_FR2] = tr;
+ J->maxslot = dst-1-LJ_FR2;
+ J->bcskip = 2; /* Skip CALLM + select. */
+ } else {
+ nyivarg:
+ setintV(&J->errinfo, BC_VARG);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ }
+ }
+ if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+}
+
+/* -- Record allocations -------------------------------------------------- */
+
+static TRef rec_tnew(jit_State *J, uint32_t ah)
+{
+ uint32_t asize = ah & 0x7ff;
+ uint32_t hbits = ah >> 11;
+ TRef tr;
+ if (asize == 0x7ff) asize = 0x801;
+ tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr);
+ setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc);
+ setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
+#endif
+ return tr;
+}
+
+/* -- Concatenation ------------------------------------------------------- */
+
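+/* Record concatenation. Scan down from the top operand while operands are
+** strings or numbers, convert numbers to strings and fold the run into a
+** BUFHDR/BUFPUT/BUFSTR chain. A remaining non-string operand stores the
+** partial result and falls back to the __concat metamethod.
+*/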
+static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot)
+{
+ TRef *top = &J->base[topslot];
+ TValue savetv[5+LJ_FR2];
+ BCReg s;
+ RecordIndex ix;
+ lj_assertJ(baseslot < topslot, "bad CAT arg");
+ for (s = baseslot; s <= topslot; s++)
+ (void)getslot(J, s); /* Ensure all arguments have a reference. */
+ if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) {
+ TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot];
+ /* First convert numbers to strings. */
+ for (trp = top; trp >= base; trp--) {
+ if (tref_isnumber(*trp))
+ *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp,
+ tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT);
+ else if (!tref_isstr(*trp))
+ break;
+ }
+ xbase = ++trp;
+ tr = hdr = emitir(IRT(IR_BUFHDR, IRT_PGC),
+ lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
+ do {
+ tr = emitir(IRTG(IR_BUFPUT, IRT_PGC), tr, *trp++);
+ } while (trp <= top);
+ tr = emitir(IRTG(IR_BUFSTR, IRT_STR), tr, hdr);
+ J->maxslot = (BCReg)(xbase - J->base);
+ if (xbase == base) return tr; /* Return simple concatenation result. */
+ /* Pass partial result. */
+ topslot = J->maxslot--;
+ *xbase = tr;
+ top = xbase;
+ setstrV(J->L, &ix.keyv, &J2G(J)->strempty); /* Simulate string result. */
+ } else {
+ J->maxslot = topslot-1;
+ copyTV(J->L, &ix.keyv, &J->L->base[topslot]);
+ }
+ copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]);
+ ix.tab = top[-1];
+ ix.key = top[0];
+ memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv)); /* Save slots. */
+ rec_mm_arith(J, &ix, MM_concat); /* Call __concat metamethod. */
+ memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv)); /* Restore slots. */
+ return 0; /* No result yet. */
+}
+
+/* -- Record bytecode ops ------------------------------------------------- */
+
+/* Prepare for comparison. */
+static void rec_comp_prep(jit_State *J)
+{
+ /* Prevent merging with snapshot #0 (GC exit) since we fix up the PC. */
+ if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
+ emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
+ lj_snap_add(J);
+}
+
+/* Fixup comparison. */
+static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
+{
+ BCIns jmpins = pc[1];
+ const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ /* Set PC to opposite target to avoid re-recording the comparison in a side trace. */
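+ /* The LJ_FR2 frame link packs the base slot into the low byte and the
+ ** PC into the upper bits, so only the PC part is replaced here.
+ */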
+#if LJ_FR2
+ SnapEntry *flink = &J->cur.snapmap[snap->mapofs + snap->nent];
+ uint64_t pcbase;
+ memcpy(&pcbase, flink, sizeof(uint64_t));
+ pcbase = (pcbase & 0xff) | (u64ptr(npc) << 8);
+ memcpy(flink, &pcbase, sizeof(uint64_t));
+#else
+ J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
+#endif
+ J->needsnap = 1;
+ if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
+ lj_snap_shrink(J); /* Shrink last snapshot if possible. */
+}
+
+/* Record the next bytecode instruction (_before_ it's executed). */
+void lj_record_ins(jit_State *J)
+{
+ cTValue *lbase;
+ RecordIndex ix;
+ const BCIns *pc;
+ BCIns ins;
+ BCOp op;
+ TRef ra, rb, rc;
+
+ /* Perform post-processing action before recording the next instruction. */
+ if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
+ switch (J->postproc) {
+ case LJ_POST_FIXCOMP: /* Fixup comparison. */
+ pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64;
+ rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
+ /* fallthrough */
+ case LJ_POST_FIXGUARD: /* Fixup and emit pending guard. */
+ case LJ_POST_FIXGUARDSNAP: /* Fixup and emit pending guard and snapshot. */
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ J->fold.ins.o ^= 1; /* Flip guard to opposite. */
+ if (J->postproc == LJ_POST_FIXGUARDSNAP) {
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ J->cur.snapmap[snap->mapofs+snap->nent-1]--; /* False -> true. */
+ }
+ }
+ lj_opt_fold(J); /* Emit pending guard. */
+ /* fallthrough */
+ case LJ_POST_FIXBOOL:
+ if (!tvistruecond(&J2G(J)->tmptv2)) {
+ BCReg s;
+ TValue *tv = J->L->base;
+ for (s = 0; s < J->maxslot; s++) /* Fixup stack slot (if any). */
+ if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
+ J->base[s] = TREF_FALSE;
+ break;
+ }
+ }
+ break;
+ case LJ_POST_FIXCONST:
+ {
+ BCReg s;
+ TValue *tv = J->L->base;
+ for (s = 0; s < J->maxslot; s++) /* Constify stack slots (if any). */
+ if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
+ J->base[s] = lj_record_constify(J, &tv[s]);
+ }
+ break;
+ case LJ_POST_FFRETRY: /* Suppress recording of retried fast function. */
+ if (bc_op(*J->pc) >= BC__MAX)
+ return;
+ break;
+ default: lj_assertJ(0, "bad post-processing mode"); break;
+ }
+ J->postproc = LJ_POST_NONE;
+ }
+
+ /* Need snapshot before recording next bytecode (e.g. after a store). */
+ if (J->needsnap) {
+ J->needsnap = 0;
+ if (J->pt) lj_snap_purge(J);
+ lj_snap_add(J);
+ J->mergesnap = 1;
+ }
+
+ /* Skip some bytecodes. */
+ if (LJ_UNLIKELY(J->bcskip > 0)) {
+ J->bcskip--;
+ return;
+ }
+
+ /* Record only closed loops for root traces. */
+ pc = J->pc;
+ if (J->framedepth == 0 &&
+ (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
+ lj_trace_err(J, LJ_TRERR_LLEAVE);
+
+#ifdef LUA_USE_ASSERT
+ rec_check_slots(J);
+ rec_check_ir(J);
+#endif
+
+#if LJ_HASPROFILE
+ rec_profile_ins(J, pc);
+#endif
+
+ /* Keep a copy of the runtime values of var/num/str operands. */
+#define rav (&ix.valv)
+#define rbv (&ix.tabv)
+#define rcv (&ix.keyv)
+
+ lbase = J->L->base;
+ ins = *pc;
+ op = bc_op(ins);
+ ra = bc_a(ins);
+ ix.val = 0;
+ switch (bcmode_a(op)) {
+ case BCMvar:
+ copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
+ default: break; /* Handled later. */
+ }
+ rb = bc_b(ins);
+ rc = bc_c(ins);
+ switch (bcmode_b(op)) {
+ case BCMnone: rb = 0; rc = bc_d(ins); break; /* Upgrade rc to 'rd'. */
+ case BCMvar:
+ copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
+ default: break; /* Handled later. */
+ }
+ switch (bcmode_c(op)) {
+ case BCMvar:
+ copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
+ case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
+ case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
+ copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
+ tv->u32.hi == LJ_KEYINDEX ? (lj_ir_kint(J, 0) | TREF_KEYINDEX) :
+ lj_ir_knumint(J, numV(tv)); } break;
+ case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
+ setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
+ default: break; /* Handled later. */
+ }
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
+ break;
+ }
+#endif
+ /* Emit nothing for two numeric or string consts. */
+ if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
+ IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
+ IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
+ int irop;
+ if (ta != tc) {
+ /* Widen mixed number/int comparisons to number/number comparison. */
+ if (ta == IRT_INT && tc == IRT_NUM) {
+ ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
+ ta = IRT_NUM;
+ } else if (ta == IRT_NUM && tc == IRT_INT) {
+ rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
+ } else if (LJ_52) {
+ ta = IRT_NIL; /* Force metamethod for different types. */
+ } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
+ (tc == IRT_FALSE || tc == IRT_TRUE))) {
+ break; /* Interpreter will throw for two different types. */
+ }
+ }
+ rec_comp_prep(J);
+ irop = (int)op - (int)BC_ISLT + (int)IR_LT;
+ if (ta == IRT_NUM) {
+ if ((irop & 1)) irop ^= 4; /* ISGE/ISGT are unordered. */
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 5;
+ } else if (ta == IRT_INT) {
+ if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
+ irop ^= 1;
+ } else if (ta == IRT_STR) {
+ if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
+ ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
+ rc = lj_ir_kint(J, 0);
+ ta = IRT_INT;
+ } else {
+ rec_mm_comp(J, &ix, (int)op);
+ break;
+ }
+ emitir(IRTG(irop, ta), ra, rc);
+ rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ case BC_ISEQS: case BC_ISNES:
+ case BC_ISEQN: case BC_ISNEN:
+ case BC_ISEQP: case BC_ISNEP:
+#if LJ_HASFFI
+ if (tref_iscdata(ra) || tref_iscdata(rc)) {
+ rec_mm_comp_cdata(J, &ix, op, MM_eq);
+ break;
+ }
+#endif
+ /* Emit nothing for two non-table, non-udata consts. */
+ if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
+ int diff;
+ rec_comp_prep(J);
+ diff = lj_record_objcmp(J, ra, rc, rav, rcv);
+ if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
+ rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
+ else if (diff == 1) /* Only check __eq if different, but same type. */
+ rec_mm_equal(J, &ix, (int)op);
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC:
+ if ((op & 1) == tref_istruecond(rc))
+ rc = 0; /* Don't store if condition is not true. */
+ /* fallthrough */
+ case BC_IST: case BC_ISF: /* Type specialization suffices. */
+ if (bc_a(pc[1]) < J->maxslot)
+ J->maxslot = bc_a(pc[1]); /* Shrink used slots. */
+ break;
+
+ case BC_ISTYPE: case BC_ISNUM:
+ /* These coercions need to correspond with lj_meta_istype(). */
+ if (LJ_DUALNUM && rc == ~LJ_TNUMX+1)
+ ra = lj_opt_narrow_toint(J, ra);
+ else if (rc == ~LJ_TNUMX+2)
+ ra = lj_ir_tonum(J, ra);
+ else if (rc == ~LJ_TSTR+1)
+ ra = lj_ir_tostr(J, ra);
+ /* else: type specialization suffices. */
+ J->base[bc_a(ins)] = ra;
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_NOT:
+ /* Type specialization already forces const result. */
+ rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
+ break;
+
+ case BC_LEN:
+ if (tref_isstr(rc))
+ rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
+ else if (!LJ_52 && tref_istab(rc))
+ rc = emitir(IRTI(IR_ALEN), rc, TREF_NIL);
+ else
+ rc = rec_mm_len(J, rc, rcv);
+ break;
+
+ /* -- Arithmetic ops ---------------------------------------------------- */
+
+ case BC_UNM:
+ if (tref_isnumber_str(rc)) {
+ rc = lj_opt_narrow_unm(J, rc, rcv);
+ } else {
+ ix.tab = rc;
+ copyTV(J->L, &ix.tabv, rcv);
+ rc = rec_mm_arith(J, &ix, MM_unm);
+ }
+ break;
+
+ case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
+ /* Swap rb/rc and rbv/rcv. rav is temp. */
+ ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
+ copyTV(J->L, rav, rbv);
+ copyTV(J->L, rbv, rcv);
+ copyTV(J->L, rcv, rav);
+ if (op == BC_MODNV)
+ goto recmod;
+ /* fallthrough */
+ case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
+ case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
+ MMS mm = bcmode_mm(op);
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
+ (int)mm - (int)MM_add + (int)IR_ADD);
+ else
+ rc = rec_mm_arith(J, &ix, mm);
+ break;
+ }
+
+ case BC_MODVN: case BC_MODVV:
+ recmod:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_mod(J, rb, rc, rbv, rcv);
+ else
+ rc = rec_mm_arith(J, &ix, MM_mod);
+ break;
+
+ case BC_POW:
+ if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
+ rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv, IR_POW);
+ else
+ rc = rec_mm_arith(J, &ix, MM_pow);
+ break;
+
+ /* -- Miscellaneous ops ------------------------------------------------- */
+
+ case BC_CAT:
+ rc = rec_cat(J, rb, rc);
+ break;
+
+ /* -- Constant and move ops --------------------------------------------- */
+
+ case BC_MOV:
+ /* Clear gap of method call to avoid resurrecting previous refs. */
+ if (ra > J->maxslot) {
+#if LJ_FR2
+ memset(J->base + J->maxslot, 0, (ra - J->maxslot) * sizeof(TRef));
+#else
+ J->base[ra-1] = 0;
+#endif
+ }
+ break;
+ case BC_KSTR: case BC_KNUM: case BC_KPRI:
+ break;
+ case BC_KSHORT:
+ rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
+ break;
+ case BC_KNIL:
+ if (LJ_FR2 && ra > J->maxslot)
+ J->base[ra-1] = 0;
+ while (ra <= rc)
+ J->base[ra++] = TREF_NIL;
+ if (rc >= J->maxslot) J->maxslot = rc+1;
+ break;
+#if LJ_HASFFI
+ case BC_KCDATA:
+ rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
+ break;
+#endif
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ rc = rec_upvalue(J, rc, 0);
+ break;
+ case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
+ rec_upvalue(J, ra, rc);
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_GGET: case BC_GSET:
+ settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
+ ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TGETB: case BC_TSETB:
+ setintV(&ix.keyv, (int32_t)rc);
+ ix.key = lj_ir_kint(J, (int32_t)rc);
+ /* fallthrough */
+ case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
+ ix.idxchain = LJ_MAX_IDXCHAIN;
+ rc = lj_record_idx(J, &ix);
+ break;
+ case BC_TGETR: case BC_TSETR:
+ ix.idxchain = 0;
+ rc = lj_record_idx(J, &ix);
+ break;
+
+ case BC_TSETM:
+ rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo);
+ break;
+
+ case BC_TNEW:
+ rc = rec_tnew(J, rc);
+ break;
+ case BC_TDUP:
+ rc = emitir(IRTG(IR_TDUP, IRT_TAB),
+ lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc);
+ setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc);
+ setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
+#endif
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_ITERC:
+ J->base[ra] = getslot(J, ra-3);
+ J->base[ra+1+LJ_FR2] = getslot(J, ra-2);
+ J->base[ra+2+LJ_FR2] = getslot(J, ra-1);
+ { /* Do the actual copy now because lj_record_call needs the values. */
+ TValue *b = &J->L->base[ra];
+ copyTV(J->L, b, b-3);
+ copyTV(J->L, b+1+LJ_FR2, b-2);
+ copyTV(J->L, b+2+LJ_FR2, b-1);
+ }
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
+ case BC_CALLM:
+ rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
+ /* fallthrough */
+ case BC_CALL:
+ lj_record_call(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_CALLMT:
+ rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
+ /* fallthrough */
+ case BC_CALLT:
+ lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ case BC_VARG:
+ rec_varg(J, ra, (ptrdiff_t)rb-1);
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
+ rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
+ /* fallthrough */
+ case BC_RET: case BC_RET0: case BC_RET1:
+#if LJ_HASPROFILE
+ rec_profile_ret(J);
+#endif
+ lj_record_ret(J, ra, (ptrdiff_t)rc-1);
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORI:
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
+ J->loopref = J->cur.nins;
+ break;
+ case BC_JFORI:
+ lj_assertJ(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL,
+ "JFORI does not point to JFORL");
+ if (rec_for(J, pc, 0) != LOOPEV_LEAVE) /* Link to existing loop. */
+ lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
+ /* Continue tracing if the loop is not entered. */
+ break;
+
+ case BC_FORL:
+ rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
+ break;
+ case BC_ITERL:
+ rec_loop_interp(J, pc, rec_iterl(J, *pc));
+ break;
+ case BC_ITERN:
+ rec_loop_interp(J, pc, rec_itern(J, ra, rb));
+ break;
+ case BC_LOOP:
+ rec_loop_interp(J, pc, rec_loop(J, ra, 1));
+ break;
+
+ case BC_JFORL:
+ rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
+ break;
+ case BC_JITERL:
+ rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
+ break;
+ case BC_JLOOP:
+ rec_loop_jit(J, rc, rec_loop(J, ra,
+ !bc_isret(bc_op(traceref(J, rc)->startins)) &&
+ bc_op(traceref(J, rc)->startins) != BC_ITERN));
+ break;
+
+ case BC_IFORL:
+ case BC_IITERL:
+ case BC_ILOOP:
+ case BC_IFUNCF:
+ case BC_IFUNCV:
+ lj_trace_err(J, LJ_TRERR_BLACKL);
+ break;
+
+ case BC_JMP:
+ if (ra < J->maxslot)
+ J->maxslot = ra; /* Shrink used slots. */
+ break;
+
+ case BC_ISNEXT:
+ rec_isnext(J, ra);
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCF:
+ rec_func_jit(J, rc);
+ break;
+
+ case BC_FUNCV:
+ rec_func_vararg(J);
+ rec_func_lua(J);
+ break;
+ case BC_JFUNCV:
+ /* Cannot happen. No hotcall counting for vararg funcs. */
+ lj_assertJ(0, "unsupported vararg hotcall");
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ lj_ffrecord_func(J);
+ break;
+
+ default:
+ if (op >= BC__MAX) {
+ lj_ffrecord_func(J);
+ break;
+ }
+ /* fallthrough */
+ case BC_UCLO:
+ case BC_FNEW:
+ setintV(&J->errinfo, (int32_t)op);
+ lj_trace_err_info(J, LJ_TRERR_NYIBC);
+ break;
+ }
+
+ /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
+ if (bcmode_a(op) == BCMdst && rc) {
+ J->base[ra] = rc;
+ if (ra >= J->maxslot) {
+#if LJ_FR2
+ if (ra > J->maxslot) J->base[ra-1] = 0;
+#endif
+ J->maxslot = ra+1;
+ }
+ }
+
+#undef rav
+#undef rbv
+#undef rcv
+
+ /* Limit the number of recorded IR instructions and constants. */
+ if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord] ||
+ J->cur.nk < REF_BIAS-(IRRef)J->param[JIT_P_maxirconst])
+ lj_trace_err(J, LJ_TRERR_TRACEOV);
+}
+
+/* -- Recording setup ----------------------------------------------------- */
+
+/* Setup recording for a root trace started by a hot loop. */
+static const BCIns *rec_setup_root(jit_State *J)
+{
+ /* Determine the next PC and the bytecode range for the loop. */
+ const BCIns *pcj, *pc = J->pc;
+ BCIns ins = *pc;
+ BCReg ra = bc_a(ins);
+ switch (bc_op(ins)) {
+ case BC_FORL:
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ J->bc_min = pc;
+ break;
+ case BC_ITERL:
+ if (bc_op(pc[-1]) == BC_JLOOP)
+ lj_trace_err(J, LJ_TRERR_LINNER);
+ lj_assertJ(bc_op(pc[-1]) == BC_ITERC, "no ITERC before ITERL");
+ J->maxslot = ra + bc_b(pc[-1]) - 1;
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ pc += 1+bc_j(ins);
+ lj_assertJ(bc_op(pc[-1]) == BC_JMP, "ITERL does not point to JMP+1");
+ J->bc_min = pc;
+ break;
+ case BC_ITERN:
+ lj_assertJ(bc_op(pc[1]) == BC_ITERL, "no ITERL after ITERN");
+ J->maxslot = ra;
+ J->bc_extent = (MSize)(-bc_j(pc[1]))*sizeof(BCIns);
+ J->bc_min = pc+2 + bc_j(pc[1]);
+ J->state = LJ_TRACE_RECORD_1ST; /* Record the first ITERN, too. */
+ break;
+ case BC_LOOP:
+ /* Only check the BC range for real loops, not for "repeat until true". */
+ pcj = pc + bc_j(ins);
+ ins = *pcj;
+ if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
+ J->bc_min = pcj+1 + bc_j(ins);
+ J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
+ }
+ J->maxslot = ra;
+ pc++;
+ break;
+ case BC_RET:
+ case BC_RET0:
+ case BC_RET1:
+ /* No bytecode range check for down-recursive root traces. */
+ J->maxslot = ra + bc_d(ins) - 1;
+ break;
+ case BC_FUNCF:
+ /* No bytecode range check for root traces started by a hot call. */
+ J->maxslot = J->pt->numparams;
+ pc++;
+ break;
+ case BC_CALLM:
+ case BC_CALL:
+ case BC_ITERC:
+ /* No bytecode range check for stitched traces. */
+ pc++;
+ break;
+ default:
+ lj_assertJ(0, "bad root trace start bytecode %d", bc_op(ins));
+ break;
+ }
+ return pc;
+}
+
+/* Setup for recording a new trace. */
+void lj_record_setup(jit_State *J)
+{
+ uint32_t i;
+
+ /* Initialize state related to current trace. */
+ memset(J->slot, 0, sizeof(J->slot));
+ memset(J->chain, 0, sizeof(J->chain));
+#ifdef LUAJIT_ENABLE_TABLE_BUMP
+ memset(J->rbchash, 0, sizeof(J->rbchash));
+#endif
+ memset(J->bpropcache, 0, sizeof(J->bpropcache));
+ J->scev.idx = REF_NIL;
+ setmref(J->scev.pc, NULL);
+
+ J->baseslot = 1+LJ_FR2; /* Invoking function is at base[-1-LJ_FR2]. */
+ J->base = J->slot + J->baseslot;
+ J->maxslot = 0;
+ J->framedepth = 0;
+ J->retdepth = 0;
+
+ J->instunroll = J->param[JIT_P_instunroll];
+ J->loopunroll = J->param[JIT_P_loopunroll];
+ J->tailcalled = 0;
+ J->loopref = 0;
+
+ J->bc_min = NULL; /* Means no limit. */
+ J->bc_extent = ~(MSize)0;
+
+ /* Emit instructions for fixed references. Also triggers initial IR alloc. */
+ emitir_raw(IRT(IR_BASE, IRT_PGC), J->parent, J->exitno);
+ for (i = 0; i <= 2; i++) {
+ IRIns *ir = IR(REF_NIL-i);
+ ir->i = 0;
+ ir->t.irt = (uint8_t)(IRT_NIL+i);
+ ir->o = IR_KPRI;
+ ir->prev = 0;
+ }
+ J->cur.nk = REF_TRUE;
+
+ J->startpc = J->pc;
+ setmref(J->cur.startpc, J->pc);
+ if (J->parent) { /* Side trace. */
+ GCtrace *T = traceref(J, J->parent);
+ TraceNo root = T->root ? T->root : J->parent;
+ J->cur.root = (uint16_t)root;
+ J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
+ /* Check whether we could at least potentially form an extra loop. */
+ if (J->exitno == 0 && T->snap[0].nent == 0) {
+ /* We can narrow a FORL for some side traces, too. */
+ if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
+ bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
+ lj_snap_add(J);
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ goto sidecheck;
+ }
+ } else {
+ J->startpc = NULL; /* Prevent forming an extra loop. */
+ }
+ lj_snap_replay(J, T);
+ sidecheck:
+ if ((traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
+ T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
+ J->param[JIT_P_tryside])) {
+ if (bc_op(*J->pc) == BC_JLOOP) {
+ BCIns startins = traceref(J, bc_d(*J->pc))->startins;
+ if (bc_op(startins) == BC_ITERN)
+ rec_itern(J, bc_a(startins), bc_b(startins));
+ }
+ lj_record_stop(J, LJ_TRLINK_INTERP, 0);
+ }
+ } else { /* Root trace. */
+ J->cur.root = 0;
+ J->cur.startins = *J->pc;
+ J->pc = rec_setup_root(J);
+ /* Note: the loop instruction itself is recorded at the end and not
+ ** at the start! So snapshot #0 needs to point to the *next* instruction.
+ ** The one exception is BC_ITERN, which sets LJ_TRACE_RECORD_1ST.
+ */
+ lj_snap_add(J);
+ if (bc_op(J->cur.startins) == BC_FORL)
+ rec_for_loop(J, J->pc-1, &J->scev, 1);
+ else if (bc_op(J->cur.startins) == BC_ITERC)
+ J->startpc = NULL;
+ if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
+ lj_trace_err(J, LJ_TRERR_STACKOV);
+ }
+#if LJ_HASPROFILE
+ J->prev_pt = NULL;
+ J->prev_line = -1;
+#endif
+#ifdef LUAJIT_ENABLE_CHECKHOOK
+ /* Regularly check for instruction/line hooks from compiled code and
+ ** exit to the interpreter if the hooks are set.
+ **
+ ** This is a compile-time option and disabled by default, since the
+ ** hook checks may be quite expensive in tight loops.
+ **
+ ** Note this is only useful if hooks are *not* set most of the time.
+ ** Use this only if you want to *asynchronously* interrupt the execution.
+ **
+ ** You can set the instruction hook via lua_sethook() with a count of 1
+ ** from a signal handler or another native thread. Please have a look
+ ** at the first few functions in luajit.c for an example (Ctrl-C handler).
+ */
+ {
+ TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
+ lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
+ tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
+ emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
+ }
+#endif
+}
+
+#undef IR
+#undef emitir_raw
+#undef emitir
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_record.h b/libs/luajit-cmake/luajit/src/lj_record.h
new file mode 100644
index 0000000..ab2f4c8
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_record.h
@@ -0,0 +1,47 @@
+/*
+** Trace recorder (bytecode -> SSA IR).
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_RECORD_H
+#define _LJ_RECORD_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+/* Context for recording an indexed load/store. */
+typedef struct RecordIndex {
+ TValue tabv; /* Runtime value of table (or indexed object). */
+ TValue keyv; /* Runtime value of key. */
+ TValue valv; /* Runtime value of stored value. */
+ TValue mobjv; /* Runtime value of metamethod object. */
+ GCtab *mtv; /* Runtime value of metatable object. */
+ cTValue *oldv; /* Runtime value of previously stored value. */
+ TRef tab; /* Table (or indexed object) reference. */
+ TRef key; /* Key reference. */
+ TRef val; /* Value reference for a store or 0 for a load. */
+ TRef mt; /* Metatable reference. */
+ TRef mobj; /* Metamethod object reference. */
+ int idxchain; /* Index indirections left or 0 for raw lookup. */
+} RecordIndex;
+
+LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
+ cTValue *av, cTValue *bv);
+LJ_FUNC void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk);
+LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o);
+LJ_FUNC TRef lj_record_vload(jit_State *J, TRef ref, MSize idx, IRType t);
+
+LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs);
+LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults);
+
+LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm);
+LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix);
+LJ_FUNC int lj_record_next(jit_State *J, RecordIndex *ix);
+
+LJ_FUNC void lj_record_ins(jit_State *J);
+LJ_FUNC void lj_record_setup(jit_State *J);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_serialize.c b/libs/luajit-cmake/luajit/src/lj_serialize.c
new file mode 100644
index 0000000..f7e5182
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_serialize.c
@@ -0,0 +1,539 @@
+/*
+** Object de/serialization.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_serialize_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASBUFFER
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_udata.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+#if LJ_HASJIT
+#include "lj_ir.h"
+#endif
+#include "lj_serialize.h"
+
+/* Tags for internal serialization format. */
+enum {
+ SER_TAG_NIL, /* 0x00 */
+ SER_TAG_FALSE,
+ SER_TAG_TRUE,
+ SER_TAG_NULL,
+ SER_TAG_LIGHTUD32,
+ SER_TAG_LIGHTUD64,
+ SER_TAG_INT,
+ SER_TAG_NUM,
+ SER_TAG_TAB, /* 0x08 */
+ SER_TAG_DICT_MT = SER_TAG_TAB+6,
+ SER_TAG_DICT_STR,
+ SER_TAG_INT64, /* 0x10 */
+ SER_TAG_UINT64,
+ SER_TAG_COMPLEX,
+ SER_TAG_0x13,
+ SER_TAG_0x14,
+ SER_TAG_0x15,
+ SER_TAG_0x16,
+ SER_TAG_0x17,
+ SER_TAG_0x18, /* 0x18 */
+ SER_TAG_0x19,
+ SER_TAG_0x1a,
+ SER_TAG_0x1b,
+ SER_TAG_0x1c,
+ SER_TAG_0x1d,
+ SER_TAG_0x1e,
+ SER_TAG_0x1f,
+ SER_TAG_STR, /* 0x20 + str->len */
+};
+LJ_STATIC_ASSERT((SER_TAG_TAB & 7) == 0);
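+
+/* Table tags encode the table shape in the low three bits of SER_TAG_TAB:
+** +1 if hash entries follow, +2 for an array part starting at index 0,
+** +4 for an array part starting at index 1 (array slot 0 is nil). The
+** array length and hash count are only written for the bits that are set.
+*/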
+
+/* -- Helper functions ---------------------------------------------------- */
+
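+/* Ensure at least sz more bytes of buffer space, growing the buffer. */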
+static LJ_AINLINE char *serialize_more(char *w, SBufExt *sbx, MSize sz)
+{
+ if (LJ_UNLIKELY(sz > (MSize)(sbx->e - w))) {
+ sbx->w = w;
+ w = lj_buf_more2((SBuf *)sbx, sz);
+ }
+ return w;
+}
+
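+/* U124 is a variable-length encoding for 32 bit values: one byte for
+** v < 0xe0; two bytes for v < 0x1fe0, biased by 0xe0 with the top bits
+** in the first byte (e.g. 0x1234 encodes as f1 54); otherwise 0xff
+** followed by the raw 4 byte value in little-endian byte order.
+*/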
+/* Write U124 to buffer. */
+static LJ_NOINLINE char *serialize_wu124_(char *w, uint32_t v)
+{
+ if (v < 0x1fe0) {
+ v -= 0xe0;
+ *w++ = (char)(0xe0 | (v >> 8)); *w++ = (char)v;
+ } else {
+ *w++ = (char)0xff;
+#if LJ_BE
+ v = lj_bswap(v);
+#endif
+ memcpy(w, &v, 4); w += 4;
+ }
+ return w;
+}
+
+static LJ_AINLINE char *serialize_wu124(char *w, uint32_t v)
+{
+ if (LJ_LIKELY(v < 0xe0)) {
+ *w++ = (char)v;
+ return w;
+ } else {
+ return serialize_wu124_(w, v);
+ }
+}
+
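+/* Read U124 from buffer (slow path for the 2 and 5 byte encodings). */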
+static LJ_NOINLINE char *serialize_ru124_(char *r, char *w, uint32_t *pv)
+{
+ uint32_t v = *pv;
+ if (v != 0xff) {
+ if (r >= w) return NULL;
+ v = ((v & 0x1f) << 8) + *(uint8_t *)r + 0xe0; r++;
+ } else {
+ if (r + 4 > w) return NULL;
+ v = lj_getu32(r); r += 4;
+#if LJ_BE
+ v = lj_bswap(v);
+#endif
+ }
+ *pv = v;
+ return r;
+}
+
+static LJ_AINLINE char *serialize_ru124(char *r, char *w, uint32_t *pv)
+{
+ if (LJ_LIKELY(r < w)) {
+ uint32_t v = *(uint8_t *)r; r++;
+ *pv = v;
+ if (LJ_UNLIKELY(v >= 0xe0)) {
+ r = serialize_ru124_(r, w, pv);
+ }
+ return r;
+ }
+ return NULL;
+}
+
+/* Prepare string dictionary for use (once). */
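+/* The array part of dict lists the dictionary strings; this builds the
+** reverse mapping string -> 0-based index in the hash part. Duplicates
+** keep their first index; only false is tolerated as a non-string entry.
+*/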
+void LJ_FASTCALL lj_serialize_dict_prep_str(lua_State *L, GCtab *dict)
+{
+ if (!dict->hmask) { /* No hash part means not prepared yet. */
+ MSize i, len = lj_tab_len(dict);
+ if (!len) return;
+ lj_tab_resize(L, dict, dict->asize, hsize2hbits(len));
+ for (i = 1; i <= len && i < dict->asize; i++) {
+ cTValue *o = arrayslot(dict, i);
+ if (tvisstr(o)) {
+ if (!lj_tab_getstr(dict, strV(o))) { /* Ignore dups. */
+ lj_tab_newkey(L, dict, o)->u64 = (uint64_t)(i-1);
+ }
+ } else if (!tvisfalse(o)) {
+ lj_err_caller(L, LJ_ERR_BUFFER_BADOPT);
+ }
+ }
+ }
+}
+
+/* Prepare metatable dictionary for use (once). */
+void LJ_FASTCALL lj_serialize_dict_prep_mt(lua_State *L, GCtab *dict)
+{
+ if (!dict->hmask) { /* No hash part means not prepared yet. */
+ MSize i, len = lj_tab_len(dict);
+ if (!len) return;
+ lj_tab_resize(L, dict, dict->asize, hsize2hbits(len));
+ for (i = 1; i <= len && i < dict->asize; i++) {
+ cTValue *o = arrayslot(dict, i);
+ if (tvistab(o)) {
+ if (tvisnil(lj_tab_get(L, dict, o))) { /* Ignore dups. */
+ lj_tab_newkey(L, dict, o)->u64 = (uint64_t)(i-1);
+ }
+ } else if (!tvisfalse(o)) {
+ lj_err_caller(L, LJ_ERR_BUFFER_BADOPT);
+ }
+ }
+ }
+}
+
+/* -- Internal serializer ------------------------------------------------- */
+
+/* Put serialized object into buffer. */
+static char *serialize_put(char *w, SBufExt *sbx, cTValue *o)
+{
+ if (LJ_LIKELY(tvisstr(o))) {
+ const GCstr *str = strV(o);
+ MSize len = str->len;
+ w = serialize_more(w, sbx, 5+len);
+ w = serialize_wu124(w, SER_TAG_STR + len);
+ w = lj_buf_wmem(w, strdata(str), len);
+ } else if (tvisint(o)) {
+ uint32_t x = LJ_BE ? lj_bswap((uint32_t)intV(o)) : (uint32_t)intV(o);
+ w = serialize_more(w, sbx, 1+4);
+ *w++ = SER_TAG_INT; memcpy(w, &x, 4); w += 4;
+ } else if (tvisnum(o)) {
+ uint64_t x = LJ_BE ? lj_bswap64(o->u64) : o->u64;
+ w = serialize_more(w, sbx, 1+sizeof(lua_Number));
+ *w++ = SER_TAG_NUM; memcpy(w, &x, 8); w += 8;
+ } else if (tvispri(o)) {
+ w = serialize_more(w, sbx, 1);
+ *w++ = (char)(SER_TAG_NIL + ~itype(o));
+ } else if (tvistab(o)) {
+ const GCtab *t = tabV(o);
+ uint32_t narray = 0, nhash = 0, one = 2;
+ if (sbx->depth <= 0) lj_err_caller(sbufL(sbx), LJ_ERR_BUFFER_DEPTH);
+ sbx->depth--;
+ if (t->asize > 0) { /* Determine max. length of array part. */
+ ptrdiff_t i;
+ TValue *array = tvref(t->array);
+ for (i = (ptrdiff_t)t->asize-1; i >= 0; i--)
+ if (!tvisnil(&array[i]))
+ break;
+ narray = (uint32_t)(i+1);
+ if (narray && tvisnil(&array[0])) one = 4;
+ }
+ if (t->hmask > 0) { /* Count number of used hash slots. */
+ uint32_t i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (i = 0; i <= hmask; i++)
+ nhash += !tvisnil(&node[i].val);
+ }
+ /* Write metatable index. */
+ if (LJ_UNLIKELY(tabref(sbx->dict_mt)) && tabref(t->metatable)) {
+ TValue mto;
+ Node *n;
+ settabV(sbufL(sbx), &mto, tabref(t->metatable));
+ n = hashgcref(tabref(sbx->dict_mt), mto.gcr);
+ do {
+ if (n->key.u64 == mto.u64) {
+ uint32_t idx = n->val.u32.lo;
+ w = serialize_more(w, sbx, 1+5);
+ *w++ = SER_TAG_DICT_MT;
+ w = serialize_wu124(w, idx);
+ break;
+ }
+ } while ((n = nextnode(n)));
+ }
+ /* Write number of array slots and hash slots. */
+ w = serialize_more(w, sbx, 1+2*5);
+ *w++ = (char)(SER_TAG_TAB + (nhash ? 1 : 0) + (narray ? one : 0));
+ if (narray) w = serialize_wu124(w, narray);
+ if (nhash) w = serialize_wu124(w, nhash);
+ if (narray) { /* Write array entries. */
+ cTValue *oa = tvref(t->array) + (one >> 2);
+ cTValue *oe = tvref(t->array) + narray;
+ while (oa < oe) w = serialize_put(w, sbx, oa++);
+ }
+ if (nhash) { /* Write hash entries. */
+ const Node *node = noderef(t->node) + t->hmask;
+ GCtab *dict_str = tabref(sbx->dict_str);
+ if (LJ_UNLIKELY(dict_str)) {
+ for (;; node--)
+ if (!tvisnil(&node->val)) {
+ if (LJ_LIKELY(tvisstr(&node->key))) {
+ /* Inlined lj_tab_getstr is 30% faster. */
+ const GCstr *str = strV(&node->key);
+ Node *n = hashstr(dict_str, str);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == str) {
+ uint32_t idx = n->val.u32.lo;
+ w = serialize_more(w, sbx, 1+5);
+ *w++ = SER_TAG_DICT_STR;
+ w = serialize_wu124(w, idx);
+ break;
+ }
+ n = nextnode(n);
+ if (!n) {
+ MSize len = str->len;
+ w = serialize_more(w, sbx, 5+len);
+ w = serialize_wu124(w, SER_TAG_STR + len);
+ w = lj_buf_wmem(w, strdata(str), len);
+ break;
+ }
+ } while (1);
+ } else {
+ w = serialize_put(w, sbx, &node->key);
+ }
+ w = serialize_put(w, sbx, &node->val);
+ if (--nhash == 0) break;
+ }
+ } else {
+ for (;; node--)
+ if (!tvisnil(&node->val)) {
+ w = serialize_put(w, sbx, &node->key);
+ w = serialize_put(w, sbx, &node->val);
+ if (--nhash == 0) break;
+ }
+ }
+ }
+ sbx->depth++;
+#if LJ_HASFFI
+ } else if (tviscdata(o)) {
+ CTState *cts = ctype_cts(sbufL(sbx));
+ CType *s = ctype_raw(cts, cdataV(o)->ctypeid);
+ uint8_t *sp = cdataptr(cdataV(o));
+ if (ctype_isinteger(s->info) && s->size == 8) {
+ w = serialize_more(w, sbx, 1+8);
+ *w++ = (s->info & CTF_UNSIGNED) ? SER_TAG_UINT64 : SER_TAG_INT64;
+#if LJ_BE
+ { uint64_t u = lj_bswap64(*(uint64_t *)sp); memcpy(w, &u, 8); }
+#else
+ memcpy(w, sp, 8);
+#endif
+ w += 8;
+ } else if (ctype_iscomplex(s->info) && s->size == 16) {
+ w = serialize_more(w, sbx, 1+16);
+ *w++ = SER_TAG_COMPLEX;
+#if LJ_BE
+ { /* Only swap the doubles. The re/im order stays the same. */
+ uint64_t u = lj_bswap64(((uint64_t *)sp)[0]); memcpy(w, &u, 8);
+ u = lj_bswap64(((uint64_t *)sp)[1]); memcpy(w+8, &u, 8);
+ }
+#else
+ memcpy(w, sp, 16);
+#endif
+ w += 16;
+ } else {
+ goto badenc; /* NYI other cdata */
+ }
+#endif
+ } else if (tvislightud(o)) {
+ uintptr_t ud = (uintptr_t)lightudV(G(sbufL(sbx)), o);
+ w = serialize_more(w, sbx, 1+sizeof(ud));
+ if (ud == 0) {
+ *w++ = SER_TAG_NULL;
+ } else if (LJ_32 || checku32(ud)) {
+#if LJ_BE && LJ_64
+ ud = lj_bswap64(ud);
+#elif LJ_BE
+ ud = lj_bswap(ud);
+#endif
+ *w++ = SER_TAG_LIGHTUD32; memcpy(w, &ud, 4); w += 4;
+#if LJ_64
+ } else {
+#if LJ_BE
+ ud = lj_bswap64(ud);
+#endif
+ *w++ = SER_TAG_LIGHTUD64; memcpy(w, &ud, 8); w += 8;
+#endif
+ }
+ } else {
+ /* NYI userdata */
+#if LJ_HASFFI
+ badenc:
+#endif
+ lj_err_callerv(sbufL(sbx), LJ_ERR_BUFFER_BADENC, lj_typename(o));
+ }
+ return w;
+}
+
+/* Get serialized object from buffer. */
+static char *serialize_get(char *r, SBufExt *sbx, TValue *o)
+{
+ char *w = sbx->w;
+ uint32_t tp;
+ r = serialize_ru124(r, w, &tp); if (LJ_UNLIKELY(!r)) goto eob;
+ if (LJ_LIKELY(tp >= SER_TAG_STR)) {
+ uint32_t len = tp - SER_TAG_STR;
+ if (LJ_UNLIKELY(len > (uint32_t)(w - r))) goto eob;
+ setstrV(sbufL(sbx), o, lj_str_new(sbufL(sbx), r, len));
+ r += len;
+ } else if (tp == SER_TAG_INT) {
+ if (LJ_UNLIKELY(r + 4 > w)) goto eob;
+ setintV(o, (int32_t)(LJ_BE ? lj_bswap(lj_getu32(r)) : lj_getu32(r)));
+ r += 4;
+ } else if (tp == SER_TAG_NUM) {
+ if (LJ_UNLIKELY(r + 8 > w)) goto eob;
+ memcpy(o, r, 8); r += 8;
+#if LJ_BE
+ o->u64 = lj_bswap64(o->u64);
+#endif
+ if (!tvisnum(o)) setnanV(o); /* Fix non-canonical NaNs. */
+ } else if (tp <= SER_TAG_TRUE) {
+ setpriV(o, ~tp);
+ } else if (tp == SER_TAG_DICT_STR) {
+ GCtab *dict_str;
+ uint32_t idx;
+ r = serialize_ru124(r, w, &idx); if (LJ_UNLIKELY(!r)) goto eob;
+ idx++;
+ dict_str = tabref(sbx->dict_str);
+ if (dict_str && idx < dict_str->asize && tvisstr(arrayslot(dict_str, idx)))
+ copyTV(sbufL(sbx), o, arrayslot(dict_str, idx));
+ else
+ lj_err_callerv(sbufL(sbx), LJ_ERR_BUFFER_BADDICTX, idx);
+ } else if (tp >= SER_TAG_TAB && tp <= SER_TAG_DICT_MT) {
+ uint32_t narray = 0, nhash = 0;
+ GCtab *t, *mt = NULL;
+ if (sbx->depth <= 0) lj_err_caller(sbufL(sbx), LJ_ERR_BUFFER_DEPTH);
+ sbx->depth--;
+ if (tp == SER_TAG_DICT_MT) {
+ GCtab *dict_mt;
+ uint32_t idx;
+ r = serialize_ru124(r, w, &idx); if (LJ_UNLIKELY(!r)) goto eob;
+ idx++;
+ dict_mt = tabref(sbx->dict_mt);
+ if (dict_mt && idx < dict_mt->asize && tvistab(arrayslot(dict_mt, idx)))
+ mt = tabV(arrayslot(dict_mt, idx));
+ else
+ lj_err_callerv(sbufL(sbx), LJ_ERR_BUFFER_BADDICTX, idx);
+ r = serialize_ru124(r, w, &tp); if (LJ_UNLIKELY(!r)) goto eob;
+ if (!(tp >= SER_TAG_TAB && tp < SER_TAG_DICT_MT)) goto badtag;
+ }
+ if (tp >= SER_TAG_TAB+2) {
+ r = serialize_ru124(r, w, &narray); if (LJ_UNLIKELY(!r)) goto eob;
+ }
+ if ((tp & 1)) {
+ r = serialize_ru124(r, w, &nhash); if (LJ_UNLIKELY(!r)) goto eob;
+ }
+ t = lj_tab_new(sbufL(sbx), narray, hsize2hbits(nhash));
+ /* NOBARRIER: The table is new (marked white). */
+ setgcref(t->metatable, obj2gco(mt));
+ settabV(sbufL(sbx), o, t);
+ if (narray) {
+ TValue *oa = tvref(t->array) + (tp >= SER_TAG_TAB+4);
+ TValue *oe = tvref(t->array) + narray;
+ while (oa < oe) r = serialize_get(r, sbx, oa++);
+ }
+ if (nhash) {
+ do {
+ TValue k, *v;
+ r = serialize_get(r, sbx, &k);
+ v = lj_tab_set(sbufL(sbx), t, &k);
+ if (LJ_UNLIKELY(!tvisnil(v)))
+ lj_err_caller(sbufL(sbx), LJ_ERR_BUFFER_DUPKEY);
+ r = serialize_get(r, sbx, v);
+ } while (--nhash);
+ }
+ sbx->depth++;
+#if LJ_HASFFI
+ } else if (tp >= SER_TAG_INT64 && tp <= SER_TAG_COMPLEX) {
+ uint32_t sz = tp == SER_TAG_COMPLEX ? 16 : 8;
+ GCcdata *cd;
+ if (LJ_UNLIKELY(r + sz > w)) goto eob;
+ if (LJ_UNLIKELY(!ctype_ctsG(G(sbufL(sbx))))) goto badtag;
+ cd = lj_cdata_new_(sbufL(sbx),
+ tp == SER_TAG_INT64 ? CTID_INT64 :
+ tp == SER_TAG_UINT64 ? CTID_UINT64 : CTID_COMPLEX_DOUBLE,
+ sz);
+ memcpy(cdataptr(cd), r, sz); r += sz;
+#if LJ_BE
+ *(uint64_t *)cdataptr(cd) = lj_bswap64(*(uint64_t *)cdataptr(cd));
+ if (sz == 16)
+ ((uint64_t *)cdataptr(cd))[1] = lj_bswap64(((uint64_t *)cdataptr(cd))[1]);
+#endif
+ if (sz == 16) { /* Fix non-canonical NaNs. */
+ TValue *cdo = (TValue *)cdataptr(cd);
+ if (!tvisnum(&cdo[0])) setnanV(&cdo[0]);
+ if (!tvisnum(&cdo[1])) setnanV(&cdo[1]);
+ }
+ setcdataV(sbufL(sbx), o, cd);
+#endif
+ } else if (tp <= (LJ_64 ? SER_TAG_LIGHTUD64 : SER_TAG_LIGHTUD32)) {
+ uintptr_t ud = 0;
+ if (tp == SER_TAG_LIGHTUD32) {
+ if (LJ_UNLIKELY(r + 4 > w)) goto eob;
+ ud = (uintptr_t)(LJ_BE ? lj_bswap(lj_getu32(r)) : lj_getu32(r));
+ r += 4;
+ }
+#if LJ_64
+ else if (tp == SER_TAG_LIGHTUD64) {
+ if (LJ_UNLIKELY(r + 8 > w)) goto eob;
+ memcpy(&ud, r, 8); r += 8;
+#if LJ_BE
+ ud = lj_bswap64(ud);
+#endif
+ }
+ setrawlightudV(o, lj_lightud_intern(sbufL(sbx), (void *)ud));
+#else
+ setrawlightudV(o, (void *)ud);
+#endif
+ } else {
+badtag:
+ lj_err_callerv(sbufL(sbx), LJ_ERR_BUFFER_BADDEC, tp);
+ }
+ return r;
+eob:
+ lj_err_caller(sbufL(sbx), LJ_ERR_BUFFER_EOB);
+ return NULL;
+}
+
+/* -- External serialization API ------------------------------------------ */
+
+/* Encode to buffer. */
+SBufExt * LJ_FASTCALL lj_serialize_put(SBufExt *sbx, cTValue *o)
+{
+ sbx->depth = LJ_SERIALIZE_DEPTH;
+ sbx->w = serialize_put(sbx->w, sbx, o);
+ return sbx;
+}
+
+/* Decode from buffer. */
+char * LJ_FASTCALL lj_serialize_get(SBufExt *sbx, TValue *o)
+{
+ sbx->depth = LJ_SERIALIZE_DEPTH;
+ return serialize_get(sbx->r, sbx, o);
+}
+
+/* Stand-alone encoding, borrowing from global temporary buffer. */
+GCstr * LJ_FASTCALL lj_serialize_encode(lua_State *L, cTValue *o)
+{
+ SBufExt sbx;
+ char *w;
+ memset(&sbx, 0, sizeof(SBufExt));
+ lj_bufx_set_borrow(L, &sbx, &G(L)->tmpbuf);
+ sbx.depth = LJ_SERIALIZE_DEPTH;
+ w = serialize_put(sbx.w, &sbx, o);
+ return lj_str_new(L, sbx.b, (size_t)(w - sbx.b));
+}
+
+/* Stand-alone decoding, copy-on-write from string. */
+void lj_serialize_decode(lua_State *L, TValue *o, GCstr *str)
+{
+ SBufExt sbx;
+ char *r;
+ memset(&sbx, 0, sizeof(SBufExt));
+ lj_bufx_set_cow(L, &sbx, strdata(str), str->len);
+ /* No need to set sbx.cowref here. */
+ sbx.depth = LJ_SERIALIZE_DEPTH;
+ r = serialize_get(sbx.r, &sbx, o);
+ if (r != sbx.w) lj_err_caller(L, LJ_ERR_BUFFER_LEFTOV);
+}
+
+#if LJ_HASJIT
+/* Peek into buffer to find the result IRType for specialization purposes. */
+LJ_FUNC MSize LJ_FASTCALL lj_serialize_peektype(SBufExt *sbx)
+{
+ uint32_t tp;
+ if (serialize_ru124(sbx->r, sbx->w, &tp)) {
+ /* This must match the handling of all tags in the decoder above. */
+ switch (tp) {
+ case SER_TAG_NIL: return IRT_NIL;
+ case SER_TAG_FALSE: return IRT_FALSE;
+ case SER_TAG_TRUE: return IRT_TRUE;
+ case SER_TAG_NULL: case SER_TAG_LIGHTUD32: case SER_TAG_LIGHTUD64:
+ return IRT_LIGHTUD;
+ case SER_TAG_INT: return LJ_DUALNUM ? IRT_INT : IRT_NUM;
+ case SER_TAG_NUM: return IRT_NUM;
+ case SER_TAG_TAB: case SER_TAG_TAB+1: case SER_TAG_TAB+2:
+ case SER_TAG_TAB+3: case SER_TAG_TAB+4: case SER_TAG_TAB+5:
+ case SER_TAG_DICT_MT:
+ return IRT_TAB;
+ case SER_TAG_INT64: case SER_TAG_UINT64: case SER_TAG_COMPLEX:
+ return IRT_CDATA;
+ case SER_TAG_DICT_STR:
+ default:
+ return IRT_STR;
+ }
+ }
+ return IRT_NIL; /* Will fail on actual decode. */
+}
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_serialize.h b/libs/luajit-cmake/luajit/src/lj_serialize.h
new file mode 100644
index 0000000..d3f4275
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_serialize.h
@@ -0,0 +1,28 @@
+/*
+** Object de/serialization.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_SERIALIZE_H
+#define _LJ_SERIALIZE_H
+
+#include "lj_obj.h"
+#include "lj_buf.h"
+
+#if LJ_HASBUFFER
+
+#define LJ_SERIALIZE_DEPTH 100 /* Default depth. */
+
+LJ_FUNC void LJ_FASTCALL lj_serialize_dict_prep_str(lua_State *L, GCtab *dict);
+LJ_FUNC void LJ_FASTCALL lj_serialize_dict_prep_mt(lua_State *L, GCtab *dict);
+LJ_FUNC SBufExt * LJ_FASTCALL lj_serialize_put(SBufExt *sbx, cTValue *o);
+LJ_FUNC char * LJ_FASTCALL lj_serialize_get(SBufExt *sbx, TValue *o);
+LJ_FUNC GCstr * LJ_FASTCALL lj_serialize_encode(lua_State *L, cTValue *o);
+LJ_FUNC void lj_serialize_decode(lua_State *L, TValue *o, GCstr *str);
+#if LJ_HASJIT
+LJ_FUNC MSize LJ_FASTCALL lj_serialize_peektype(SBufExt *sbx);
+#endif
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_snap.c b/libs/luajit-cmake/luajit/src/lj_snap.c
new file mode 100644
index 0000000..4140fdb
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_snap.c
@@ -0,0 +1,996 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_snap_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_target.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#include "lj_cdata.h"
+#endif
+
+/* Pass IR on to next optimization in chain (FOLD). */
+#define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
+
+/* Emit raw IR without passing through optimizations. */
+#define emitir_raw(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
+
+/* -- Snapshot buffer allocation ------------------------------------------ */
+
+/* Grow snapshot buffer. */
+void lj_snap_grow_buf_(jit_State *J, MSize need)
+{
+ MSize maxsnap = (MSize)J->param[JIT_P_maxsnap];
+ if (need > maxsnap)
+ lj_trace_err(J, LJ_TRERR_SNAPOV);
+ lj_mem_growvec(J->L, J->snapbuf, J->sizesnap, maxsnap, SnapShot);
+ J->cur.snap = J->snapbuf;
+}
+
+/* Grow snapshot map buffer. */
+void lj_snap_grow_map_(jit_State *J, MSize need)
+{
+ if (need < 2*J->sizesnapmap)
+ need = 2*J->sizesnapmap;
+ else if (need < 64)
+ need = 64;
+ J->snapmapbuf = (SnapEntry *)lj_mem_realloc(J->L, J->snapmapbuf,
+ J->sizesnapmap*sizeof(SnapEntry), need*sizeof(SnapEntry));
+ J->cur.snapmap = J->snapmapbuf;
+ J->sizesnapmap = need;
+}
+
+/* -- Snapshot generation ------------------------------------------------- */
+
+/* Add all modified slots to the snapshot. */
+static MSize snapshot_slots(jit_State *J, SnapEntry *map, BCReg nslots)
+{
+ IRRef retf = J->chain[IR_RETF]; /* Limits SLOAD restore elimination. */
+ BCReg s;
+ MSize n = 0;
+ for (s = 0; s < nslots; s++) {
+ TRef tr = J->slot[s];
+ IRRef ref = tref_ref(tr);
+#if LJ_FR2
+ if (s == 1) { /* Ignore slot 1 in LJ_FR2 mode, except if tailcalled. */
+ if ((tr & TREF_FRAME))
+ map[n++] = SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL);
+ continue;
+ }
+ if ((tr & (TREF_FRAME | TREF_CONT)) && !ref) {
+ cTValue *base = J->L->base - J->baseslot;
+ tr = J->slot[s] = (tr & 0xff0000) | lj_ir_k64(J, IR_KNUM, base[s].u64);
+ ref = tref_ref(tr);
+ }
+#endif
+ if (ref) {
+ SnapEntry sn = SNAP_TR(s, tr);
+ IRIns *ir = &J->cur.ir[ref];
+ if ((LJ_FR2 || !(sn & (SNAP_CONT|SNAP_FRAME))) &&
+ ir->o == IR_SLOAD && ir->op1 == s && ref > retf) {
+ /*
+ ** No need to snapshot unmodified non-inherited slots.
+ ** But always snapshot the function below a frame in LJ_FR2 mode.
+ */
+ if (!(ir->op2 & IRSLOAD_INHERIT) &&
+ (!LJ_FR2 || s == 0 || s+1 == nslots ||
+ !(J->slot[s+1] & (TREF_CONT|TREF_FRAME))))
+ continue;
+ /* No need to restore readonly slots and unmodified non-parent slots. */
+ if (!(LJ_DUALNUM && (ir->op2 & IRSLOAD_CONVERT)) &&
+ (ir->op2 & (IRSLOAD_READONLY|IRSLOAD_PARENT)) != IRSLOAD_PARENT)
+ sn |= SNAP_NORESTORE;
+ }
+ if (LJ_SOFTFP32 && irt_isnum(ir->t))
+ sn |= SNAP_SOFTFPNUM;
+ map[n++] = sn;
+ }
+ }
+ return n;
+}
+
+/* Add frame links at the end of the snapshot. */
+static MSize snapshot_framelinks(jit_State *J, SnapEntry *map, uint8_t *topslot)
+{
+ cTValue *frame = J->L->base - 1;
+ cTValue *lim = J->L->base - J->baseslot + LJ_FR2;
+ GCfunc *fn = frame_func(frame);
+ cTValue *ftop = isluafunc(fn) ? (frame+funcproto(fn)->framesize) : J->L->top;
+#if LJ_FR2
+ uint64_t pcbase = (u64ptr(J->pc) << 8) | (J->baseslot - 2);
+ lj_assertJ(2 <= J->baseslot && J->baseslot <= 257, "bad baseslot");
+ memcpy(map, &pcbase, sizeof(uint64_t));
+#else
+ MSize f = 0;
+ map[f++] = SNAP_MKPC(J->pc); /* The current PC is always the first entry. */
+#endif
+ lj_assertJ(!J->pt ||
+ (J->pc >= proto_bc(J->pt) &&
+ J->pc < proto_bc(J->pt) + J->pt->sizebc), "bad snapshot PC");
+ while (frame > lim) { /* Backwards traversal of all frames above base. */
+ if (frame_islua(frame)) {
+#if !LJ_FR2
+ map[f++] = SNAP_MKPC(frame_pc(frame));
+#endif
+ frame = frame_prevl(frame);
+ } else if (frame_iscont(frame)) {
+#if !LJ_FR2
+ map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+ map[f++] = SNAP_MKPC(frame_contpc(frame));
+#endif
+ frame = frame_prevd(frame);
+ } else {
+ lj_assertJ(!frame_isc(frame), "broken frame chain");
+#if !LJ_FR2
+ map[f++] = SNAP_MKFTSZ(frame_ftsz(frame));
+#endif
+ frame = frame_prevd(frame);
+ continue;
+ }
+ if (frame + funcproto(frame_func(frame))->framesize > ftop)
+ ftop = frame + funcproto(frame_func(frame))->framesize;
+ }
+ *topslot = (uint8_t)(ftop - lim);
+#if LJ_FR2
+ lj_assertJ(sizeof(SnapEntry) * 2 == sizeof(uint64_t), "bad SnapEntry def");
+ return 2;
+#else
+ lj_assertJ(f == (MSize)(1 + J->framedepth), "miscalculated snapshot size");
+ return f;
+#endif
+}
+
+/* Take a snapshot of the current stack. */
+static void snapshot_stack(jit_State *J, SnapShot *snap, MSize nsnapmap)
+{
+ BCReg nslots = J->baseslot + J->maxslot;
+ MSize nent;
+ SnapEntry *p;
+ /* Conservative estimate. */
+ lj_snap_grow_map(J, nsnapmap + nslots + (MSize)(LJ_FR2?2:J->framedepth+1));
+ p = &J->cur.snapmap[nsnapmap];
+ nent = snapshot_slots(J, p, nslots);
+ snap->nent = (uint8_t)nent;
+ nent += snapshot_framelinks(J, p + nent, &snap->topslot);
+ snap->mapofs = (uint32_t)nsnapmap;
+ snap->ref = (IRRef1)J->cur.nins;
+ snap->mcofs = 0;
+ snap->nslots = (uint8_t)nslots;
+ snap->count = 0;
+ J->cur.nsnapmap = (uint32_t)(nsnapmap + nent);
+}
+
+/* Add or merge a snapshot. */
+void lj_snap_add(jit_State *J)
+{
+ MSize nsnap = J->cur.nsnap;
+ MSize nsnapmap = J->cur.nsnapmap;
+ /* Merge if no instructions in between, or if requested and no guard in between. */
+ if ((nsnap > 0 && J->cur.snap[nsnap-1].ref == J->cur.nins) ||
+ (J->mergesnap && !irt_isguard(J->guardemit))) {
+ if (nsnap == 1) { /* But preserve snap #0 PC. */
+ emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
+ goto nomerge;
+ }
+ nsnapmap = J->cur.snap[--nsnap].mapofs;
+ } else {
+ nomerge:
+ lj_snap_grow_buf(J, nsnap+1);
+ J->cur.nsnap = (uint16_t)(nsnap+1);
+ }
+ J->mergesnap = 0;
+ J->guardemit.irt = 0;
+ snapshot_stack(J, &J->cur.snap[nsnap], nsnapmap);
+}
+
+/* -- Snapshot modification ----------------------------------------------- */
+
+#define SNAP_USEDEF_SLOTS (LJ_MAX_JSLOTS+LJ_STACK_EXTRA)
+
+/* Find unused slots with reaching-definitions bytecode data-flow analysis. */
+static BCReg snap_usedef(jit_State *J, uint8_t *udf,
+ const BCIns *pc, BCReg maxslot)
+{
+ BCReg s;
+ GCobj *o;
+
+ if (maxslot == 0) return 0;
+#ifdef LUAJIT_USE_VALGRIND
+ /* Avoid errors for harmless reads beyond maxslot. */
+ memset(udf, 1, SNAP_USEDEF_SLOTS);
+#else
+ memset(udf, 1, maxslot);
+#endif
+
+ /* Treat open upvalues as used. */
+ o = gcref(J->L->openupval);
+ while (o) {
+ if (uvval(gco2uv(o)) < J->L->base) break;
+ udf[uvval(gco2uv(o)) - J->L->base] = 0;
+ o = gcref(o->gch.nextgc);
+ }
+
+#define USE_SLOT(s) udf[(s)] &= ~1
+#define DEF_SLOT(s) udf[(s)] *= 3
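+
+/* udf[] starts at 1 per slot. A use clears bit 0; a def multiplies by 3,
+** which leaves a previously used slot (0) at 0 but keeps everything else
+** non-zero, even across later uses. A non-zero entry after the scan means
+** the slot is overwritten before it is read, so it's dead at the snapshot.
+*/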
+
+ /* Scan through following bytecode and check for uses/defs. */
+ lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
+ "snapshot PC out of range");
+ for (;;) {
+ BCIns ins = *pc++;
+ BCOp op = bc_op(ins);
+ switch (bcmode_b(op)) {
+ case BCMvar: USE_SLOT(bc_b(ins)); break;
+ default: break;
+ }
+ switch (bcmode_c(op)) {
+ case BCMvar: USE_SLOT(bc_c(ins)); break;
+ case BCMrbase:
+ lj_assertJ(op == BC_CAT, "unhandled op %d with RC rbase", op);
+ for (s = bc_b(ins); s <= bc_c(ins); s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ break;
+ case BCMjump:
+ handle_jump: {
+ BCReg minslot = bc_a(ins);
+ if (op >= BC_FORI && op <= BC_JFORL) minslot += FORL_EXT;
+ else if (op >= BC_ITERL && op <= BC_JITERL) minslot += bc_b(pc[-2])-1;
+ else if (op == BC_UCLO) {
+ ptrdiff_t delta = bc_j(ins);
+ if (delta < 0) return maxslot; /* Prevent loop. */
+ pc += delta;
+ break;
+ }
+ for (s = minslot; s < maxslot; s++) DEF_SLOT(s);
+ return minslot < maxslot ? minslot : maxslot;
+ }
+ case BCMlit:
+ if (op == BC_JFORL || op == BC_JITERL || op == BC_JLOOP) {
+ goto handle_jump;
+ } else if (bc_isret(op)) {
+ BCReg top = op == BC_RETM ? maxslot : (bc_a(ins) + bc_d(ins)-1);
+ for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+ for (; s < top; s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ return 0;
+ }
+ break;
+ case BCMfunc: return maxslot; /* NYI: will abort, anyway. */
+ default: break;
+ }
+ switch (bcmode_a(op)) {
+ case BCMvar: USE_SLOT(bc_a(ins)); break;
+ case BCMdst:
+ if (!(op == BC_ISTC || op == BC_ISFC)) DEF_SLOT(bc_a(ins));
+ break;
+ case BCMbase:
+ if (op >= BC_CALLM && op <= BC_ITERN) {
+ BCReg top = (op == BC_CALLM || op == BC_CALLMT || bc_c(ins) == 0) ?
+ maxslot : (bc_a(ins) + bc_c(ins)+LJ_FR2);
+ if (LJ_FR2) DEF_SLOT(bc_a(ins)+1);
+ s = bc_a(ins) - ((op == BC_ITERC || op == BC_ITERN) ? 3 : 0);
+ for (; s < top; s++) USE_SLOT(s);
+ for (; s < maxslot; s++) DEF_SLOT(s);
+ if (op == BC_CALLT || op == BC_CALLMT) {
+ for (s = 0; s < bc_a(ins); s++) DEF_SLOT(s);
+ return 0;
+ }
+ } else if (op == BC_VARG) {
+ return maxslot; /* NYI: punt. */
+ } else if (op == BC_KNIL) {
+ for (s = bc_a(ins); s <= bc_d(ins); s++) DEF_SLOT(s);
+ } else if (op == BC_TSETM) {
+ for (s = bc_a(ins)-1; s < maxslot; s++) USE_SLOT(s);
+ }
+ break;
+ default: break;
+ }
+ lj_assertJ(pc >= proto_bc(J->pt) && pc < proto_bc(J->pt) + J->pt->sizebc,
+ "use/def analysis PC out of range");
+ }
+
+#undef USE_SLOT
+#undef DEF_SLOT
+
+ return 0; /* unreachable */
+}
+
+/* Mark slots used by upvalues of child prototypes as used. */
+static void snap_useuv(GCproto *pt, uint8_t *udf)
+{
+ /* This is a coarse check, because it's difficult to correlate the lifetime
+ ** of slots and closures. But the number of false positives is quite low.
+ ** A false positive may cause a slot not to be purged, which is just
+ ** a missed optimization.
+ */
+ if ((pt->flags & PROTO_CHILD)) {
+ ptrdiff_t i, j, n = pt->sizekgc;
+ GCRef *kr = mref(pt->k, GCRef) - 1;
+ for (i = 0; i < n; i++, kr--) {
+ GCobj *o = gcref(*kr);
+ if (o->gch.gct == ~LJ_TPROTO) {
+ for (j = 0; j < gco2pt(o)->sizeuv; j++) {
+ uint32_t v = proto_uv(gco2pt(o))[j];
+ if ((v & PROTO_UV_LOCAL)) {
+ udf[(v & 0xff)] = 0;
+ }
+ }
+ }
+ }
+ }
+}
+
+/* Purge dead slots before the next snapshot. */
+void lj_snap_purge(jit_State *J)
+{
+ uint8_t udf[SNAP_USEDEF_SLOTS];
+ BCReg s, maxslot = J->maxslot;
+ if (bc_op(*J->pc) == BC_FUNCV && maxslot > J->pt->numparams)
+ maxslot = J->pt->numparams;
+ s = snap_usedef(J, udf, J->pc, maxslot);
+ if (s < maxslot) {
+ snap_useuv(J->pt, udf);
+ for (; s < maxslot; s++)
+ if (udf[s] != 0)
+ J->base[s] = 0; /* Purge dead slots. */
+ }
+}
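+
+/* A purged slot is just a zero TRef, which the following snapshot omits.
+** This is purely an optimization: fewer live slots mean smaller snap
+** maps and fewer values to restore on a trace exit.
+*/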
+
+/* Shrink last snapshot. */
+void lj_snap_shrink(jit_State *J)
+{
+ SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, m, nlim, nent = snap->nent;
+ uint8_t udf[SNAP_USEDEF_SLOTS];
+ BCReg maxslot = J->maxslot;
+ BCReg baseslot = J->baseslot;
+ BCReg minslot = snap_usedef(J, udf, snap_pc(&map[nent]), maxslot);
+ if (minslot < maxslot) snap_useuv(J->pt, udf);
+ maxslot += baseslot;
+ minslot += baseslot;
+ snap->nslots = (uint8_t)maxslot;
+ for (n = m = 0; n < nent; n++) { /* Remove unused slots from snapshot. */
+ BCReg s = snap_slot(map[n]);
+ if (s < minslot || (s < maxslot && udf[s-baseslot] == 0))
+ map[m++] = map[n]; /* Only copy used slots. */
+ }
+ snap->nent = (uint8_t)m;
+ nlim = J->cur.nsnapmap - snap->mapofs - 1;
+ while (n <= nlim) map[m++] = map[n++]; /* Move PC + frame links down. */
+ J->cur.nsnapmap = (uint32_t)(snap->mapofs + m); /* Free up space in map. */
+}
+
+/* -- Snapshot access ----------------------------------------------------- */
+
+/* Initialize a Bloom Filter with all renamed refs.
+** There are very few renames (often none), so the filter has
+** very few bits set. This makes it suitable for negative filtering.
+*/
+static BloomFilter snap_renamefilter(GCtrace *T, SnapNo lim)
+{
+ BloomFilter rfilt = 0;
+ IRIns *ir;
+ for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+ if (ir->op2 <= lim)
+ bloomset(rfilt, ir->op1);
+ return rfilt;
+}
+
+/* Process matching renames to find the original RegSP. */
+static RegSP snap_renameref(GCtrace *T, SnapNo lim, IRRef ref, RegSP rs)
+{
+ IRIns *ir;
+ for (ir = &T->ir[T->nins-1]; ir->o == IR_RENAME; ir--)
+ if (ir->op1 == ref && ir->op2 <= lim)
+ rs = ir->prev;
+ return rs;
+}
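+
+/* The BloomFilter is a single word (see lj_def.h): bloomset() maps a ref
+** to one bit. A bloomtest() miss proves the ref was never renamed; a hit
+** merely means the RENAME list has to be walked, as snap_renameref()
+** above does.
+*/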
+
+/* Copy RegSP from parent snapshot to the parent links of the IR. */
+IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno, IRIns *ir)
+{
+ SnapShot *snap = &T->snap[snapno];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ BloomFilter rfilt = snap_renamefilter(T, snapno);
+ MSize n = 0;
+ IRRef ref = 0;
+ UNUSED(J);
+ for ( ; ; ir++) {
+ uint32_t rs;
+ if (ir->o == IR_SLOAD) {
+ if (!(ir->op2 & IRSLOAD_PARENT)) break;
+ for ( ; ; n++) {
+ lj_assertJ(n < snap->nent, "slot %d not found in snapshot", ir->op1);
+ if (snap_slot(map[n]) == ir->op1) {
+ ref = snap_ref(map[n++]);
+ break;
+ }
+ }
+ } else if (LJ_SOFTFP32 && ir->o == IR_HIOP) {
+ ref++;
+ } else if (ir->o == IR_PVAL) {
+ ref = ir->op1 + REF_BIAS;
+ } else {
+ break;
+ }
+ rs = T->ir[ref].prev;
+ if (bloomtest(rfilt, ref))
+ rs = snap_renameref(T, snapno, ref, rs);
+ ir->prev = (uint16_t)rs;
+ lj_assertJ(regsp_used(rs), "unused IR %04d in snapshot", ref - REF_BIAS);
+ }
+ return ir;
+}
+
+/* -- Snapshot replay ----------------------------------------------------- */
+
+/* Replay constant from parent trace. */
+static TRef snap_replay_const(jit_State *J, IRIns *ir)
+{
+ /* Only have to deal with constants that can occur in stack slots. */
+ switch ((IROp)ir->o) {
+ case IR_KPRI: return TREF_PRI(irt_type(ir->t));
+ case IR_KINT: return lj_ir_kint(J, ir->i);
+ case IR_KGC: return lj_ir_kgc(J, ir_kgc(ir), irt_t(ir->t));
+ case IR_KNUM: case IR_KINT64:
+ return lj_ir_k64(J, (IROp)ir->o, ir_k64(ir)->u64);
+ case IR_KPTR: return lj_ir_kptr(J, ir_kptr(ir)); /* Continuation. */
+ default: lj_assertJ(0, "bad IR constant op %d", ir->o); return TREF_NIL;
+ }
+}
+
+/* De-duplicate parent reference. */
+static TRef snap_dedup(jit_State *J, SnapEntry *map, MSize nmax, IRRef ref)
+{
+ MSize j;
+ for (j = 0; j < nmax; j++)
+ if (snap_ref(map[j]) == ref)
+ return J->slot[snap_slot(map[j])] & ~(SNAP_KEYINDEX|SNAP_CONT|SNAP_FRAME);
+ return 0;
+}
+
+/* Emit parent reference with de-duplication. */
+static TRef snap_pref(jit_State *J, GCtrace *T, SnapEntry *map, MSize nmax,
+ BloomFilter seen, IRRef ref)
+{
+ IRIns *ir = &T->ir[ref];
+ TRef tr;
+ if (irref_isk(ref))
+ tr = snap_replay_const(J, ir);
+ else if (!regsp_used(ir->prev))
+ tr = 0;
+ else if (!bloomtest(seen, ref) || (tr = snap_dedup(J, map, nmax, ref)) == 0)
+ tr = emitir(IRT(IR_PVAL, irt_type(ir->t)), ref - REF_BIAS, 0);
+ return tr;
+}
+
+/* Check whether a sunk store corresponds to an allocation. Slow path. */
+static int snap_sunk_store2(GCtrace *T, IRIns *ira, IRIns *irs)
+{
+ if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
+ IRIns *irk = &T->ir[irs->op1];
+ if (irk->o == IR_AREF || irk->o == IR_HREFK)
+ irk = &T->ir[irk->op1];
+ return (&T->ir[irk->op1] == ira);
+ }
+ return 0;
+}
+
+/* Check whether a sunk store corresponds to an allocation. Fast path. */
+static LJ_AINLINE int snap_sunk_store(GCtrace *T, IRIns *ira, IRIns *irs)
+{
+ if (irs->s != 255)
+ return (ira + irs->s == irs); /* Fast check. */
+ return snap_sunk_store2(T, ira, irs);
+}
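+
+/* The store's spill byte irs->s is repurposed by the sink optimization
+** (lj_opt_sink.c) to cache the distance back to the allocation, with 255
+** as the did-not-fit sentinel that forces the pointer-chasing slow path
+** above.
+*/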
+
+/* Replay snapshot state to set up a side trace. */
+void lj_snap_replay(jit_State *J, GCtrace *T)
+{
+ SnapShot *snap = &T->snap[J->exitno];
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ BloomFilter seen = 0;
+ int pass23 = 0;
+ J->framedepth = 0;
+ /* Emit IR for slots inherited from parent snapshot. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ BCReg s = snap_slot(sn);
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = &T->ir[ref];
+ TRef tr;
+ /* The bloom filter avoids O(nent^2) overhead for de-duping slots. */
+ if (bloomtest(seen, ref) && (tr = snap_dedup(J, map, n, ref)) != 0)
+ goto setslot;
+ bloomset(seen, ref);
+ if (irref_isk(ref)) {
+ /* See special treatment of LJ_FR2 slot 1 in snapshot_slots() above. */
+ if (LJ_FR2 && (sn == SNAP(1, SNAP_FRAME | SNAP_NORESTORE, REF_NIL)))
+ tr = 0;
+ else
+ tr = snap_replay_const(J, ir);
+ } else if (!regsp_used(ir->prev)) {
+ pass23 = 1;
+ lj_assertJ(s != 0, "unused slot 0 in snapshot");
+ tr = s;
+ } else {
+ IRType t = irt_type(ir->t);
+ uint32_t mode = IRSLOAD_INHERIT|IRSLOAD_PARENT;
+ if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM)) t = IRT_NUM;
+ if (ir->o == IR_SLOAD) mode |= (ir->op2 & IRSLOAD_READONLY);
+ if ((sn & SNAP_KEYINDEX)) mode |= IRSLOAD_KEYINDEX;
+ tr = emitir_raw(IRT(IR_SLOAD, t), s, mode);
+ }
+ setslot:
+ /* Same as TREF_* flags. */
+ J->slot[s] = tr | (sn&(SNAP_KEYINDEX|SNAP_CONT|SNAP_FRAME));
+ J->framedepth += ((sn & (SNAP_CONT|SNAP_FRAME)) && (s != LJ_FR2));
+ if ((sn & SNAP_FRAME))
+ J->baseslot = s+1;
+ }
+ if (pass23) {
+ IRIns *irlast = &T->ir[snap->ref];
+ pass23 = 0;
+ /* Emit dependent PVALs. */
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef refp = snap_ref(sn);
+ IRIns *ir = &T->ir[refp];
+ if (regsp_reg(ir->r) == RID_SUNK) {
+ if (J->slot[snap_slot(sn)] != snap_slot(sn)) continue;
+ pass23 = 1;
+ lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
+ ir->o == IR_CNEW || ir->o == IR_CNEWI,
+ "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
+ if (ir->op1 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op1);
+ if (ir->op2 >= T->nk) snap_pref(J, T, map, nent, seen, ir->op2);
+ if (LJ_HASFFI && ir->o == IR_CNEWI) {
+ if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP)
+ snap_pref(J, T, map, nent, seen, (ir+1)->op2);
+ } else {
+ IRIns *irs;
+ for (irs = ir+1; irs < irlast; irs++)
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
+ if (snap_pref(J, T, map, nent, seen, irs->op2) == 0)
+ snap_pref(J, T, map, nent, seen, T->ir[irs->op2].op1);
+ else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
+ irs+1 < irlast && (irs+1)->o == IR_HIOP)
+ snap_pref(J, T, map, nent, seen, (irs+1)->op2);
+ }
+ }
+ } else if (!irref_isk(refp) && !regsp_used(ir->prev)) {
+ lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
+ "sunk parent IR %04d has bad op %d", refp - REF_BIAS, ir->o);
+ J->slot[snap_slot(sn)] = snap_pref(J, T, map, nent, seen, ir->op1);
+ }
+ }
+ /* Replay sunk instructions. */
+ for (n = 0; pass23 && n < nent; n++) {
+ SnapEntry sn = map[n];
+ IRRef refp = snap_ref(sn);
+ IRIns *ir = &T->ir[refp];
+ if (regsp_reg(ir->r) == RID_SUNK) {
+ TRef op1, op2;
+ if (J->slot[snap_slot(sn)] != snap_slot(sn)) { /* De-dup allocs. */
+ J->slot[snap_slot(sn)] = J->slot[J->slot[snap_slot(sn)]];
+ continue;
+ }
+ op1 = ir->op1;
+ if (op1 >= T->nk) op1 = snap_pref(J, T, map, nent, seen, op1);
+ op2 = ir->op2;
+ if (op2 >= T->nk) op2 = snap_pref(J, T, map, nent, seen, op2);
+ if (LJ_HASFFI && ir->o == IR_CNEWI) {
+ if (LJ_32 && refp+1 < T->nins && (ir+1)->o == IR_HIOP) {
+ lj_needsplit(J); /* Emit joining HIOP. */
+ op2 = emitir_raw(IRT(IR_HIOP, IRT_I64), op2,
+ snap_pref(J, T, map, nent, seen, (ir+1)->op2));
+ }
+ J->slot[snap_slot(sn)] = emitir(ir->ot & ~(IRT_MARK|IRT_ISPHI), op1, op2);
+ } else {
+ IRIns *irs;
+ TRef tr = emitir(ir->ot, op1, op2);
+ J->slot[snap_slot(sn)] = tr;
+ for (irs = ir+1; irs < irlast; irs++)
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
+ IRIns *irr = &T->ir[irs->op1];
+ TRef val, key = irr->op2, tmp = tr;
+ if (irr->o != IR_FREF) {
+ IRIns *irk = &T->ir[key];
+ if (irr->o == IR_HREFK)
+ key = lj_ir_kslot(J, snap_replay_const(J, &T->ir[irk->op1]),
+ irk->op2);
+ else
+ key = snap_replay_const(J, irk);
+ if (irr->o == IR_HREFK || irr->o == IR_AREF) {
+ IRIns *irf = &T->ir[irr->op1];
+ tmp = emitir(irf->ot, tmp, irf->op2);
+ }
+ }
+ tmp = emitir(irr->ot, tmp, key);
+ val = snap_pref(J, T, map, nent, seen, irs->op2);
+ if (val == 0) {
+ IRIns *irc = &T->ir[irs->op2];
+ lj_assertJ(irc->o == IR_CONV && irc->op2 == IRCONV_NUM_INT,
+ "sunk store for parent IR %04d with bad op %d",
+ refp - REF_BIAS, irc->o);
+ val = snap_pref(J, T, map, nent, seen, irc->op1);
+ val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
+ } else if ((LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)) &&
+ irs+1 < irlast && (irs+1)->o == IR_HIOP) {
+ IRType t = IRT_I64;
+ if (LJ_SOFTFP32 && irt_type((irs+1)->t) == IRT_SOFTFP)
+ t = IRT_NUM;
+ lj_needsplit(J);
+ if (irref_isk(irs->op2) && irref_isk((irs+1)->op2)) {
+ uint64_t k = (uint32_t)T->ir[irs->op2].i +
+ ((uint64_t)T->ir[(irs+1)->op2].i << 32);
+ val = lj_ir_k64(J, t == IRT_I64 ? IR_KINT64 : IR_KNUM, k);
+ } else {
+ val = emitir_raw(IRT(IR_HIOP, t), val,
+ snap_pref(J, T, map, nent, seen, (irs+1)->op2));
+ }
+ tmp = emitir(IRT(irs->o, t), tmp, val);
+ continue;
+ }
+ tmp = emitir(irs->ot, tmp, val);
+ } else if (LJ_HASFFI && irs->o == IR_XBAR && ir->o == IR_CNEW) {
+ emitir(IRT(IR_XBAR, IRT_NIL), 0, 0);
+ }
+ }
+ }
+ }
+ }
+ J->base = J->slot + J->baseslot;
+ J->maxslot = snap->nslots - J->baseslot;
+ lj_snap_add(J);
+ if (pass23) /* Need explicit GC step _after_ initial snapshot. */
+ emitir_raw(IRTG(IR_GCSTEP, IRT_NIL), 0, 0);
+}
+
+/* -- Snapshot restore ---------------------------------------------------- */
+
+static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
+ SnapNo snapno, BloomFilter rfilt,
+ IRIns *ir, TValue *o);
+
+/* Restore a value from the trace exit state. */
+static void snap_restoreval(jit_State *J, GCtrace *T, ExitState *ex,
+ SnapNo snapno, BloomFilter rfilt,
+ IRRef ref, TValue *o)
+{
+ IRIns *ir = &T->ir[ref];
+ IRType1 t = ir->t;
+ RegSP rs = ir->prev;
+ if (irref_isk(ref)) { /* Restore constant slot. */
+ if (ir->o == IR_KPTR) {
+ o->u64 = (uint64_t)(uintptr_t)ir_kptr(ir);
+ } else {
+ lj_assertJ(!(ir->o == IR_KKPTR || ir->o == IR_KNULL),
+ "restore of const from IR %04d with bad op %d",
+ ref - REF_BIAS, ir->o);
+ lj_ir_kvalue(J->L, o, ir);
+ }
+ return;
+ }
+ if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
+ rs = snap_renameref(T, snapno, ref, rs);
+ if (ra_hasspill(regsp_spill(rs))) { /* Restore from spill slot. */
+ int32_t *sps = &ex->spill[regsp_spill(rs)];
+ if (irt_isinteger(t)) {
+ setintV(o, *sps);
+#if !LJ_SOFTFP32
+ } else if (irt_isnum(t)) {
+ o->u64 = *(uint64_t *)sps;
+#endif
+#if LJ_64 && !LJ_GC64
+ } else if (irt_islightud(t)) {
+ /* 64 bit lightuserdata which may escape already has the tag bits. */
+ o->u64 = *(uint64_t *)sps;
+#endif
+ } else {
+ lj_assertJ(!irt_ispri(t), "PRI ref with spill slot");
+ setgcV(J->L, o, (GCobj *)(uintptr_t)*(GCSize *)sps, irt_toitype(t));
+ }
+ } else { /* Restore from register. */
+ Reg r = regsp_reg(rs);
+ if (ra_noreg(r)) {
+ lj_assertJ(ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
+ "restore from IR %04d has no reg", ref - REF_BIAS);
+ snap_restoreval(J, T, ex, snapno, rfilt, ir->op1, o);
+ if (LJ_DUALNUM) setnumV(o, (lua_Number)intV(o));
+ return;
+ } else if (irt_isinteger(t)) {
+ setintV(o, (int32_t)ex->gpr[r-RID_MIN_GPR]);
+#if !LJ_SOFTFP
+ } else if (irt_isnum(t)) {
+ setnumV(o, ex->fpr[r-RID_MIN_FPR]);
+#elif LJ_64 /* && LJ_SOFTFP */
+ } else if (irt_isnum(t)) {
+ o->u64 = ex->gpr[r-RID_MIN_GPR];
+#endif
+#if LJ_64 && !LJ_GC64
+ } else if (irt_is64(t)) {
+ /* 64 bit values that already have the tag bits. */
+ o->u64 = ex->gpr[r-RID_MIN_GPR];
+#endif
+ } else if (irt_ispri(t)) {
+ setpriV(o, irt_toitype(t));
+ } else {
+ setgcV(J->L, o, (GCobj *)ex->gpr[r-RID_MIN_GPR], irt_toitype(t));
+ }
+ }
+}
+
+#if LJ_HASFFI
+/* Restore raw data from the trace exit state. */
+static void snap_restoredata(jit_State *J, GCtrace *T, ExitState *ex,
+ SnapNo snapno, BloomFilter rfilt,
+ IRRef ref, void *dst, CTSize sz)
+{
+ IRIns *ir = &T->ir[ref];
+ RegSP rs = ir->prev;
+ int32_t *src;
+ uint64_t tmp;
+ UNUSED(J);
+ if (irref_isk(ref)) {
+ if (ir_isk64(ir)) {
+ src = (int32_t *)&ir[1];
+ } else if (sz == 8) {
+ tmp = (uint64_t)(uint32_t)ir->i;
+ src = (int32_t *)&tmp;
+ } else {
+ src = &ir->i;
+ }
+ } else {
+ if (LJ_UNLIKELY(bloomtest(rfilt, ref)))
+ rs = snap_renameref(T, snapno, ref, rs);
+ if (ra_hasspill(regsp_spill(rs))) {
+ src = &ex->spill[regsp_spill(rs)];
+ if (sz == 8 && !irt_is64(ir->t)) {
+ tmp = (uint64_t)(uint32_t)*src;
+ src = (int32_t *)&tmp;
+ }
+ } else {
+ Reg r = regsp_reg(rs);
+ if (ra_noreg(r)) {
+ /* Note: this assumes CNEWI is never used for SOFTFP split numbers. */
+ lj_assertJ(sz == 8 && ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT,
+ "restore from IR %04d has no reg", ref - REF_BIAS);
+ snap_restoredata(J, T, ex, snapno, rfilt, ir->op1, dst, 4);
+ *(lua_Number *)dst = (lua_Number)*(int32_t *)dst;
+ return;
+ }
+ src = (int32_t *)&ex->gpr[r-RID_MIN_GPR];
+#if !LJ_SOFTFP
+ if (r >= RID_MAX_GPR) {
+ src = (int32_t *)&ex->fpr[r-RID_MIN_FPR];
+#if LJ_TARGET_PPC
+ if (sz == 4) { /* PPC FPRs are always doubles. */
+ *(float *)dst = (float)*(double *)src;
+ return;
+ }
+#else
+ if (LJ_BE && sz == 4) src++;
+#endif
+ } else
+#endif
+ if (LJ_64 && LJ_BE && sz == 4) src++;
+ }
+ }
+ lj_assertJ(sz == 1 || sz == 2 || sz == 4 || sz == 8,
+ "restore from IR %04d with bad size %d", ref - REF_BIAS, sz);
+ if (sz == 4) *(int32_t *)dst = *src;
+ else if (sz == 8) *(int64_t *)dst = *(int64_t *)src;
+ else if (sz == 1) *(int8_t *)dst = (int8_t)*src;
+ else *(int16_t *)dst = (int16_t)*src;
+}
+#endif
+
+/* Unsink allocation from the trace exit state. Unsink sunk stores. */
+static void snap_unsink(jit_State *J, GCtrace *T, ExitState *ex,
+ SnapNo snapno, BloomFilter rfilt,
+ IRIns *ir, TValue *o)
+{
+ lj_assertJ(ir->o == IR_TNEW || ir->o == IR_TDUP ||
+ ir->o == IR_CNEW || ir->o == IR_CNEWI,
+ "sunk allocation with bad op %d", ir->o);
+#if LJ_HASFFI
+ if (ir->o == IR_CNEW || ir->o == IR_CNEWI) {
+ CTState *cts = ctype_cts(J->L);
+ CTypeID id = (CTypeID)T->ir[ir->op1].i;
+ CTSize sz;
+ CTInfo info = lj_ctype_info(cts, id, &sz);
+ GCcdata *cd = lj_cdata_newx(cts, id, sz, info);
+ setcdataV(J->L, o, cd);
+ if (ir->o == IR_CNEWI) {
+ uint8_t *p = (uint8_t *)cdataptr(cd);
+ lj_assertJ(sz == 4 || sz == 8, "sunk cdata with bad size %d", sz);
+ if (LJ_32 && sz == 8 && ir+1 < T->ir + T->nins && (ir+1)->o == IR_HIOP) {
+ snap_restoredata(J, T, ex, snapno, rfilt, (ir+1)->op2,
+ LJ_LE ? p+4 : p, 4);
+ if (LJ_BE) p += 4;
+ sz = 4;
+ }
+ snap_restoredata(J, T, ex, snapno, rfilt, ir->op2, p, sz);
+ } else {
+ IRIns *irs, *irlast = &T->ir[T->snap[snapno].ref];
+ for (irs = ir+1; irs < irlast; irs++)
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
+ IRIns *iro = &T->ir[T->ir[irs->op1].op2];
+ uint8_t *p = (uint8_t *)cd;
+ CTSize szs;
+ lj_assertJ(irs->o == IR_XSTORE, "sunk store with bad op %d", irs->o);
+ lj_assertJ(T->ir[irs->op1].o == IR_ADD,
+ "sunk store with bad add op %d", T->ir[irs->op1].o);
+ lj_assertJ(iro->o == IR_KINT || iro->o == IR_KINT64,
+ "sunk store with bad const offset op %d", iro->o);
+ if (irt_is64(irs->t)) szs = 8;
+ else if (irt_isi8(irs->t) || irt_isu8(irs->t)) szs = 1;
+ else if (irt_isi16(irs->t) || irt_isu16(irs->t)) szs = 2;
+ else szs = 4;
+ if (LJ_64 && iro->o == IR_KINT64)
+ p += (int64_t)ir_k64(iro)->u64;
+ else
+ p += iro->i;
+ lj_assertJ(p >= (uint8_t *)cdataptr(cd) &&
+ p + szs <= (uint8_t *)cdataptr(cd) + sz,
+ "sunk store with offset out of range");
+ if (LJ_32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
+ lj_assertJ(szs == 4, "sunk store with bad size %d", szs);
+ snap_restoredata(J, T, ex, snapno, rfilt, (irs+1)->op2,
+ LJ_LE ? p+4 : p, 4);
+ if (LJ_BE) p += 4;
+ }
+ snap_restoredata(J, T, ex, snapno, rfilt, irs->op2, p, szs);
+ }
+ }
+ } else
+#endif
+ {
+ IRIns *irs, *irlast;
+ GCtab *t = ir->o == IR_TNEW ? lj_tab_new(J->L, ir->op1, ir->op2) :
+ lj_tab_dup(J->L, ir_ktab(&T->ir[ir->op1]));
+ settabV(J->L, o, t);
+ irlast = &T->ir[T->snap[snapno].ref];
+ for (irs = ir+1; irs < irlast; irs++)
+ if (irs->r == RID_SINK && snap_sunk_store(T, ir, irs)) {
+ IRIns *irk = &T->ir[irs->op1];
+ TValue tmp, *val;
+ lj_assertJ(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
+ irs->o == IR_FSTORE,
+ "sunk store with bad op %d", irs->o);
+ if (irk->o == IR_FREF) {
+ lj_assertJ(irk->op2 == IRFL_TAB_META,
+ "sunk store with bad field %d", irk->op2);
+ snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, &tmp);
+ /* NOBARRIER: The table is new (marked white). */
+ setgcref(t->metatable, obj2gco(tabV(&tmp)));
+ } else {
+ irk = &T->ir[irk->op2];
+ if (irk->o == IR_KSLOT) irk = &T->ir[irk->op1];
+ lj_ir_kvalue(J->L, &tmp, irk);
+ val = lj_tab_set(J->L, t, &tmp);
+ /* NOBARRIER: The table is new (marked white). */
+ snap_restoreval(J, T, ex, snapno, rfilt, irs->op2, val);
+ if (LJ_SOFTFP32 && irs+1 < T->ir + T->nins && (irs+1)->o == IR_HIOP) {
+ snap_restoreval(J, T, ex, snapno, rfilt, (irs+1)->op2, &tmp);
+ val->u32.hi = tmp.u32.lo;
+ }
+ }
+ }
+ }
+}
+
+/* Restore interpreter state from exit state with the help of a snapshot. */
+const BCIns *lj_snap_restore(jit_State *J, void *exptr)
+{
+ ExitState *ex = (ExitState *)exptr;
+ SnapNo snapno = J->exitno; /* For now, snapno == exitno. */
+ GCtrace *T = traceref(J, J->parent);
+ SnapShot *snap = &T->snap[snapno];
+ MSize n, nent = snap->nent;
+ SnapEntry *map = &T->snapmap[snap->mapofs];
+#if !LJ_FR2 || defined(LUA_USE_ASSERT)
+ SnapEntry *flinks = &T->snapmap[snap_nextofs(T, snap)-1-LJ_FR2];
+#endif
+#if !LJ_FR2
+ ptrdiff_t ftsz0;
+#endif
+ TValue *frame;
+ BloomFilter rfilt = snap_renamefilter(T, snapno);
+ const BCIns *pc = snap_pc(&map[nent]);
+ lua_State *L = J->L;
+
+ /* Set interpreter PC to the next PC to get correct error messages. */
+ setcframe_pc(cframe_raw(L->cframe), pc+1);
+
+ /* Make sure the stack is big enough for the slots from the snapshot. */
+ if (LJ_UNLIKELY(L->base + snap->topslot >= tvref(L->maxstack))) {
+ L->top = curr_topL(L);
+ lj_state_growstack(L, snap->topslot - curr_proto(L)->framesize);
+ }
+
+ /* Fill stack slots with data from the registers and spill slots. */
+ frame = L->base-1-LJ_FR2;
+#if !LJ_FR2
+ ftsz0 = frame_ftsz(frame); /* Preserve link to previous frame in slot #0. */
+#endif
+ for (n = 0; n < nent; n++) {
+ SnapEntry sn = map[n];
+ if (!(sn & SNAP_NORESTORE)) {
+ TValue *o = &frame[snap_slot(sn)];
+ IRRef ref = snap_ref(sn);
+ IRIns *ir = &T->ir[ref];
+ if (ir->r == RID_SUNK) {
+ MSize j;
+ for (j = 0; j < n; j++)
+ if (snap_ref(map[j]) == ref) { /* De-duplicate sunk allocations. */
+ copyTV(L, o, &frame[snap_slot(map[j])]);
+ goto dupslot;
+ }
+ snap_unsink(J, T, ex, snapno, rfilt, ir, o);
+ dupslot:
+ continue;
+ }
+ snap_restoreval(J, T, ex, snapno, rfilt, ref, o);
+ if (LJ_SOFTFP32 && (sn & SNAP_SOFTFPNUM) && tvisint(o)) {
+ TValue tmp;
+ snap_restoreval(J, T, ex, snapno, rfilt, ref+1, &tmp);
+ o->u32.hi = tmp.u32.lo;
+#if !LJ_FR2
+ } else if ((sn & (SNAP_CONT|SNAP_FRAME))) {
+ /* Overwrite tag with frame link. */
+ setframe_ftsz(o, snap_slot(sn) != 0 ? (int32_t)*flinks-- : ftsz0);
+ L->base = o+1;
+#endif
+ } else if ((sn & SNAP_KEYINDEX)) {
+ /* An IRT_INT key index slot is restored as a number. Undo this. */
+ o->u32.lo = (uint32_t)(LJ_DUALNUM ? intV(o) : lj_num2int(numV(o)));
+ o->u32.hi = LJ_KEYINDEX;
+ }
+ }
+ }
+#if LJ_FR2
+ L->base += (map[nent+LJ_BE] & 0xff);
+#endif
+ lj_assertJ(map + nent == flinks, "inconsistent frames in snapshot");
+
+ /* Compute current stack top. */
+ switch (bc_op(*pc)) {
+ default:
+ if (bc_op(*pc) < BC_FUNCF) {
+ L->top = curr_topL(L);
+ break;
+ }
+ /* fallthrough */
+ case BC_CALLM: case BC_CALLMT: case BC_RETM: case BC_TSETM:
+ L->top = frame + snap->nslots;
+ break;
+ }
+ return pc;
+}
+
+#undef emitir_raw
+#undef emitir
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_snap.h b/libs/luajit-cmake/luajit/src/lj_snap.h
new file mode 100644
index 0000000..b7dabed
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_snap.h
@@ -0,0 +1,35 @@
+/*
+** Snapshot handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_SNAP_H
+#define _LJ_SNAP_H
+
+#include "lj_obj.h"
+#include "lj_jit.h"
+
+#if LJ_HASJIT
+LJ_FUNC void lj_snap_add(jit_State *J);
+LJ_FUNC void lj_snap_purge(jit_State *J);
+LJ_FUNC void lj_snap_shrink(jit_State *J);
+LJ_FUNC IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno,
+ IRIns *ir);
+LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
+LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
+LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
+LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
+
+static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
+}
+
+static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
+{
+ if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
+}
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_state.c b/libs/luajit-cmake/luajit/src/lj_state.c
new file mode 100644
index 0000000..0b9c46b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_state.c
@@ -0,0 +1,335 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_state_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_func.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_frame.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_trace.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_prng.h"
+#include "lj_lex.h"
+#include "lj_alloc.h"
+#include "luajit.h"
+
+/* -- Stack handling ------------------------------------------------------ */
+
+/* Stack sizes. */
+#define LJ_STACK_MIN LUA_MINSTACK /* Min. stack size. */
+#define LJ_STACK_MAX LUAI_MAXSTACK /* Max. stack size. */
+#define LJ_STACK_START (2*LJ_STACK_MIN) /* Starting stack size. */
+#define LJ_STACK_MAXEX (LJ_STACK_MAX + 1 + LJ_STACK_EXTRA)
+
+/* Explanation of LJ_STACK_EXTRA:
+**
+** Calls to metamethods store their arguments beyond the current top
+** without checking for the stack limit. This avoids stack resizes which
+** would invalidate passed TValue pointers. The stack check is performed
+** later by the function header. This can safely resize the stack or raise
+** an error. Thus we need some extra slots beyond the current stack limit.
+**
+** Most metamethods need 4 slots above top (cont, mobj, arg1, arg2) plus
+** one extra slot if mobj is not a function. Only lj_meta_tset needs 5
+** slots above top, but then mobj is always a function. So we can get by
+** with 5 extra slots.
+** LJ_FR2: We need 2 more slots for the frame PC and the continuation PC.
+*/
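+
+/* Concretely, with LJ_STACK_EXTRA defined as 5+2*LJ_FR2 (see lj_obj.h),
+** this reserves 5 extra slots on non-FR2 builds and 7 on LJ_FR2 builds:
+** cont, mobj, arg1, arg2, one spare for a non-function mobj, plus the
+** two PC slots mentioned above.
+*/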
+
+/* Resize stack slots and adjust pointers in state. */
+static void resizestack(lua_State *L, MSize n)
+{
+ TValue *st, *oldst = tvref(L->stack);
+ ptrdiff_t delta;
+ MSize oldsize = L->stacksize;
+ MSize realsize = n + 1 + LJ_STACK_EXTRA;
+ GCobj *up;
+ lj_assertL((MSize)(tvref(L->maxstack)-oldst) == L->stacksize-LJ_STACK_EXTRA-1,
+ "inconsistent stack size");
+ st = (TValue *)lj_mem_realloc(L, tvref(L->stack),
+ (MSize)(oldsize*sizeof(TValue)),
+ (MSize)(realsize*sizeof(TValue)));
+ setmref(L->stack, st);
+ delta = (char *)st - (char *)oldst;
+ setmref(L->maxstack, st + n);
+ while (oldsize < realsize) /* Clear new slots. */
+ setnilV(st + oldsize++);
+ L->stacksize = realsize;
+ if ((size_t)(mref(G(L)->jit_base, char) - (char *)oldst) < oldsize)
+ setmref(G(L)->jit_base, mref(G(L)->jit_base, char) + delta);
+ L->base = (TValue *)((char *)L->base + delta);
+ L->top = (TValue *)((char *)L->top + delta);
+ for (up = gcref(L->openupval); up != NULL; up = gcnext(up))
+ setmref(gco2uv(up)->v, (TValue *)((char *)uvval(gco2uv(up)) + delta));
+}
+
+/* Relimit stack after error, in case the limit was overdrawn. */
+void lj_state_relimitstack(lua_State *L)
+{
+ if (L->stacksize > LJ_STACK_MAXEX && L->top-tvref(L->stack) < LJ_STACK_MAX-1)
+ resizestack(L, LJ_STACK_MAX);
+}
+
+/* Try to shrink the stack (called from GC). */
+void lj_state_shrinkstack(lua_State *L, MSize used)
+{
+ if (L->stacksize > LJ_STACK_MAXEX)
+ return; /* Avoid stack shrinking while handling stack overflow. */
+ if (4*used < L->stacksize &&
+ 2*(LJ_STACK_START+LJ_STACK_EXTRA) < L->stacksize &&
+ /* Don't shrink stack of live trace. */
+ (tvref(G(L)->jit_base) == NULL || obj2gco(L) != gcref(G(L)->cur_L)))
+ resizestack(L, L->stacksize >> 1);
+}
+
+/* Try to grow stack. */
+void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need)
+{
+ MSize n;
+ if (L->stacksize > LJ_STACK_MAXEX) /* Overflow while handling overflow? */
+ lj_err_throw(L, LUA_ERRERR);
+ n = L->stacksize + need;
+ if (n > LJ_STACK_MAX) {
+ n += 2*LUA_MINSTACK;
+ } else if (n < 2*L->stacksize) {
+ n = 2*L->stacksize;
+ if (n >= LJ_STACK_MAX)
+ n = LJ_STACK_MAX;
+ }
+ resizestack(L, n);
+ if (L->stacksize > LJ_STACK_MAXEX)
+ lj_err_msg(L, LJ_ERR_STKOV);
+}
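+
+/* Growth policy: at least double the stack, capped at LJ_STACK_MAX; e.g.
+** stacksize 40 with need 1 grows straight to 80. A request beyond the
+** cap takes a small overdraft (2*LUA_MINSTACK) instead, so the overflow
+** error below is raised with working room; lj_state_relimitstack()
+** trims the overdraft once the error has unwound.
+*/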
+
+void LJ_FASTCALL lj_state_growstack1(lua_State *L)
+{
+ lj_state_growstack(L, 1);
+}
+
+/* Allocate basic stack for new state. */
+static void stack_init(lua_State *L1, lua_State *L)
+{
+ TValue *stend, *st = lj_mem_newvec(L, LJ_STACK_START+LJ_STACK_EXTRA, TValue);
+ setmref(L1->stack, st);
+ L1->stacksize = LJ_STACK_START + LJ_STACK_EXTRA;
+ stend = st + L1->stacksize;
+ setmref(L1->maxstack, stend - LJ_STACK_EXTRA - 1);
+ setthreadV(L1, st++, L1); /* Needed for curr_funcisL() on empty stack. */
+ if (LJ_FR2) setnilV(st++);
+ L1->base = L1->top = st;
+ while (st < stend) /* Clear new slots. */
+ setnilV(st++);
+}
+
+/* -- State handling ------------------------------------------------------ */
+
+/* Open parts that may cause memory-allocation errors. */
+static TValue *cpluaopen(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ global_State *g = G(L);
+ UNUSED(dummy);
+ UNUSED(ud);
+ stack_init(L, L);
+ /* NOBARRIER: State initialization, all objects are white. */
+ setgcref(L->env, obj2gco(lj_tab_new(L, 0, LJ_MIN_GLOBAL)));
+ settabV(L, registry(L), lj_tab_new(L, 0, LJ_MIN_REGISTRY));
+ lj_str_init(L);
+ lj_meta_init(L);
+ lj_lex_init(L);
+ fixstring(lj_err_str(L, LJ_ERR_ERRMEM)); /* Preallocate memory error msg. */
+ g->gc.threshold = 4*g->gc.total;
+ lj_trace_initstate(g);
+ lj_err_verify();
+ return NULL;
+}
+
+static void close_state(lua_State *L)
+{
+ global_State *g = G(L);
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_freeall(g);
+ lj_assertG(gcref(g->gc.root) == obj2gco(L),
+ "main thread is not first GC object");
+ lj_assertG(g->str.num == 0, "leaked %d strings", g->str.num);
+ lj_trace_freestate(g);
+#if LJ_HASFFI
+ lj_ctype_freestate(g);
+#endif
+ lj_str_freetab(g);
+ lj_buf_free(g, &g->tmpbuf);
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+#if LJ_64
+ if (mref(g->gc.lightudseg, uint32_t)) {
+ MSize segnum = g->gc.lightudnum ? (2 << lj_fls(g->gc.lightudnum)) : 2;
+ lj_mem_freevec(g, mref(g->gc.lightudseg, uint32_t), segnum, uint32_t);
+ }
+#endif
+ lj_assertG(g->gc.total == sizeof(GG_State),
+ "memory leak of %lld bytes",
+ (long long)(g->gc.total - sizeof(GG_State)));
+#ifndef LUAJIT_USE_SYSMALLOC
+ if (g->allocf == lj_alloc_f)
+ lj_alloc_destroy(g->allocd);
+ else
+#endif
+ g->allocf(g->allocd, G2GG(g), sizeof(GG_State), 0);
+}
+
+#if LJ_64 && !LJ_GC64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
+lua_State *lj_state_newstate(lua_Alloc allocf, void *allocd)
+#else
+LUA_API lua_State *lua_newstate(lua_Alloc allocf, void *allocd)
+#endif
+{
+ PRNGState prng;
+ GG_State *GG;
+ lua_State *L;
+ global_State *g;
+ /* We need the PRNG for the memory allocator, so initialize this first. */
+ if (!lj_prng_seed_secure(&prng)) {
+ lj_assertX(0, "secure PRNG seeding failed");
+ /* Can only return NULL here, so this errors with "not enough memory". */
+ return NULL;
+ }
+#ifndef LUAJIT_USE_SYSMALLOC
+ if (allocf == LJ_ALLOCF_INTERNAL) {
+ allocd = lj_alloc_create(&prng);
+ if (!allocd) return NULL;
+ allocf = lj_alloc_f;
+ }
+#endif
+ GG = (GG_State *)allocf(allocd, NULL, 0, sizeof(GG_State));
+ if (GG == NULL || !checkptrGC(GG)) return NULL;
+ memset(GG, 0, sizeof(GG_State));
+ L = &GG->L;
+ g = &GG->g;
+ L->gct = ~LJ_TTHREAD;
+ L->marked = LJ_GC_WHITE0 | LJ_GC_FIXED | LJ_GC_SFIXED; /* Prevent free. */
+ L->dummy_ffid = FF_C;
+ setmref(L->glref, g);
+ g->gc.currentwhite = LJ_GC_WHITE0 | LJ_GC_FIXED;
+ g->strempty.marked = LJ_GC_WHITE0;
+ g->strempty.gct = ~LJ_TSTR;
+ g->allocf = allocf;
+ g->allocd = allocd;
+ g->prng = prng;
+#ifndef LUAJIT_USE_SYSMALLOC
+ if (allocf == lj_alloc_f) {
+ lj_alloc_setprng(allocd, &g->prng);
+ }
+#endif
+ setgcref(g->mainthref, obj2gco(L));
+ setgcref(g->uvhead.prev, obj2gco(&g->uvhead));
+ setgcref(g->uvhead.next, obj2gco(&g->uvhead));
+ g->str.mask = ~(MSize)0;
+ setnilV(registry(L));
+ setnilV(&g->nilnode.val);
+ setnilV(&g->nilnode.key);
+#if !LJ_GC64
+ setmref(g->nilnode.freetop, &g->nilnode);
+#endif
+ lj_buf_init(NULL, &g->tmpbuf);
+ g->gc.state = GCSpause;
+ setgcref(g->gc.root, obj2gco(L));
+ setmref(g->gc.sweep, &g->gc.root);
+ g->gc.total = sizeof(GG_State);
+ g->gc.pause = LUAI_GCPAUSE;
+ g->gc.stepmul = LUAI_GCMUL;
+ lj_dispatch_init((GG_State *)L);
+ L->status = LUA_ERRERR+1; /* Avoid touching the stack upon memory error. */
+ if (lj_vm_cpcall(L, NULL, NULL, cpluaopen) != 0) {
+ /* Memory allocation error: free partial state. */
+ close_state(L);
+ return NULL;
+ }
+ L->status = LUA_OK;
+ return L;
+}
+
+static TValue *cpfinalize(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ UNUSED(dummy);
+ UNUSED(ud);
+ lj_gc_finalize_cdata(L);
+ lj_gc_finalize_udata(L);
+ /* Frame pop omitted. */
+ return NULL;
+}
+
+LUA_API void lua_close(lua_State *L)
+{
+ global_State *g = G(L);
+ int i;
+ L = mainthread(g); /* Only the main thread can be closed. */
+#if LJ_HASPROFILE
+ luaJIT_profile_stop(L);
+#endif
+ setgcrefnull(g->cur_L);
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_gc_separateudata(g, 1); /* Separate udata which have GC metamethods. */
+#if LJ_HASJIT
+ G2J(g)->flags &= ~JIT_F_ON;
+ G2J(g)->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(g);
+#endif
+ for (i = 0;;) {
+ hook_enter(g);
+ L->status = LUA_OK;
+ L->base = L->top = tvref(L->stack) + 1 + LJ_FR2;
+ L->cframe = NULL;
+ if (lj_vm_cpcall(L, NULL, NULL, cpfinalize) == LUA_OK) {
+ if (++i >= 10) break;
+ lj_gc_separateudata(g, 1); /* Separate udata again. */
+ if (gcref(g->gc.mmudata) == NULL) /* Until nothing is left to do. */
+ break;
+ }
+ }
+ close_state(L);
+}
+
+lua_State *lj_state_new(lua_State *L)
+{
+ lua_State *L1 = lj_mem_newobj(L, lua_State);
+ L1->gct = ~LJ_TTHREAD;
+ L1->dummy_ffid = FF_C;
+ L1->status = LUA_OK;
+ L1->stacksize = 0;
+ setmref(L1->stack, NULL);
+ L1->cframe = NULL;
+ /* NOBARRIER: The lua_State is new (marked white). */
+ setgcrefnull(L1->openupval);
+ setmrefr(L1->glref, L->glref);
+ setgcrefr(L1->env, L->env);
+ stack_init(L1, L); /* Init stack. */
+ lj_assertL(iswhite(obj2gco(L1)), "new thread object is not white");
+ return L1;
+}
+
+void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L)
+{
+ lj_assertG(L != mainthread(g), "free of main thread");
+ if (obj2gco(L) == gcref(g->cur_L))
+ setgcrefnull(g->cur_L);
+ lj_func_closeuv(L, tvref(L->stack));
+ lj_assertG(gcref(L->openupval) == NULL, "stale open upvalues");
+ lj_mem_freevec(g, tvref(L->stack), L->stacksize, TValue);
+ lj_mem_freet(g, L);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_state.h b/libs/luajit-cmake/luajit/src/lj_state.h
new file mode 100644
index 0000000..d22b7a6
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_state.h
@@ -0,0 +1,37 @@
+/*
+** State and stack handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STATE_H
+#define _LJ_STATE_H
+
+#include "lj_obj.h"
+
+#define incr_top(L) \
+ (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
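+
+/* incr_top bumps L->top and grows the stack by one slot if it crossed
+** maxstack. The (call, 0) comma expression keeps the && chain a valid
+** expression, so the macro works in statement and expression position.
+*/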
+
+#define savestack(L, p) ((char *)(p) - mref(L->stack, char))
+#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n)))
+
+LJ_FUNC void lj_state_relimitstack(lua_State *L);
+LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
+LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
+LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
+
+static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
+{
+ if ((mref(L->maxstack, char) - (char *)L->top) <=
+ (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
+ lj_state_growstack(L, need);
+}
+
+LJ_FUNC lua_State *lj_state_new(lua_State *L);
+LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
+#if LJ_64 && !LJ_GC64 && !(defined(LUAJIT_USE_VALGRIND) && defined(LUAJIT_USE_SYSMALLOC))
+LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
+#endif
+
+#define LJ_ALLOCF_INTERNAL ((lua_Alloc)(void *)(uintptr_t)(1237<<4))
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_str.c b/libs/luajit-cmake/luajit/src/lj_str.c
new file mode 100644
index 0000000..a5282da
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_str.c
@@ -0,0 +1,370 @@
+/*
+** String handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_str_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_str.h"
+#include "lj_char.h"
+#include "lj_prng.h"
+
+/* -- String helpers ------------------------------------------------------ */
+
+/* Ordered compare of strings. Assumes string data is 4-byte aligned. */
+int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b)
+{
+ MSize i, n = a->len > b->len ? b->len : a->len;
+ for (i = 0; i < n; i += 4) {
+ /* Note: innocuous access up to end of string + 3. */
+ uint32_t va = *(const uint32_t *)(strdata(a)+i);
+ uint32_t vb = *(const uint32_t *)(strdata(b)+i);
+ if (va != vb) {
+#if LJ_LE
+ va = lj_bswap(va); vb = lj_bswap(vb);
+#endif
+ i -= n;
+ if ((int32_t)i >= -3) {
+ va >>= 32+(i<<3); vb >>= 32+(i<<3);
+ if (va == vb) break;
+ }
+ return va < vb ? -1 : 1;
+ }
+ }
+ return (int32_t)(a->len - b->len);
+}
+
+/* Find fixed string p inside string s. */
+const char *lj_str_find(const char *s, const char *p, MSize slen, MSize plen)
+{
+ if (plen <= slen) {
+ if (plen == 0) {
+ return s;
+ } else {
+ int c = *(const uint8_t *)p++;
+ plen--; slen -= plen;
+ while (slen) {
+ const char *q = (const char *)memchr(s, c, slen);
+ if (!q) break;
+ if (memcmp(q+1, p, plen) == 0) return q;
+ q++; slen -= (MSize)(q-s); s = q;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Check whether a string has a pattern matching character. */
+int lj_str_haspattern(GCstr *s)
+{
+ const char *p = strdata(s), *q = p + s->len;
+ while (p < q) {
+ int c = *(const uint8_t *)p++;
+ if (lj_char_ispunct(c) && strchr("^$*+?.([%-", c))
+ return 1; /* Found a pattern matching char. */
+ }
+ return 0; /* No pattern matching chars found. */
+}
+
+/* -- String hashing ------------------------------------------------------ */
+
+/* Keyed sparse ARX string hash. Constant time. */
+static StrHash hash_sparse(uint64_t seed, const char *str, MSize len)
+{
+ /* Constants taken from lookup3 hash by Bob Jenkins. */
+ StrHash a, b, h = len ^ (StrHash)seed;
+ if (len >= 4) { /* Caveat: unaligned access! */
+ a = lj_getu32(str);
+ h ^= lj_getu32(str+len-4);
+ b = lj_getu32(str+(len>>1)-2);
+ h ^= b; h -= lj_rol(b, 14);
+ b += lj_getu32(str+(len>>2)-1);
+ } else {
+ a = *(const uint8_t *)str;
+ h ^= *(const uint8_t *)(str+len-1);
+ b = *(const uint8_t *)(str+(len>>1));
+ h ^= b; h -= lj_rol(b, 14);
+ }
+ a ^= h; a -= lj_rol(h, 11);
+ b ^= a; b -= lj_rol(a, 25);
+ h ^= b; h -= lj_rol(b, 16);
+ return h;
+}
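+
+/* Constant time because at most four unaligned words are read, however
+** long the string is: the first and last 4 bytes plus words anchored
+** near the middle (len>>1) and the first quarter (len>>2), mixed with
+** add/rotate/xor rounds. Collision resistance thus rests on the seed
+** and on the dense rehash below, not on input coverage.
+*/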
+
+#if LUAJIT_SECURITY_STRHASH
+/* Keyed dense ARX string hash. Linear time. */
+static LJ_NOINLINE StrHash hash_dense(uint64_t seed, StrHash h,
+ const char *str, MSize len)
+{
+ StrHash b = lj_bswap(lj_rol(h ^ (StrHash)(seed >> 32), 4));
+ if (len > 12) {
+ StrHash a = (StrHash)seed;
+ const char *pe = str+len-12, *p = pe, *q = str;
+ do {
+ a += lj_getu32(p);
+ b += lj_getu32(p+4);
+ h += lj_getu32(p+8);
+ p = q; q += 12;
+ h ^= b; h -= lj_rol(b, 14);
+ a ^= h; a -= lj_rol(h, 11);
+ b ^= a; b -= lj_rol(a, 25);
+ } while (p < pe);
+ h ^= b; h -= lj_rol(b, 16);
+ a ^= h; a -= lj_rol(h, 4);
+ b ^= a; b -= lj_rol(a, 14);
+ }
+ return b;
+}
+#endif
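+
+/* The dense hash is the linear-time fallback: it mixes the whole string
+** in 12-byte rounds (three u32 loads each, lookup3-style) keyed by the
+** full 64-bit seed. A chain only pays this cost after it has been
+** flagged as collision-heavy.
+*/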
+
+/* -- String interning ---------------------------------------------------- */
+
+#define LJ_STR_MAXCOLL 32
+
+/* Resize the string interning hash table (grow and shrink). */
+void lj_str_resize(lua_State *L, MSize newmask)
+{
+ global_State *g = G(L);
+ GCRef *newtab, *oldtab = g->str.tab;
+ MSize i;
+
+ /* No resizing during GC traversal or if already too big. */
+ if (g->gc.state == GCSsweepstring || newmask >= LJ_MAX_STRTAB-1)
+ return;
+
+ newtab = lj_mem_newvec(L, newmask+1, GCRef);
+ memset(newtab, 0, (newmask+1)*sizeof(GCRef));
+
+#if LUAJIT_SECURITY_STRHASH
+ /* Check which chains need secondary hashes. */
+ if (g->str.second) {
+ int newsecond = 0;
+ /* Compute primary chain lengths. */
+ for (i = g->str.mask; i != ~(MSize)0; i--) {
+ GCobj *o = (GCobj *)(gcrefu(oldtab[i]) & ~(uintptr_t)1);
+ while (o) {
+ GCstr *s = gco2str(o);
+ MSize hash = s->hashalg ? hash_sparse(g->str.seed, strdata(s), s->len) :
+ s->hash;
+ hash &= newmask;
+ setgcrefp(newtab[hash], gcrefu(newtab[hash]) + 1);
+ o = gcnext(o);
+ }
+ }
+ /* Mark secondary chains. */
+ for (i = newmask; i != ~(MSize)0; i--) {
+ int secondary = gcrefu(newtab[i]) > LJ_STR_MAXCOLL;
+ newsecond |= secondary;
+ setgcrefp(newtab[i], secondary);
+ }
+ g->str.second = newsecond;
+ }
+#endif
+
+ /* Reinsert all strings from the old table into the new table. */
+ for (i = g->str.mask; i != ~(MSize)0; i--) {
+ GCobj *o = (GCobj *)(gcrefu(oldtab[i]) & ~(uintptr_t)1);
+ while (o) {
+ GCobj *next = gcnext(o);
+ GCstr *s = gco2str(o);
+ MSize hash = s->hash;
+#if LUAJIT_SECURITY_STRHASH
+ uintptr_t u;
+ if (LJ_LIKELY(!s->hashalg)) { /* String hashed with primary hash. */
+ hash &= newmask;
+ u = gcrefu(newtab[hash]);
+ if (LJ_UNLIKELY(u & 1)) { /* Switch string to secondary hash. */
+ s->hash = hash = hash_dense(g->str.seed, s->hash, strdata(s), s->len);
+ s->hashalg = 1;
+ hash &= newmask;
+ u = gcrefu(newtab[hash]);
+ }
+ } else { /* String hashed with secondary hash. */
+ MSize shash = hash_sparse(g->str.seed, strdata(s), s->len);
+ u = gcrefu(newtab[shash & newmask]);
+ if (u & 1) {
+ hash &= newmask;
+ u = gcrefu(newtab[hash]);
+ } else { /* Revert string back to primary hash. */
+ s->hash = shash;
+ s->hashalg = 0;
+ hash = (shash & newmask);
+ }
+ }
+ /* NOBARRIER: The string table is a GC root. */
+ setgcrefp(o->gch.nextgc, (u & ~(uintptr_t)1));
+ setgcrefp(newtab[hash], ((uintptr_t)o | (u & 1)));
+#else
+ hash &= newmask;
+ /* NOBARRIER: The string table is a GC root. */
+ setgcrefr(o->gch.nextgc, newtab[hash]);
+ setgcref(newtab[hash], o);
+#endif
+ o = next;
+ }
+ }
+
+ /* Free old table and replace with new table. */
+ lj_str_freetab(g);
+ g->str.tab = newtab;
+ g->str.mask = newmask;
+}
+
+#if LUAJIT_SECURITY_STRHASH
+/* Rehash and rechain all strings in a chain. */
+static LJ_NOINLINE GCstr *lj_str_rehash_chain(lua_State *L, StrHash hashc,
+ const char *str, MSize len)
+{
+ global_State *g = G(L);
+ int ow = g->gc.state == GCSsweepstring ? otherwhite(g) : 0; /* Sweeping? */
+ GCRef *strtab = g->str.tab;
+ MSize strmask = g->str.mask;
+ GCobj *o = gcref(strtab[hashc & strmask]);
+ setgcrefp(strtab[hashc & strmask], (void *)((uintptr_t)1));
+ g->str.second = 1;
+ while (o) {
+ uintptr_t u;
+ GCobj *next = gcnext(o);
+ GCstr *s = gco2str(o);
+ StrHash hash;
+ if (ow) { /* Must sweep while rechaining. */
+ if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* String alive? */
+ lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED),
+ "sweep of undead string");
+ makewhite(g, o);
+ } else { /* Free dead string. */
+ lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED,
+ "sweep of unlive string");
+ lj_str_free(g, s);
+ o = next;
+ continue;
+ }
+ }
+ hash = s->hash;
+ if (!s->hashalg) { /* Rehash with secondary hash. */
+ hash = hash_dense(g->str.seed, hash, strdata(s), s->len);
+ s->hash = hash;
+ s->hashalg = 1;
+ }
+ /* Rechain. */
+ hash &= strmask;
+ u = gcrefu(strtab[hash]);
+ setgcrefp(o->gch.nextgc, (u & ~(uintptr_t)1));
+ setgcrefp(strtab[hash], ((uintptr_t)o | (u & 1)));
+ o = next;
+ }
+ /* Try to insert the pending string again. */
+ return lj_str_new(L, str, len);
+}
+#endif
+
+/* Reseed String ID from PRNG after random interval < 2^bits. */
+#if LUAJIT_SECURITY_STRID == 1
+#define STRID_RESEED_INTERVAL 8
+#elif LUAJIT_SECURITY_STRID == 2
+#define STRID_RESEED_INTERVAL 4
+#elif LUAJIT_SECURITY_STRID >= 3
+#define STRID_RESEED_INTERVAL 0
+#endif
+
+/* Allocate a new string and add to string interning table. */
+static GCstr *lj_str_alloc(lua_State *L, const char *str, MSize len,
+ StrHash hash, int hashalg)
+{
+ GCstr *s = lj_mem_newt(L, lj_str_size(len), GCstr);
+ global_State *g = G(L);
+ uintptr_t u;
+ newwhite(g, s);
+ s->gct = ~LJ_TSTR;
+ s->len = len;
+ s->hash = hash;
+#ifndef STRID_RESEED_INTERVAL
+ s->sid = g->str.id++;
+#elif STRID_RESEED_INTERVAL
+ if (!g->str.idreseed--) {
+ uint64_t r = lj_prng_u64(&g->prng);
+ g->str.id = (StrID)r;
+ g->str.idreseed = (uint8_t)(r >> (64 - STRID_RESEED_INTERVAL));
+ }
+ s->sid = g->str.id++;
+#else
+ s->sid = (StrID)lj_prng_u64(&g->prng);
+#endif
+ s->reserved = 0;
+ s->hashalg = (uint8_t)hashalg;
+ /* Clear last 4 bytes of allocated memory. Implies zero-termination, too. */
+ *(uint32_t *)(strdatawr(s)+(len & ~(MSize)3)) = 0;
+ memcpy(strdatawr(s), str, len);
+ /* Add to string hash table. */
+ hash &= g->str.mask;
+ u = gcrefu(g->str.tab[hash]);
+ setgcrefp(s->nextgc, (u & ~(uintptr_t)1));
+ /* NOBARRIER: The string table is a GC root. */
+ setgcrefp(g->str.tab[hash], ((uintptr_t)s | (u & 1)));
+ if (g->str.num++ > g->str.mask) /* Allow a 100% load factor. */
+ lj_str_resize(L, (g->str.mask<<1)+1); /* Grow string table. */
+ return s; /* Return newly interned string. */
+}
+
+/* Intern a string and return string object. */
+GCstr *lj_str_new(lua_State *L, const char *str, size_t lenx)
+{
+ global_State *g = G(L);
+ if (lenx-1 < LJ_MAX_STR-1) {
+ MSize len = (MSize)lenx;
+ StrHash hash = hash_sparse(g->str.seed, str, len);
+ MSize coll = 0;
+ int hashalg = 0;
+ /* Check if the string has already been interned. */
+ GCobj *o = gcref(g->str.tab[hash & g->str.mask]);
+#if LUAJIT_SECURITY_STRHASH
+ if (LJ_UNLIKELY((uintptr_t)o & 1)) { /* Secondary hash for this chain? */
+ hashalg = 1;
+ hash = hash_dense(g->str.seed, hash, str, len);
+ o = (GCobj *)(gcrefu(g->str.tab[hash & g->str.mask]) & ~(uintptr_t)1);
+ }
+#endif
+ while (o != NULL) {
+ GCstr *sx = gco2str(o);
+ if (sx->hash == hash && sx->len == len) {
+ if (memcmp(str, strdata(sx), len) == 0) {
+ if (isdead(g, o)) flipwhite(o); /* Resurrect if dead. */
+ return sx; /* Return existing string. */
+ }
+ coll++;
+ }
+ coll++;
+ o = gcnext(o);
+ }
+#if LUAJIT_SECURITY_STRHASH
+ /* Rehash chain if there are too many collisions. */
+ if (LJ_UNLIKELY(coll > LJ_STR_MAXCOLL) && !hashalg) {
+ return lj_str_rehash_chain(L, hash, str, len);
+ }
+#endif
+ /* Otherwise allocate a new string. */
+ return lj_str_alloc(L, str, len, hash, hashalg);
+ } else {
+ if (lenx)
+ lj_err_msg(L, LJ_ERR_STROV);
+ return &g->strempty;
+ }
+}
+
+void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s)
+{
+ g->str.num--;
+ lj_mem_free(g, s, lj_str_size(s->len));
+}
+
+void LJ_FASTCALL lj_str_init(lua_State *L)
+{
+ global_State *g = G(L);
+ g->str.seed = lj_prng_u64(&g->prng);
+ lj_str_resize(L, LJ_MIN_STRTAB-1);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_str.h b/libs/luajit-cmake/luajit/src/lj_str.h
new file mode 100644
index 0000000..28edb5a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_str.h
@@ -0,0 +1,31 @@
+/*
+** String handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STR_H
+#define _LJ_STR_H
+
+#include <stdarg.h>
+
+#include "lj_obj.h"
+
+/* String helpers. */
+LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
+LJ_FUNC const char *lj_str_find(const char *s, const char *f,
+ MSize slen, MSize flen);
+LJ_FUNC int lj_str_haspattern(GCstr *s);
+
+/* String interning. */
+LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
+LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
+LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
+LJ_FUNC void LJ_FASTCALL lj_str_init(lua_State *L);
+#define lj_str_freetab(g) \
+ (lj_mem_freevec(g, g->str.tab, g->str.mask+1, GCRef))
+
+#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
+#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
+#define lj_str_size(len) (sizeof(GCstr) + (((len)+4) & ~(MSize)3))
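+
+/* lj_str_size rounds the payload up to a multiple of 4 with at least one
+** trailing zero byte (len 5 -> +8, len 4 -> +8, len 3 -> +4). This
+** guarantees NUL-termination and makes the word-wise overreads in
+** lj_str_cmp() and the tail clear in lj_str_alloc() safe.
+*/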
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_strfmt.c b/libs/luajit-cmake/luajit/src/lj_strfmt.c
new file mode 100644
index 0000000..5c80829
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_strfmt.c
@@ -0,0 +1,606 @@
+/*
+** String formatting.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <stdio.h>
+
+#define lj_strfmt_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_err.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_meta.h"
+#include "lj_state.h"
+#include "lj_char.h"
+#include "lj_strfmt.h"
+#if LJ_HASFFI
+#include "lj_ctype.h"
+#endif
+#include "lj_lib.h"
+
+/* -- Format parser ------------------------------------------------------- */
+
+static const uint8_t strfmt_map[('x'-'A')+1] = {
+ STRFMT_A,0,0,0,STRFMT_E,STRFMT_F,STRFMT_G,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,STRFMT_X,0,0,
+ 0,0,0,0,0,0,
+ STRFMT_A,0,STRFMT_C,STRFMT_D,STRFMT_E,STRFMT_F,STRFMT_G,0,STRFMT_I,0,0,0,0,
+ 0,STRFMT_O,STRFMT_P,STRFMT_Q,0,STRFMT_S,0,STRFMT_U,0,0,STRFMT_X
+};
+
+SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs)
+{
+ const uint8_t *p = fs->p, *e = fs->e;
+ fs->str = (const char *)p;
+ for (; p < e; p++) {
+ if (*p == '%') { /* Escape char? */
+ if (p[1] == '%') { /* '%%'? */
+ fs->p = ++p+1;
+ goto retlit;
+ } else {
+ SFormat sf = 0;
+ uint32_t c;
+ if (p != (const uint8_t *)fs->str)
+ break;
+ for (p++; (uint32_t)*p - ' ' <= (uint32_t)('0' - ' '); p++) {
+ /* Parse flags. */
+ if (*p == '-') sf |= STRFMT_F_LEFT;
+ else if (*p == '+') sf |= STRFMT_F_PLUS;
+ else if (*p == '0') sf |= STRFMT_F_ZERO;
+ else if (*p == ' ') sf |= STRFMT_F_SPACE;
+ else if (*p == '#') sf |= STRFMT_F_ALT;
+ else break;
+ }
+ if ((uint32_t)*p - '0' < 10) { /* Parse width. */
+ uint32_t width = (uint32_t)*p++ - '0';
+ if ((uint32_t)*p - '0' < 10)
+ width = (uint32_t)*p++ - '0' + width*10;
+ sf |= (width << STRFMT_SH_WIDTH);
+ }
+ if (*p == '.') { /* Parse precision. */
+ uint32_t prec = 0;
+ p++;
+ if ((uint32_t)*p - '0' < 10) {
+ prec = (uint32_t)*p++ - '0';
+ if ((uint32_t)*p - '0' < 10)
+ prec = (uint32_t)*p++ - '0' + prec*10;
+ }
+ sf |= ((prec+1) << STRFMT_SH_PREC);
+ }
+ /* Parse conversion. */
+ c = (uint32_t)*p - 'A';
+ if (LJ_LIKELY(c <= (uint32_t)('x' - 'A'))) {
+ uint32_t sx = strfmt_map[c];
+ if (sx) {
+ fs->p = p+1;
+ return (sf | sx | ((c & 0x20) ? 0 : STRFMT_F_UPPER));
+ }
+ }
+ /* Return error location. */
+ if (*p >= 32) p++;
+ fs->len = (MSize)(p - (const uint8_t *)fs->str);
+ fs->p = fs->e;
+ return STRFMT_ERR;
+ }
+ }
+ }
+ fs->p = p;
+retlit:
+ fs->len = (MSize)(p - (const uint8_t *)fs->str);
+ return fs->len ? STRFMT_LIT : STRFMT_EOF;
+}
+
+/* -- Raw conversions ----------------------------------------------------- */
+
+#define WINT_R(x, sh, sc) \
+ { uint32_t d = (x*(((1<<sh)+sc-1)/sc))>>sh; x -= d*sc; *p++ = (char)('0'+d); }
+
+/* Write integer to buffer. */
+char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k)
+{
+ uint32_t u = (uint32_t)k;
+ if (k < 0) { u = (uint32_t)-k; *p++ = '-'; }
+ if (u < 10000) {
+ if (u < 10) goto dig1;
+ if (u < 100) goto dig2;
+ if (u < 1000) goto dig3;
+ } else {
+ uint32_t v = u / 10000; u -= v * 10000;
+ if (v < 10000) {
+ if (v < 10) goto dig5;
+ if (v < 100) goto dig6;
+ if (v < 1000) goto dig7;
+ } else {
+ uint32_t w = v / 10000; v -= w * 10000;
+ if (w >= 10) WINT_R(w, 10, 10)
+ *p++ = (char)('0'+w);
+ }
+ WINT_R(v, 23, 1000)
+ dig7: WINT_R(v, 12, 100)
+ dig6: WINT_R(v, 10, 10)
+ dig5: *p++ = (char)('0'+v);
+ }
+ WINT_R(u, 23, 1000)
+ dig3: WINT_R(u, 12, 100)
+ dig2: WINT_R(u, 10, 10)
+ dig1: *p++ = (char)('0'+u);
+ return p;
+}
+#undef WINT_R
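+
+/* WINT_R peels off one decimal digit with a fixed-point reciprocal:
+** ((1<<sh)+sc-1)/sc scales 1/sc, e.g. d = (x*41)>>12 for sc=100 and
+** d = (x*8389)>>23 for sc=1000. Both are exact over the ranges they see
+** here (x < 1000 resp. x < 10000), and x -= d*sc leaves the remainder
+** for the next step.
+*/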
+
+/* Write pointer to buffer. */
+char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v)
+{
+ ptrdiff_t x = (ptrdiff_t)v;
+ MSize i, n = STRFMT_MAXBUF_PTR;
+ if (x == 0) {
+ *p++ = 'N'; *p++ = 'U'; *p++ = 'L'; *p++ = 'L';
+ return p;
+ }
+#if LJ_64
+ /* Shorten output for 64 bit pointers. */
+ n = 2+2*4+((x >> 32) ? 2+2*(lj_fls((uint32_t)(x >> 32))>>3) : 0);
+#endif
+ p[0] = '0';
+ p[1] = 'x';
+ for (i = n-1; i >= 2; i--, x >>= 4)
+ p[i] = "0123456789abcdef"[(x & 15)];
+ return p+n;
+}
+
+/* Write ULEB128 to buffer. */
+char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v)
+{
+ for (; v >= 0x80; v >>= 7)
+ *p++ = (char)((v & 0x7f) | 0x80);
+ *p++ = (char)v;
+ return p;
+}
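+
+/* Plain ULEB128: 7 payload bits per byte, MSB set on all but the last
+** byte. E.g. 300 (0x12C) encodes as 0xAC 0x02.
+*/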
+
+/* Return string or write number to tmp buffer and return pointer to start. */
+const char *lj_strfmt_wstrnum(lua_State *L, cTValue *o, MSize *lenp)
+{
+ SBuf *sb;
+ if (tvisstr(o)) {
+ *lenp = strV(o)->len;
+ return strVdata(o);
+ } else if (tvisbuf(o)) {
+ SBufExt *sbx = bufV(o);
+ *lenp = sbufxlen(sbx);
+ return sbx->r;
+ } else if (tvisint(o)) {
+ sb = lj_strfmt_putint(lj_buf_tmp_(L), intV(o));
+ } else if (tvisnum(o)) {
+ sb = lj_strfmt_putfnum(lj_buf_tmp_(L), STRFMT_G14, o->n);
+ } else {
+ return NULL;
+ }
+ *lenp = sbuflen(sb);
+ return sb->b;
+}
+
+/* -- Unformatted conversions to buffer ----------------------------------- */
+
+/* Add integer to buffer. */
+SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k)
+{
+ sb->w = lj_strfmt_wint(lj_buf_more(sb, STRFMT_MAXBUF_INT), k);
+ return sb;
+}
+
+#if LJ_HASJIT
+/* Add number to buffer. */
+SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o)
+{
+ return lj_strfmt_putfnum(sb, STRFMT_G14, o->n);
+}
+#endif
+
+SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v)
+{
+ sb->w = lj_strfmt_wptr(lj_buf_more(sb, STRFMT_MAXBUF_PTR), v);
+ return sb;
+}
+
+/* Add quoted string to buffer. */
+static SBuf *strfmt_putquotedlen(SBuf *sb, const char *s, MSize len)
+{
+ lj_buf_putb(sb, '"');
+ while (len--) {
+ uint32_t c = (uint32_t)(uint8_t)*s++;
+ char *w = lj_buf_more(sb, 4);
+ if (c == '"' || c == '\\' || c == '\n') {
+ *w++ = '\\';
+ } else if (lj_char_iscntrl(c)) { /* This can only be 0-31 or 127. */
+ uint32_t d;
+ *w++ = '\\';
+ if (c >= 100 || lj_char_isdigit((uint8_t)*s)) {
+ *w++ = (char)('0'+(c >= 100)); if (c >= 100) c -= 100;
+ goto tens;
+ } else if (c >= 10) {
+ tens:
+ d = (c * 205) >> 11; c -= d * 10; *w++ = (char)('0'+d);
+ }
+ c += '0';
+ }
+ *w++ = (char)c;
+ sb->w = w;
+ }
+ lj_buf_putb(sb, '"');
+ return sb;
+}
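+
+/* The control-char branch uses the same reciprocal trick as
+** lj_strfmt_wint: (c*205)>>11 is an exact c/10 for c < 100, the only
+** range left after peeling off c >= 100. The lookahead on *s pads to a
+** full three-digit escape when a digit follows, so a NUL before '1' is
+** written "\0001" rather than the ambiguous "\01".
+*/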
+
+#if LJ_HASJIT
+SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str)
+{
+ return strfmt_putquotedlen(sb, strdata(str), str->len);
+}
+#endif
+
+/* -- Formatted conversions to buffer ------------------------------------- */
+
+/* Add formatted char to buffer. */
+SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat sf, int32_t c)
+{
+ MSize width = STRFMT_WIDTH(sf);
+ char *w = lj_buf_more(sb, width > 1 ? width : 1);
+ if ((sf & STRFMT_F_LEFT)) *w++ = (char)c;
+ while (width-- > 1) *w++ = ' ';
+ if (!(sf & STRFMT_F_LEFT)) *w++ = (char)c;
+ sb->w = w;
+ return sb;
+}
+
+/* Add formatted string to buffer. */
+static SBuf *strfmt_putfstrlen(SBuf *sb, SFormat sf, const char *s, MSize len)
+{
+ MSize width = STRFMT_WIDTH(sf);
+ char *w;
+ if (len > STRFMT_PREC(sf)) len = STRFMT_PREC(sf);
+ w = lj_buf_more(sb, width > len ? width : len);
+ if ((sf & STRFMT_F_LEFT)) w = lj_buf_wmem(w, s, len);
+ while (width-- > len) *w++ = ' ';
+ if (!(sf & STRFMT_F_LEFT)) w = lj_buf_wmem(w, s, len);
+ sb->w = w;
+ return sb;
+}
+
+#if LJ_HASJIT
+SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat sf, GCstr *str)
+{
+ return strfmt_putfstrlen(sb, sf, strdata(str), str->len);
+}
+#endif
+
+/* Add formatted signed/unsigned integer to buffer. */
+SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k)
+{
+ char buf[STRFMT_MAXBUF_XINT], *q = buf + sizeof(buf), *w;
+#ifdef LUA_USE_ASSERT
+ char *ws;
+#endif
+ MSize prefix = 0, len, prec, pprec, width, need;
+
+ /* Figure out signed prefixes. */
+ if (STRFMT_TYPE(sf) == STRFMT_INT) {
+ if ((int64_t)k < 0) {
+ k = (uint64_t)-(int64_t)k;
+ prefix = 256 + '-';
+ } else if ((sf & STRFMT_F_PLUS)) {
+ prefix = 256 + '+';
+ } else if ((sf & STRFMT_F_SPACE)) {
+ prefix = 256 + ' ';
+ }
+ }
+
+ /* Convert number and store to fixed-size buffer in reverse order. */
+ prec = STRFMT_PREC(sf);
+ if ((int32_t)prec >= 0) sf &= ~STRFMT_F_ZERO;
+ if (k == 0) { /* Special-case zero argument. */
+ if (prec != 0 ||
+ (sf & (STRFMT_T_OCT|STRFMT_F_ALT)) == (STRFMT_T_OCT|STRFMT_F_ALT))
+ *--q = '0';
+ } else if (!(sf & (STRFMT_T_HEX|STRFMT_T_OCT))) { /* Decimal. */
+ uint32_t k2;
+ while ((k >> 32)) { *--q = (char)('0' + k % 10); k /= 10; }
+ k2 = (uint32_t)k;
+ do { *--q = (char)('0' + k2 % 10); k2 /= 10; } while (k2);
+ } else if ((sf & STRFMT_T_HEX)) { /* Hex. */
+ const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEF" :
+ "0123456789abcdef";
+ do { *--q = hexdig[(k & 15)]; k >>= 4; } while (k);
+ if ((sf & STRFMT_F_ALT)) prefix = 512 + ((sf & STRFMT_F_UPPER) ? 'X' : 'x');
+ } else { /* Octal. */
+ do { *--q = (char)('0' + (uint32_t)(k & 7)); k >>= 3; } while (k);
+ if ((sf & STRFMT_F_ALT)) *--q = '0';
+ }
+
+ /* Calculate sizes. */
+ len = (MSize)(buf + sizeof(buf) - q);
+ if ((int32_t)len >= (int32_t)prec) prec = len;
+ width = STRFMT_WIDTH(sf);
+ pprec = prec + (prefix >> 8);
+ need = width > pprec ? width : pprec;
+ w = lj_buf_more(sb, need);
+#ifdef LUA_USE_ASSERT
+ ws = w;
+#endif
+
+ /* Format number with leading/trailing whitespace and zeros. */
+ if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == 0)
+ while (width-- > pprec) *w++ = ' ';
+ if (prefix) {
+ if ((char)prefix >= 'X') *w++ = '0';
+ *w++ = (char)prefix;
+ }
+ if ((sf & (STRFMT_F_LEFT|STRFMT_F_ZERO)) == STRFMT_F_ZERO)
+ while (width-- > pprec) *w++ = '0';
+ while (prec-- > len) *w++ = '0';
+ while (q < buf + sizeof(buf)) *w++ = *q++; /* Add number itself. */
+ if ((sf & STRFMT_F_LEFT))
+ while (width-- > pprec) *w++ = ' ';
+
+ lj_assertX(need == (MSize)(w - ws), "miscalculated format size");
+ sb->w = w;
+ return sb;
+}
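+
+/* prefix packs length and text into one int: the high part counts chars
+** (256 = one, 512 = two) and the low byte holds the final char, where a
+** char >= 'X' implies a leading '0' (i.e. "0x"/"0X"). Hence pprec =
+** prec + (prefix >> 8) is the printed width before any padding.
+*/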
+
+/* Add number formatted as signed integer to buffer. */
+SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n)
+{
+ int64_t k = (int64_t)n;
+ if (checki32(k) && sf == STRFMT_INT)
+ return lj_strfmt_putint(sb, (int32_t)k); /* Shortcut for plain %d. */
+ else
+ return lj_strfmt_putfxint(sb, sf, (uint64_t)k);
+}
+
+/* Add number formatted as unsigned integer to buffer. */
+SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n)
+{
+ int64_t k;
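+ /* Doubles >= 2^63 don't fit in int64_t; subtracting 2^64 first makes */
+ /* the negative cast below yield the intended bit pattern (n mod 2^64). */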
+ if (n >= 9223372036854775808.0)
+ k = (int64_t)(n - 18446744073709551616.0);
+ else
+ k = (int64_t)n;
+ return lj_strfmt_putfxint(sb, sf, (uint64_t)k);
+}
+
+/* Format stack arguments to buffer. */
+int lj_strfmt_putarg(lua_State *L, SBuf *sb, int arg, int retry)
+{
+ int narg = (int)(L->top - L->base);
+ GCstr *fmt = lj_lib_checkstr(L, arg);
+ FormatState fs;
+ SFormat sf;
+ lj_strfmt_init(&fs, strdata(fmt), fmt->len);
+ while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) {
+ if (sf == STRFMT_LIT) {
+ lj_buf_putmem(sb, fs.str, fs.len);
+ } else if (sf == STRFMT_ERR) {
+ lj_err_callerv(L, LJ_ERR_STRFMT,
+ strdata(lj_str_new(L, fs.str, fs.len)));
+ } else {
+ TValue *o = &L->base[arg++];
+ if (arg > narg)
+ lj_err_arg(L, arg, LJ_ERR_NOVAL);
+ switch (STRFMT_TYPE(sf)) {
+ case STRFMT_INT:
+ if (tvisint(o)) {
+ int32_t k = intV(o);
+ if (sf == STRFMT_INT)
+ lj_strfmt_putint(sb, k); /* Shortcut for plain %d. */
+ else
+ lj_strfmt_putfxint(sb, sf, k);
+ break;
+ }
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ if (cd->ctypeid == CTID_INT64 || cd->ctypeid == CTID_UINT64) {
+ lj_strfmt_putfxint(sb, sf, *(uint64_t *)cdataptr(cd));
+ break;
+ }
+ }
+#endif
+ lj_strfmt_putfnum_int(sb, sf, lj_lib_checknum(L, arg));
+ break;
+ case STRFMT_UINT:
+ if (tvisint(o)) {
+ lj_strfmt_putfxint(sb, sf, intV(o));
+ break;
+ }
+#if LJ_HASFFI
+ if (tviscdata(o)) {
+ GCcdata *cd = cdataV(o);
+ if (cd->ctypeid == CTID_INT64 || cd->ctypeid == CTID_UINT64) {
+ lj_strfmt_putfxint(sb, sf, *(uint64_t *)cdataptr(cd));
+ break;
+ }
+ }
+#endif
+ lj_strfmt_putfnum_uint(sb, sf, lj_lib_checknum(L, arg));
+ break;
+ case STRFMT_NUM:
+ lj_strfmt_putfnum(sb, sf, lj_lib_checknum(L, arg));
+ break;
+ case STRFMT_STR: {
+ MSize len;
+ const char *s;
+ cTValue *mo;
+ if (LJ_UNLIKELY(!tvisstr(o) && !tvisbuf(o)) && retry >= 0 &&
+ !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) {
+ /* Call __tostring metamethod once. */
+ copyTV(L, L->top++, mo);
+ copyTV(L, L->top++, o);
+ lua_call(L, 1, 1);
+ o = &L->base[arg-1]; /* Stack may have been reallocated. */
+ copyTV(L, o, --L->top); /* Replace inline for retry. */
+ if (retry < 2) { /* Global buffer may have been overwritten. */
+ retry = 1;
+ break;
+ }
+ }
+ if (LJ_LIKELY(tvisstr(o))) {
+ len = strV(o)->len;
+ s = strVdata(o);
+#if LJ_HASBUFFER
+ } else if (tvisbuf(o)) {
+ SBufExt *sbx = bufV(o);
+ if (sbx == (SBufExt *)sb) lj_err_arg(L, arg+1, LJ_ERR_BUFFER_SELF);
+ len = sbufxlen(sbx);
+ s = sbx->r;
+#endif
+ } else {
+ GCstr *str = lj_strfmt_obj(L, o);
+ len = str->len;
+ s = strdata(str);
+ }
+ if ((sf & STRFMT_T_QUOTED))
+ strfmt_putquotedlen(sb, s, len); /* No formatting. */
+ else
+ strfmt_putfstrlen(sb, sf, s, len);
+ break;
+ }
+ case STRFMT_CHAR:
+ lj_strfmt_putfchar(sb, sf, lj_lib_checkint(L, arg));
+ break;
+ case STRFMT_PTR: /* No formatting. */
+ lj_strfmt_putptr(sb, lj_obj_ptr(G(L), o));
+ break;
+ default:
+ lj_assertL(0, "bad string format type");
+ break;
+ }
+ }
+ }
+ return retry;
+}
+
+/* -- Conversions to strings ---------------------------------------------- */
+
+/* Convert integer to string. */
+GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k)
+{
+ char buf[STRFMT_MAXBUF_INT];
+ MSize len = (MSize)(lj_strfmt_wint(buf, k) - buf);
+ return lj_str_new(L, buf, len);
+}
+
+/* Convert integer or number to string. */
+GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o)
+{
+ return tvisint(o) ? lj_strfmt_int(L, intV(o)) : lj_strfmt_num(L, o);
+}
+
+#if LJ_HASJIT
+/* Convert char value to string. */
+GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c)
+{
+ char buf[1];
+ buf[0] = c;
+ return lj_str_new(L, buf, 1);
+}
+#endif
+
+/* Raw conversion of object to string. */
+GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o)
+{
+ if (tvisstr(o)) {
+ return strV(o);
+ } else if (tvisnumber(o)) {
+ return lj_strfmt_number(L, o);
+ } else if (tvisnil(o)) {
+ return lj_str_newlit(L, "nil");
+ } else if (tvisfalse(o)) {
+ return lj_str_newlit(L, "false");
+ } else if (tvistrue(o)) {
+ return lj_str_newlit(L, "true");
+ } else {
+ char buf[8+2+2+16], *p = buf;
+ p = lj_buf_wmem(p, lj_typename(o), (MSize)strlen(lj_typename(o)));
+ *p++ = ':'; *p++ = ' ';
+ if (tvisfunc(o) && isffunc(funcV(o))) {
+ p = lj_buf_wmem(p, "builtin#", 8);
+ p = lj_strfmt_wint(p, funcV(o)->c.ffid);
+ } else {
+ p = lj_strfmt_wptr(p, lj_obj_ptr(G(L), o));
+ }
+ return lj_str_new(L, buf, (size_t)(p - buf));
+ }
+}
+
+/* -- Internal string formatting ------------------------------------------ */
+
+/*
+** These functions are only used for lua_pushfstring(), lua_pushvfstring()
+** and for internal string formatting (e.g. error messages). Caveat: unlike
+** string.format(), only a limited subset of formats and flags are supported!
+**
+** LuaJIT has support for a couple more formats than Lua 5.1/5.2:
+** - %d %u %o %x with full formatting, 32 bit integers only.
+** - %f and other FP formats are really %.14g.
+** - %s %c %p without formatting.
+*/
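+
+/*
+** Usage sketch (illustrative, not part of the original sources): an
+** internal error message can be built from C with the subset above, e.g.
+**
+**   lj_strfmt_pushf(L, "bad argument #%d to '%s'", 2, "insert");
+**
+** This pushes the formatted string onto the Lua stack and returns a
+** pointer to its character data.
+*/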
+
+/* Push formatted message as a string object to Lua stack. va_list variant. */
+const char *lj_strfmt_pushvf(lua_State *L, const char *fmt, va_list argp)
+{
+ SBuf *sb = lj_buf_tmp_(L);
+ FormatState fs;
+ SFormat sf;
+ GCstr *str;
+ lj_strfmt_init(&fs, fmt, (MSize)strlen(fmt));
+ while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) {
+ switch (STRFMT_TYPE(sf)) {
+ case STRFMT_LIT:
+ lj_buf_putmem(sb, fs.str, fs.len);
+ break;
+ case STRFMT_INT:
+ lj_strfmt_putfxint(sb, sf, va_arg(argp, int32_t));
+ break;
+ case STRFMT_UINT:
+ lj_strfmt_putfxint(sb, sf, va_arg(argp, uint32_t));
+ break;
+ case STRFMT_NUM:
+ lj_strfmt_putfnum(sb, STRFMT_G14, va_arg(argp, lua_Number));
+ break;
+ case STRFMT_STR: {
+ const char *s = va_arg(argp, char *);
+ if (s == NULL) s = "(null)";
+ lj_buf_putmem(sb, s, (MSize)strlen(s));
+ break;
+ }
+ case STRFMT_CHAR:
+ lj_buf_putb(sb, va_arg(argp, int));
+ break;
+ case STRFMT_PTR:
+ lj_strfmt_putptr(sb, va_arg(argp, void *));
+ break;
+ case STRFMT_ERR:
+ default:
+ lj_buf_putb(sb, '?');
+ lj_assertL(0, "bad string format near offset %d", fs.len);
+ break;
+ }
+ }
+ str = lj_buf_str(L, sb);
+ setstrV(L, L->top, str);
+ incr_top(L);
+ return strdata(str);
+}
+
+/* Push formatted message as a string object to Lua stack. Vararg variant. */
+const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...)
+{
+ const char *msg;
+ va_list argp;
+ va_start(argp, fmt);
+ msg = lj_strfmt_pushvf(L, fmt, argp);
+ va_end(argp);
+ return msg;
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_strfmt.h b/libs/luajit-cmake/luajit/src/lj_strfmt.h
new file mode 100644
index 0000000..a452960
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_strfmt.h
@@ -0,0 +1,131 @@
+/*
+** String formatting.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STRFMT_H
+#define _LJ_STRFMT_H
+
+#include "lj_obj.h"
+
+typedef uint32_t SFormat; /* Format indicator. */
+
+/* Format parser state. */
+typedef struct FormatState {
+ const uint8_t *p; /* Current format string pointer. */
+ const uint8_t *e; /* End of format string. */
+ const char *str; /* Returned literal string. */
+ MSize len; /* Size of literal string. */
+} FormatState;
+
+/* Format types (max. 16). */
+typedef enum FormatType {
+ STRFMT_EOF, STRFMT_ERR, STRFMT_LIT,
+ STRFMT_INT, STRFMT_UINT, STRFMT_NUM, STRFMT_STR, STRFMT_CHAR, STRFMT_PTR
+} FormatType;
+
+/* Format subtypes (bits are reused). */
+#define STRFMT_T_HEX 0x0010 /* STRFMT_UINT */
+#define STRFMT_T_OCT 0x0020 /* STRFMT_UINT */
+#define STRFMT_T_FP_A 0x0000 /* STRFMT_NUM */
+#define STRFMT_T_FP_E 0x0010 /* STRFMT_NUM */
+#define STRFMT_T_FP_F 0x0020 /* STRFMT_NUM */
+#define STRFMT_T_FP_G 0x0030 /* STRFMT_NUM */
+#define STRFMT_T_QUOTED 0x0010 /* STRFMT_STR */
+
+/* Format flags. */
+#define STRFMT_F_LEFT 0x0100
+#define STRFMT_F_PLUS 0x0200
+#define STRFMT_F_ZERO 0x0400
+#define STRFMT_F_SPACE 0x0800
+#define STRFMT_F_ALT 0x1000
+#define STRFMT_F_UPPER 0x2000
+
+/* Format indicator fields. */
+#define STRFMT_SH_WIDTH 16
+#define STRFMT_SH_PREC 24
+
+#define STRFMT_TYPE(sf) ((FormatType)((sf) & 15))
+#define STRFMT_WIDTH(sf) (((sf) >> STRFMT_SH_WIDTH) & 255u)
+#define STRFMT_PREC(sf) ((((sf) >> STRFMT_SH_PREC) & 255u) - 1u)
+#define STRFMT_FP(sf) (((sf) >> 4) & 3)
+
+/* Formats for conversion characters. */
+#define STRFMT_A (STRFMT_NUM|STRFMT_T_FP_A)
+#define STRFMT_C (STRFMT_CHAR)
+#define STRFMT_D (STRFMT_INT)
+#define STRFMT_E (STRFMT_NUM|STRFMT_T_FP_E)
+#define STRFMT_F (STRFMT_NUM|STRFMT_T_FP_F)
+#define STRFMT_G (STRFMT_NUM|STRFMT_T_FP_G)
+#define STRFMT_I STRFMT_D
+#define STRFMT_O (STRFMT_UINT|STRFMT_T_OCT)
+#define STRFMT_P (STRFMT_PTR)
+#define STRFMT_Q (STRFMT_STR|STRFMT_T_QUOTED)
+#define STRFMT_S (STRFMT_STR)
+#define STRFMT_U (STRFMT_UINT)
+#define STRFMT_X (STRFMT_UINT|STRFMT_T_HEX)
+#define STRFMT_G14 (STRFMT_G | ((14+1) << STRFMT_SH_PREC))
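+
+/*
+** Example decoding (illustrative): for sf == STRFMT_G14, STRFMT_TYPE(sf)
+** yields STRFMT_NUM, STRFMT_FP(sf) yields 3 (the FP_G subtype) and
+** STRFMT_PREC(sf) yields 14 (the prec field stores precision + 1).
+*/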
+
+/* Maximum buffer sizes for conversions. */
+#define STRFMT_MAXBUF_XINT (1+22) /* '0' prefix + uint64_t in octal. */
+#define STRFMT_MAXBUF_INT (1+10) /* Sign + int32_t in decimal. */
+#define STRFMT_MAXBUF_NUM 32 /* Must correspond with STRFMT_G14. */
+#define STRFMT_MAXBUF_PTR (2+2*sizeof(ptrdiff_t)) /* "0x" + hex ptr. */
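+/* (Sanity check: UINT64_MAX needs 22 octal digits, hence the 1+22 in */
+/* STRFMT_MAXBUF_XINT to leave room for the '0' prefix of the '#' flag.) */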
+
+/* Format parser. */
+LJ_FUNC SFormat LJ_FASTCALL lj_strfmt_parse(FormatState *fs);
+
+static LJ_AINLINE void lj_strfmt_init(FormatState *fs, const char *p, MSize len)
+{
+ fs->p = (const uint8_t *)p;
+ fs->e = (const uint8_t *)p + len;
+ /* Must be NUL-terminated. May have NULs inside, too. */
+ lj_assertX(*fs->e == 0, "format not NUL-terminated");
+}
+
+/* Raw conversions. */
+LJ_FUNC char * LJ_FASTCALL lj_strfmt_wint(char *p, int32_t k);
+LJ_FUNC char * LJ_FASTCALL lj_strfmt_wptr(char *p, const void *v);
+LJ_FUNC char * LJ_FASTCALL lj_strfmt_wuleb128(char *p, uint32_t v);
+LJ_FUNC const char *lj_strfmt_wstrnum(lua_State *L, cTValue *o, MSize *lenp);
+
+/* Unformatted conversions to buffer. */
+LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putint(SBuf *sb, int32_t k);
+#if LJ_HASJIT
+LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putnum(SBuf *sb, cTValue *o);
+#endif
+LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putptr(SBuf *sb, const void *v);
+#if LJ_HASJIT
+LJ_FUNC SBuf * LJ_FASTCALL lj_strfmt_putquoted(SBuf *sb, GCstr *str);
+#endif
+
+/* Formatted conversions to buffer. */
+LJ_FUNC SBuf *lj_strfmt_putfxint(SBuf *sb, SFormat sf, uint64_t k);
+LJ_FUNC SBuf *lj_strfmt_putfnum_int(SBuf *sb, SFormat sf, lua_Number n);
+LJ_FUNC SBuf *lj_strfmt_putfnum_uint(SBuf *sb, SFormat sf, lua_Number n);
+LJ_FUNC SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat, lua_Number n);
+LJ_FUNC SBuf *lj_strfmt_putfchar(SBuf *sb, SFormat, int32_t c);
+#if LJ_HASJIT
+LJ_FUNC SBuf *lj_strfmt_putfstr(SBuf *sb, SFormat, GCstr *str);
+#endif
+LJ_FUNC int lj_strfmt_putarg(lua_State *L, SBuf *sb, int arg, int retry);
+
+/* Conversions to strings. */
+LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_int(lua_State *L, int32_t k);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o);
+LJ_FUNCA GCstr * LJ_FASTCALL lj_strfmt_number(lua_State *L, cTValue *o);
+#if LJ_HASJIT
+LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_char(lua_State *L, int c);
+#endif
+LJ_FUNC GCstr * LJ_FASTCALL lj_strfmt_obj(lua_State *L, cTValue *o);
+
+/* Internal string formatting. */
+LJ_FUNC const char *lj_strfmt_pushvf(lua_State *L, const char *fmt,
+ va_list argp);
+LJ_FUNC const char *lj_strfmt_pushf(lua_State *L, const char *fmt, ...)
+#if defined(__GNUC__) || defined(__clang__)
+ __attribute__ ((format (printf, 2, 3)))
+#endif
+ ;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_strfmt_num.c b/libs/luajit-cmake/luajit/src/lj_strfmt_num.c
new file mode 100644
index 0000000..3c60695
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_strfmt_num.c
@@ -0,0 +1,592 @@
+/*
+** String formatting for floating-point numbers.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+** Contributed by Peter Cawley.
+*/
+
+#include <stdio.h>
+
+#define lj_strfmt_num_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_buf.h"
+#include "lj_str.h"
+#include "lj_strfmt.h"
+
+/* -- Precomputed tables -------------------------------------------------- */
+
+/* Rescale factors to push the exponent of a number towards zero. */
+#define RESCALE_EXPONENTS(P, N) \
+ P(308), P(289), P(270), P(250), P(231), P(212), P(193), P(173), P(154), \
+ P(135), P(115), P(96), P(77), P(58), P(38), P(0), P(0), P(0), N(39), N(58), \
+ N(77), N(96), N(116), N(135), N(154), N(174), N(193), N(212), N(231), \
+ N(251), N(270), N(289)
+
+#define ONE_E_P(X) 1e+0 ## X
+#define ONE_E_N(X) 1e-0 ## X
+static const int16_t rescale_e[] = { RESCALE_EXPONENTS(-, +) };
+static const double rescale_n[] = { RESCALE_EXPONENTS(ONE_E_P, ONE_E_N) };
+#undef ONE_E_N
+#undef ONE_E_P
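+
+/* E.g. (illustrative): for n ~= 1e-300, lj_strfmt_wfnum() below multiplies */
+/* by rescale_n[] == 1e+308 and records ndebias == -308, preserving the */
+/* invariant abs(n) == nd * 10^ndebias. */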
+
+/*
+** For p in range -70 through 57, this table encodes pairs (m, e) such that
+** 4*2^p <= (uint8_t)m*10^e, where (uint8_t)m is the smallest such value.
+*/
+static const int8_t four_ulp_m_e[] = {
+ 34, -21, 68, -21, 14, -20, 28, -20, 55, -20, 2, -19, 3, -19, 5, -19, 9, -19,
+ -82, -18, 35, -18, 7, -17, -117, -17, 28, -17, 56, -17, 112, -16, -33, -16,
+ 45, -16, 89, -16, -78, -15, 36, -15, 72, -15, -113, -14, 29, -14, 57, -14,
+ 114, -13, -28, -13, 46, -13, 91, -12, -74, -12, 37, -12, 73, -12, 15, -11, 3,
+ -11, 59, -11, 2, -10, 3, -10, 5, -10, 1, -9, -69, -9, 38, -9, 75, -9, 15, -7,
+ 3, -7, 6, -7, 12, -6, -17, -7, 48, -7, 96, -7, -65, -6, 39, -6, 77, -6, -103,
+ -5, 31, -5, 62, -5, 123, -4, -11, -4, 49, -4, 98, -4, -60, -3, 4, -2, 79, -3,
+ 16, -2, 32, -2, 63, -2, 2, -1, 25, 0, 5, 1, 1, 2, 2, 2, 4, 2, 8, 2, 16, 2,
+ 32, 2, 64, 2, -128, 2, 26, 2, 52, 2, 103, 3, -51, 3, 41, 4, 82, 4, -92, 4,
+ 33, 4, 66, 4, -124, 5, 27, 5, 53, 5, 105, 6, 21, 6, 42, 6, 84, 6, 17, 7, 34,
+ 7, 68, 7, 2, 8, 3, 8, 6, 8, 108, 9, -41, 9, 43, 10, 86, 9, -84, 10, 35, 10,
+ 69, 10, -118, 11, 28, 11, 55, 12, 11, 13, 22, 13, 44, 13, 88, 13, -80, 13,
+ 36, 13, 71, 13, -115, 14, 29, 14, 57, 14, 113, 15, -30, 15, 46, 15, 91, 15,
+ 19, 16, 37, 16, 73, 16, 2, 17, 3, 17, 6, 17
+};
+
+/* min(2^32-1, 10^e-1) for e in range 0 through 10 */
+static uint32_t ndigits_dec_threshold[] = {
+ 0, 9U, 99U, 999U, 9999U, 99999U, 999999U,
+ 9999999U, 99999999U, 999999999U, 0xffffffffU
+};
+
+/* -- Helper functions ---------------------------------------------------- */
+
+/* Compute the number of digits in the decimal representation of x. */
+static MSize ndigits_dec(uint32_t x)
+{
+ MSize t = ((lj_fls(x | 1) * 77) >> 8) + 1; /* 2^8/77 is roughly log2(10) */
+ return t + (x > ndigits_dec_threshold[t]);
+}
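+
+/* E.g. ndigits_dec(999) == 3 and ndigits_dec(1000) == 4: the multiply */
+/* approximates log10 and the table lookup corrects the boundary cases. */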
+
+#define WINT_R(x, sh, sc) \
+ { uint32_t d = (x*(((1<<sh)+sc-1)/sc))>>sh; x -= d*sc; *p++ = (char)('0'+d); }
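+/* (WINT_R emits one digit, x/sc, via multiply+shift: e.g. sh=23, sc=1000 */
+/* gives x*8389 >> 23 == x/1000 for x < 10000. It then reduces x mod sc.) */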
+
+/* Write 9-digit unsigned integer to buffer. */
+static char *lj_strfmt_wuint9(char *p, uint32_t u)
+{
+ uint32_t v = u / 10000, w;
+ u -= v * 10000;
+ w = v / 10000;
+ v -= w * 10000;
+ *p++ = (char)('0'+w);
+ WINT_R(v, 23, 1000)
+ WINT_R(v, 12, 100)
+ WINT_R(v, 10, 10)
+ *p++ = (char)('0'+v);
+ WINT_R(u, 23, 1000)
+ WINT_R(u, 12, 100)
+ WINT_R(u, 10, 10)
+ *p++ = (char)('0'+u);
+ return p;
+}
+#undef WINT_R
+
+/* -- Extended precision arithmetic --------------------------------------- */
+
+/*
+** The "nd" format is a fixed-precision decimal representation for numbers. It
+** consists of up to 64 uint32_t values, with each uint32_t storing a value
+** in the range [0, 1e9). A number in "nd" format consists of three variables:
+**
+** uint32_t nd[64];
+** uint32_t ndlo;
+** uint32_t ndhi;
+**
+** The integral part of the number is stored in nd[0 ... ndhi], the value of
+** which is sum{i in [0, ndhi] | nd[i] * 10^(9*i)}. If the fractional part of
+** the number is zero, ndlo is zero. Otherwise, the fractional part is stored
+** in nd[ndlo ... 63], the value of which is taken to be
+** sum{i in [ndlo, 63] | nd[i] * 10^(9*(i-64))}.
+**
+** If the array part had 128 elements rather than 64, then every double would
+** have an exact representation in "nd" format. With 64 elements, all integral
+** doubles have an exact representation, and all non-integral doubles have
+** enough digits to make both %.99e and %.99f do the right thing.
+*/
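+
+/*
+** Worked example (illustrative): the decimal value 123.456 is held as
+** nd[0] = 123 with ndhi = 0 for the integral part, and nd[63] = 456000000
+** with ndlo = 63 for the fractional part, since
+** nd[63] * 10^(9*(63-64)) = 456000000 * 1e-9 = 0.456.
+*/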
+
+#if LJ_64
+#define ND_MUL2K_MAX_SHIFT 29
+#define ND_MUL2K_DIV1E9(val) ((uint32_t)((val) / 1000000000))
+#else
+#define ND_MUL2K_MAX_SHIFT 11
+#define ND_MUL2K_DIV1E9(val) ((uint32_t)((val) >> 9) / 1953125)
+#endif
+
+/* Multiply nd by 2^k and add carry_in (ndlo is assumed to be zero). */
+static uint32_t nd_mul2k(uint32_t* nd, uint32_t ndhi, uint32_t k,
+ uint32_t carry_in, SFormat sf)
+{
+ uint32_t i, ndlo = 0, start = 1;
+ /* Performance hacks. */
+ if (k > ND_MUL2K_MAX_SHIFT*2 && STRFMT_FP(sf) != STRFMT_FP(STRFMT_T_FP_F)) {
+ start = ndhi - (STRFMT_PREC(sf) + 17) / 8;
+ }
+ /* Real logic. */
+ while (k >= ND_MUL2K_MAX_SHIFT) {
+ for (i = ndlo; i <= ndhi; i++) {
+ uint64_t val = ((uint64_t)nd[i] << ND_MUL2K_MAX_SHIFT) | carry_in;
+ carry_in = ND_MUL2K_DIV1E9(val);
+ nd[i] = (uint32_t)val - carry_in * 1000000000;
+ }
+ if (carry_in) {
+ nd[++ndhi] = carry_in; carry_in = 0;
+ if (start++ == ndlo) ++ndlo;
+ }
+ k -= ND_MUL2K_MAX_SHIFT;
+ }
+ if (k) {
+ for (i = ndlo; i <= ndhi; i++) {
+ uint64_t val = ((uint64_t)nd[i] << k) | carry_in;
+ carry_in = ND_MUL2K_DIV1E9(val);
+ nd[i] = (uint32_t)val - carry_in * 1000000000;
+ }
+ if (carry_in) nd[++ndhi] = carry_in;
+ }
+ return ndhi;
+}
+
+/* Divide nd by 2^k (ndlo is assumed to be zero). */
+static uint32_t nd_div2k(uint32_t* nd, uint32_t ndhi, uint32_t k, SFormat sf)
+{
+ uint32_t ndlo = 0, stop1 = ~0, stop2 = ~0;
+ /* Performance hacks. */
+ if (!ndhi) {
+ if (!nd[0]) {
+ return 0;
+ } else {
+ uint32_t s = lj_ffs(nd[0]);
+ if (s >= k) { nd[0] >>= k; return 0; }
+ nd[0] >>= s; k -= s;
+ }
+ }
+ if (k > 18) {
+ if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_F)) {
+ stop1 = 63 - (int32_t)STRFMT_PREC(sf) / 9;
+ } else {
+ int32_t floorlog2 = ndhi * 29 + lj_fls(nd[ndhi]) - k;
+ int32_t floorlog10 = (int32_t)(floorlog2 * 0.30102999566398114);
+ stop1 = 62 + (floorlog10 - (int32_t)STRFMT_PREC(sf)) / 9;
+ stop2 = 61 + ndhi - (int32_t)STRFMT_PREC(sf) / 8;
+ }
+ }
+ /* Real logic. */
+ while (k >= 9) {
+ uint32_t i = ndhi, carry = 0;
+ for (;;) {
+ uint32_t val = nd[i];
+ nd[i] = (val >> 9) + carry;
+ carry = (val & 0x1ff) * 1953125;
+ if (i == ndlo) break;
+ i = (i - 1) & 0x3f;
+ }
+ if (ndlo != stop1 && ndlo != stop2) {
+ if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; }
+ if (!nd[ndhi]) { ndhi = (ndhi - 1) & 0x3f; stop2--; }
+ } else if (!nd[ndhi]) {
+ if (ndhi != ndlo) { ndhi = (ndhi - 1) & 0x3f; stop2--; }
+ else return ndlo;
+ }
+ k -= 9;
+ }
+ if (k) {
+ uint32_t mask = (1U << k) - 1, mul = 1000000000 >> k, i = ndhi, carry = 0;
+ for (;;) {
+ uint32_t val = nd[i];
+ nd[i] = (val >> k) + carry;
+ carry = (val & mask) * mul;
+ if (i == ndlo) break;
+ i = (i - 1) & 0x3f;
+ }
+ if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; }
+ }
+ return ndlo;
+}
+
+/* Add m*10^e to nd (assumes ndlo <= e/9 <= ndhi and 0 <= m <= 9). */
+static uint32_t nd_add_m10e(uint32_t* nd, uint32_t ndhi, uint8_t m, int32_t e)
+{
+ uint32_t i, carry;
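+ /* Note: ndigits_dec_threshold[k]+1 == 10^k, so carry is m * 10^(e mod 9). */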
+ if (e >= 0) {
+ i = (uint32_t)e/9;
+ carry = m * (ndigits_dec_threshold[e - (int32_t)i*9] + 1);
+ } else {
+ int32_t f = (e-8)/9;
+ i = (uint32_t)(64 + f);
+ carry = m * (ndigits_dec_threshold[e - f*9] + 1);
+ }
+ for (;;) {
+ uint32_t val = nd[i] + carry;
+ if (LJ_UNLIKELY(val >= 1000000000)) {
+ val -= 1000000000;
+ nd[i] = val;
+ if (LJ_UNLIKELY(i == ndhi)) {
+ ndhi = (ndhi + 1) & 0x3f;
+ nd[ndhi] = 1;
+ break;
+ }
+ carry = 1;
+ i = (i + 1) & 0x3f;
+ } else {
+ nd[i] = val;
+ break;
+ }
+ }
+ return ndhi;
+}
+
+/* Test whether two "nd" values are equal in their most significant digits. */
+static int nd_similar(uint32_t* nd, uint32_t ndhi, uint32_t* ref, MSize hilen,
+ MSize prec)
+{
+ char nd9[9], ref9[9];
+ if (hilen <= prec) {
+ if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0;
+ prec -= hilen; ref--; ndhi = (ndhi - 1) & 0x3f;
+ if (prec >= 9) {
+ if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0;
+ prec -= 9; ref--; ndhi = (ndhi - 1) & 0x3f;
+ }
+ } else {
+ prec -= hilen - 9;
+ }
+ lj_assertX(prec < 9, "bad precision %d", prec);
+ lj_strfmt_wuint9(nd9, nd[ndhi]);
+ lj_strfmt_wuint9(ref9, *ref);
+ return !memcmp(nd9, ref9, prec) && (nd9[prec] < '5') == (ref9[prec] < '5');
+}
+
+/* -- Formatted conversions to buffer ------------------------------------- */
+
+/* Write formatted floating-point number to either sb or p. */
+static char *lj_strfmt_wfnum(SBuf *sb, SFormat sf, lua_Number n, char *p)
+{
+ MSize width = STRFMT_WIDTH(sf), prec = STRFMT_PREC(sf), len;
+ TValue t;
+ t.n = n;
+ if (LJ_UNLIKELY((t.u32.hi << 1) >= 0xffe00000)) {
+ /* Handle non-finite values uniformly for %a, %e, %f, %g. */
+ int prefix = 0, ch = (sf & STRFMT_F_UPPER) ? 0x202020 : 0;
+ if (((t.u32.hi & 0x000fffff) | t.u32.lo) != 0) {
+ ch ^= ('n' << 16) | ('a' << 8) | 'n';
+ if ((sf & STRFMT_F_SPACE)) prefix = ' ';
+ } else {
+ ch ^= ('i' << 16) | ('n' << 8) | 'f';
+ if ((t.u32.hi & 0x80000000)) prefix = '-';
+ else if ((sf & STRFMT_F_PLUS)) prefix = '+';
+ else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
+ }
+ len = 3 + (prefix != 0);
+ if (!p) p = lj_buf_more(sb, width > len ? width : len);
+ if (!(sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' ';
+ if (prefix) *p++ = prefix;
+ *p++ = (char)(ch >> 16); *p++ = (char)(ch >> 8); *p++ = (char)ch;
+ } else if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_A)) {
+ /* %a */
+ const char *hexdig = (sf & STRFMT_F_UPPER) ? "0123456789ABCDEFPX"
+ : "0123456789abcdefpx";
+ int32_t e = (t.u32.hi >> 20) & 0x7ff;
+ char prefix = 0, eprefix = '+';
+ if (t.u32.hi & 0x80000000) prefix = '-';
+ else if ((sf & STRFMT_F_PLUS)) prefix = '+';
+ else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
+ t.u32.hi &= 0xfffff;
+ if (e) {
+ t.u32.hi |= 0x100000;
+ e -= 1023;
+ } else if (t.u32.lo | t.u32.hi) {
+ /* Non-zero denormal - normalise it. */
+ uint32_t shift = t.u32.hi ? 20-lj_fls(t.u32.hi) : 52-lj_fls(t.u32.lo);
+ e = -1022 - shift;
+ t.u64 <<= shift;
+ }
+ /* abs(n) == t.u64 * 2^(e - 52) */
+ /* If n != 0, bit 52 of t.u64 is set, and is the highest set bit. */
+ if ((int32_t)prec < 0) {
+ /* Default precision: use smallest precision giving exact result. */
+ prec = t.u32.lo ? 13-lj_ffs(t.u32.lo)/4 : 5-lj_ffs(t.u32.hi|0x100000)/4;
+ } else if (prec < 13) {
+ /* Precision is sufficiently low as to maybe require rounding. */
+ t.u64 += (((uint64_t)1) << (51 - prec*4));
+ }
+ if (e < 0) {
+ eprefix = '-';
+ e = -e;
+ }
+ len = 5 + ndigits_dec((uint32_t)e) + prec + (prefix != 0)
+ + ((prec | (sf & STRFMT_F_ALT)) != 0);
+ if (!p) p = lj_buf_more(sb, width > len ? width : len);
+ if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
+ while (width-- > len) *p++ = ' ';
+ }
+ if (prefix) *p++ = prefix;
+ *p++ = '0';
+ *p++ = hexdig[17]; /* x or X */
+ if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
+ while (width-- > len) *p++ = '0';
+ }
+ *p++ = '0' + (t.u32.hi >> 20); /* Usually '1', sometimes '0' or '2'. */
+ if ((prec | (sf & STRFMT_F_ALT))) {
+ /* Emit fractional part. */
+ char *q = p + 1 + prec;
+ *p = '.';
+ if (prec < 13) t.u64 >>= (52 - prec*4);
+ else while (prec > 13) p[prec--] = '0';
+ while (prec) { p[prec--] = hexdig[t.u64 & 15]; t.u64 >>= 4; }
+ p = q;
+ }
+ *p++ = hexdig[16]; /* p or P */
+ *p++ = eprefix; /* + or - */
+ p = lj_strfmt_wint(p, e);
+ } else {
+ /* %e or %f or %g - begin by converting n to "nd" format. */
+ uint32_t nd[64];
+ uint32_t ndhi = 0, ndlo, i;
+ int32_t e = (t.u32.hi >> 20) & 0x7ff, ndebias = 0;
+ char prefix = 0, *q;
+ if (t.u32.hi & 0x80000000) prefix = '-';
+ else if ((sf & STRFMT_F_PLUS)) prefix = '+';
+ else if ((sf & STRFMT_F_SPACE)) prefix = ' ';
+ prec += ((int32_t)prec >> 31) & 7; /* Default precision is 6. */
+ if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_G)) {
+ /* %g - decrement precision if non-zero (to make it like %e). */
+ prec--;
+ prec ^= (uint32_t)((int32_t)prec >> 31);
+ }
+ if ((sf & STRFMT_T_FP_E) && prec < 14 && n != 0) {
+ /* Precision is sufficiently low that rescaling will probably work. */
+ if ((ndebias = rescale_e[e >> 6])) {
+ t.n = n * rescale_n[e >> 6];
+ if (LJ_UNLIKELY(!e)) t.n *= 1e10, ndebias -= 10;
+ t.u64 -= 2; /* Convert 2ulp below (later we convert 2ulp above). */
+ nd[0] = 0x100000 | (t.u32.hi & 0xfffff);
+ e = ((t.u32.hi >> 20) & 0x7ff) - 1075 - (ND_MUL2K_MAX_SHIFT < 29);
+ goto load_t_lo; rescale_failed:
+ t.n = n;
+ e = (t.u32.hi >> 20) & 0x7ff;
+ ndebias = ndhi = 0;
+ }
+ }
+ nd[0] = t.u32.hi & 0xfffff;
+ if (e == 0) e++; else nd[0] |= 0x100000;
+ e -= 1043;
+ if (t.u32.lo) {
+ e -= 32 + (ND_MUL2K_MAX_SHIFT < 29); load_t_lo:
+#if ND_MUL2K_MAX_SHIFT >= 29
+ nd[0] = (nd[0] << 3) | (t.u32.lo >> 29);
+ ndhi = nd_mul2k(nd, ndhi, 29, t.u32.lo & 0x1fffffff, sf);
+#elif ND_MUL2K_MAX_SHIFT >= 11
+ ndhi = nd_mul2k(nd, ndhi, 11, t.u32.lo >> 21, sf);
+ ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo >> 10) & 0x7ff, sf);
+ ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo << 1) & 0x7ff, sf);
+#else
+#error "ND_MUL2K_MAX_SHIFT too small"
+#endif
+ }
+ if (e >= 0) {
+ ndhi = nd_mul2k(nd, ndhi, (uint32_t)e, 0, sf);
+ ndlo = 0;
+ } else {
+ ndlo = nd_div2k(nd, ndhi, (uint32_t)-e, sf);
+ if (ndhi && !nd[ndhi]) ndhi--;
+ }
+ /* abs(n) == nd * 10^ndebias (for slightly loose interpretation of ==) */
+ if ((sf & STRFMT_T_FP_E)) {
+ /* %e or %g - assume %e and start by calculating nd's exponent (nde). */
+ char eprefix = '+';
+ int32_t nde = -1;
+ MSize hilen;
+ if (ndlo && !nd[ndhi]) {
+ ndhi = 64; do {} while (!nd[--ndhi]);
+ nde -= 64 * 9;
+ }
+ hilen = ndigits_dec(nd[ndhi]);
+ nde += ndhi * 9 + hilen;
+ if (ndebias) {
+ /*
+ ** Rescaling was performed, but this introduced some error, and might
+ ** have pushed us across a rounding boundary. We check whether this
+ ** error affected the result by introducing even more error (2ulp in
+ ** either direction), and seeing whether a rounding boundary was
+ ** crossed. Having already converted the -2ulp case, we save off its
+ ** most significant digits, convert the +2ulp case, and compare them.
+ */
+ int32_t eidx = e + 70 + (ND_MUL2K_MAX_SHIFT < 29)
+ + (t.u32.lo >= 0xfffffffe && !(~t.u32.hi << 12));
+ const int8_t *m_e = four_ulp_m_e + eidx * 2;
+ lj_assertG_(G(sbufL(sb)), 0 <= eidx && eidx < 128, "bad eidx %d", eidx);
+ nd[33] = nd[ndhi];
+ nd[32] = nd[(ndhi - 1) & 0x3f];
+ nd[31] = nd[(ndhi - 2) & 0x3f];
+ nd_add_m10e(nd, ndhi, (uint8_t)*m_e, m_e[1]);
+ if (LJ_UNLIKELY(!nd_similar(nd, ndhi, nd + 33, hilen, prec + 1))) {
+ goto rescale_failed;
+ }
+ }
+ if ((int32_t)(prec - nde) < (0x3f & -(int32_t)ndlo) * 9) {
+ /* Precision is sufficiently low as to maybe require rounding. */
+ ndhi = nd_add_m10e(nd, ndhi, 5, nde - prec - 1);
+ nde += (hilen != ndigits_dec(nd[ndhi]));
+ }
+ nde += ndebias;
+ if ((sf & STRFMT_T_FP_F)) {
+ /* %g */
+ if ((int32_t)prec >= nde && nde >= -4) {
+ if (nde < 0) ndhi = 0;
+ prec -= nde;
+ goto g_format_like_f;
+ } else if (!(sf & STRFMT_F_ALT) && prec && width > 5) {
+ /* Decrease precision in order to strip trailing zeroes. */
+ char tail[9];
+ uint32_t maxprec = hilen - 1 + ((ndhi - ndlo) & 0x3f) * 9;
+ if (prec >= maxprec) prec = maxprec;
+ else ndlo = (ndhi - (((int32_t)(prec - hilen) + 9) / 9)) & 0x3f;
+ i = prec - hilen - (((ndhi - ndlo) & 0x3f) * 9) + 10;
+ lj_strfmt_wuint9(tail, nd[ndlo]);
+ while (prec && tail[--i] == '0') {
+ prec--;
+ if (!i) {
+ if (ndlo == ndhi) { prec = 0; break; }
+ lj_strfmt_wuint9(tail, nd[++ndlo]);
+ i = 9;
+ }
+ }
+ }
+ }
+ if (nde < 0) {
+ /* Make nde non-negative. */
+ eprefix = '-';
+ nde = -nde;
+ }
+ len = 3 + prec + (prefix != 0) + ndigits_dec((uint32_t)nde) + (nde < 10)
+ + ((prec | (sf & STRFMT_F_ALT)) != 0);
+ if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 5);
+ if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
+ while (width-- > len) *p++ = ' ';
+ }
+ if (prefix) *p++ = prefix;
+ if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
+ while (width-- > len) *p++ = '0';
+ }
+ q = lj_strfmt_wint(p + 1, nd[ndhi]);
+ p[0] = p[1]; /* Put leading digit in the correct place. */
+ if ((prec | (sf & STRFMT_F_ALT))) {
+ /* Emit fractional part. */
+ p[1] = '.'; p += 2;
+ prec -= (MSize)(q - p); p = q; /* Account for digits already emitted. */
+ /* Then emit chunks of 9 digits (this may emit 8 digits too many). */
+ for (i = ndhi; (int32_t)prec > 0 && i != ndlo; prec -= 9) {
+ i = (i - 1) & 0x3f;
+ p = lj_strfmt_wuint9(p, nd[i]);
+ }
+ if ((sf & STRFMT_T_FP_F) && !(sf & STRFMT_F_ALT)) {
+ /* %g (and not %#g) - strip trailing zeroes. */
+ p += (int32_t)prec & ((int32_t)prec >> 31);
+ while (p[-1] == '0') p--;
+ if (p[-1] == '.') p--;
+ } else {
+ /* %e (or %#g) - emit trailing zeroes. */
+ while ((int32_t)prec > 0) { *p++ = '0'; prec--; }
+ p += (int32_t)prec;
+ }
+ } else {
+ p++;
+ }
+ *p++ = (sf & STRFMT_F_UPPER) ? 'E' : 'e';
+ *p++ = eprefix; /* + or - */
+ if (nde < 10) *p++ = '0'; /* Always at least two digits of exponent. */
+ p = lj_strfmt_wint(p, nde);
+ } else {
+ /* %f (or, shortly, %g in %f style) */
+ if (prec < (MSize)(0x3f & -(int32_t)ndlo) * 9) {
+ /* Precision is sufficiently low as to maybe require rounding. */
+ ndhi = nd_add_m10e(nd, ndhi, 5, 0 - prec - 1);
+ }
+ g_format_like_f:
+ if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT) && prec && width) {
+ /* Decrease precision in order to strip trailing zeroes. */
+ if (ndlo) {
+ /* nd has a fractional part; we need to look at its digits. */
+ char tail[9];
+ uint32_t maxprec = (64 - ndlo) * 9;
+ if (prec >= maxprec) prec = maxprec;
+ else ndlo = 64 - (prec + 8) / 9;
+ i = prec - ((63 - ndlo) * 9);
+ lj_strfmt_wuint9(tail, nd[ndlo]);
+ while (prec && tail[--i] == '0') {
+ prec--;
+ if (!i) {
+ if (ndlo == 63) { prec = 0; break; }
+ lj_strfmt_wuint9(tail, nd[++ndlo]);
+ i = 9;
+ }
+ }
+ } else {
+ /* nd has no fractional part, so precision goes straight to zero. */
+ prec = 0;
+ }
+ }
+ len = ndhi * 9 + ndigits_dec(nd[ndhi]) + prec + (prefix != 0)
+ + ((prec | (sf & STRFMT_F_ALT)) != 0);
+ if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 8);
+ if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) {
+ while (width-- > len) *p++ = ' ';
+ }
+ if (prefix) *p++ = prefix;
+ if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) {
+ while (width-- > len) *p++ = '0';
+ }
+ /* Emit integer part. */
+ p = lj_strfmt_wint(p, nd[ndhi]);
+ i = ndhi;
+ while (i) p = lj_strfmt_wuint9(p, nd[--i]);
+ if ((prec | (sf & STRFMT_F_ALT))) {
+ /* Emit fractional part. */
+ *p++ = '.';
+ /* Emit chunks of 9 digits (this may emit 8 digits too many). */
+ while ((int32_t)prec > 0 && i != ndlo) {
+ i = (i - 1) & 0x3f;
+ p = lj_strfmt_wuint9(p, nd[i]);
+ prec -= 9;
+ }
+ if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT)) {
+ /* %g (and not %#g) - strip trailing zeroes. */
+ p += (int32_t)prec & ((int32_t)prec >> 31);
+ while (p[-1] == '0') p--;
+ if (p[-1] == '.') p--;
+ } else {
+ /* %f (or %#g) - emit trailing zeroes. */
+ while ((int32_t)prec > 0) { *p++ = '0'; prec--; }
+ p += (int32_t)prec;
+ }
+ }
+ }
+ }
+ if ((sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' ';
+ return p;
+}
+
+/* Add formatted floating-point number to buffer. */
+SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat sf, lua_Number n)
+{
+ sb->w = lj_strfmt_wfnum(sb, sf, n, NULL);
+ return sb;
+}
+
+/* -- Conversions to strings ---------------------------------------------- */
+
+/* Convert number to string. */
+GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o)
+{
+ char buf[STRFMT_MAXBUF_NUM];
+ MSize len = (MSize)(lj_strfmt_wfnum(NULL, STRFMT_G14, o->n, buf) - buf);
+ return lj_str_new(L, buf, len);
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_strscan.c b/libs/luajit-cmake/luajit/src/lj_strscan.c
new file mode 100644
index 0000000..1d1c1c7
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_strscan.c
@@ -0,0 +1,558 @@
+/*
+** String scanning.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <math.h>
+
+#define lj_strscan_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_char.h"
+#include "lj_strscan.h"
+
+/* -- Scanning numbers ---------------------------------------------------- */
+
+/*
+** Rationale for the builtin string to number conversion library:
+**
+** It removes a dependency on libc's strtod(), which is a true portability
+** nightmare, mainly due to the plethora of supported OS and toolchain
+** combinations. Sadly, the various implementations
+** a) are often buggy, incomplete (no hex floats) and/or imprecise,
+** b) sometimes crash or hang on certain inputs,
+** c) return non-standard NaNs that need to be filtered out, and
+** d) fail if the locale-specific decimal separator is not a dot,
+** which can only be fixed with atrocious workarounds.
+**
+** Also, most of the strtod() implementations are hopelessly bloated,
+** which is not just an I-cache hog, but a problem for static linkage
+** on embedded systems, too.
+**
+** OTOH the builtin conversion function is very compact, even though it
+** does a lot more, like parsing long longs, octal or imaginary numbers
+** and returning the result in different formats:
+** a) It needs less than 3 KB (!) of machine code (on x64 with -Os),
+** b) it doesn't perform any dynamic allocation and,
+** c) it needs only around 600 bytes of stack space.
+**
+** The builtin function is faster than strtod() for typical inputs, e.g.
+** "123", "1.5" or "1e6". Arguably, it's slower for very large exponents,
+** which are not very common (this could be fixed, if needed).
+**
+** And most importantly, the builtin function is equally precise on all
+** platforms. It correctly converts and rounds any input to a double.
+** If this is not the case, please send a bug report -- but PLEASE verify
+** that the implementation you're comparing to is not the culprit!
+**
+** The implementation quickly pre-scans the entire string first and
+** handles simple integers on-the-fly. Otherwise, it dispatches to the
+** base-specific parser. Hex and octal are straightforward.
+**
+** Decimal to binary conversion uses a fixed-length circular buffer in
+** base 100. Some simple cases are handled directly. For other cases, the
+** number in the buffer is up-scaled or down-scaled until the integer part
+** is in the proper range. Then the integer part is rounded and converted
+** to a double which is finally rescaled to the result. Denormals need
+** special treatment to prevent incorrect 'double rounding'.
+*/
+
+/* Definitions for circular decimal digit buffer (base 100 = 2 digits/byte). */
+#define STRSCAN_DIG 1024
+#define STRSCAN_MAXDIG 800 /* 772 + extra are sufficient. */
+#define STRSCAN_DDIG (STRSCAN_DIG/2)
+#define STRSCAN_DMASK (STRSCAN_DDIG-1)
+#define STRSCAN_MAXEXP (1 << 20)
+
+/* Helpers for circular buffer. */
+#define DNEXT(a) (((a)+1) & STRSCAN_DMASK)
+#define DPREV(a) (((a)-1) & STRSCAN_DMASK)
+#define DLEN(lo, hi) ((int32_t)(((lo)-(hi)) & STRSCAN_DMASK))
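+/* (E.g. with STRSCAN_DDIG == 512, DNEXT(511) wraps to 0 and DPREV(0) to */
+/* 511; each buffer byte holds two decimal digits, i.e. a value in 0..99.) */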
+
+#define casecmp(c, k) (((c) | 0x20) == k)
+
+/* Final conversion to double. */
+static void strscan_double(uint64_t x, TValue *o, int32_t ex2, int32_t neg)
+{
+ double n;
+
+ /* Avoid double rounding for denormals. */
+ if (LJ_UNLIKELY(ex2 <= -1075 && x != 0)) {
+ /* NYI: all of this generates way too much code on 32 bit CPUs. */
+#if (defined(__GNUC__) || defined(__clang__)) && LJ_64
+ int32_t b = (int32_t)(__builtin_clzll(x)^63);
+#else
+ int32_t b = (x>>32) ? 32+(int32_t)lj_fls((uint32_t)(x>>32)) :
+ (int32_t)lj_fls((uint32_t)x);
+#endif
+ if ((int32_t)b + ex2 <= -1023 && (int32_t)b + ex2 >= -1075) {
+ uint64_t rb = (uint64_t)1 << (-1075-ex2);
+ if ((x & rb) && ((x & (rb+rb+rb-1)))) x += rb+rb;
+ x = (x & ~(rb+rb-1));
+ }
+ }
+
+ /* Convert to double using a signed int64_t conversion, then rescale. */
+ lj_assertX((int64_t)x >= 0, "bad double conversion");
+ n = (double)(int64_t)x;
+ if (neg) n = -n;
+ if (ex2) n = ldexp(n, ex2);
+ o->n = n;
+}
+
+/* Parse hexadecimal number. */
+static StrScanFmt strscan_hex(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, uint32_t opt,
+ int32_t ex2, int32_t neg, uint32_t dig)
+{
+ uint64_t x = 0;
+ uint32_t i;
+
+ /* Scan hex digits. */
+ for (i = dig > 16 ? 16 : dig; i; i--, p++) {
+ uint32_t d = (*p != '.' ? *p : *++p); if (d > '9') d += 9;
+ x = (x << 4) + (d & 15);
+ }
+
+ /* Summarize rounding-effect of excess digits. */
+ for (i = 16; i < dig; i++, p++)
+ x |= ((*p != '.' ? *p : *++p) != '0'), ex2 += 4;
+
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg &&
+ !(x == 0 && neg)) {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT; /* Fast path for 32 bit integers. */
+ }
+ if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
+ /* fallthrough */
+ case STRSCAN_U32:
+ if (dig > 8) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_U32;
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ if (dig > 16) return STRSCAN_ERROR;
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ return fmt;
+ default:
+ break;
+ }
+
+ /* Reduce range, then convert to double. */
+ if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
+ strscan_double(x, o, ex2, neg);
+ return fmt;
+}
+
+/* Parse octal number. */
+static StrScanFmt strscan_oct(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, int32_t neg, uint32_t dig)
+{
+ uint64_t x = 0;
+
+ /* Scan octal digits. */
+ if (dig > 22 || (dig == 22 && *p > '1')) return STRSCAN_ERROR;
+ while (dig-- > 0) {
+ if (!(*p >= '0' && *p <= '7')) return STRSCAN_ERROR;
+ x = (x << 3) + (*p++ & 7);
+ }
+
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (x >= 0x80000000u+neg) fmt = STRSCAN_U32;
+ /* fallthrough */
+ case STRSCAN_U32:
+ if ((x >> 32)) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ break;
+ default:
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ break;
+ }
+ return fmt;
+}
+
+/* Parse decimal number. */
+static StrScanFmt strscan_dec(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, uint32_t opt,
+ int32_t ex10, int32_t neg, uint32_t dig)
+{
+ uint8_t xi[STRSCAN_DDIG], *xip = xi;
+
+ if (dig) {
+ uint32_t i = dig;
+ if (i > STRSCAN_MAXDIG) {
+ ex10 += (int32_t)(i - STRSCAN_MAXDIG);
+ i = STRSCAN_MAXDIG;
+ }
+ /* Scan unaligned leading digit. */
+ if (((ex10^i) & 1))
+ *xip++ = ((*p != '.' ? *p : *++p) & 15), i--, p++;
+ /* Scan aligned double-digits. */
+ for ( ; i > 1; i -= 2) {
+ uint32_t d = 10 * ((*p != '.' ? *p : *++p) & 15); p++;
+ *xip++ = d + ((*p != '.' ? *p : *++p) & 15); p++;
+ }
+ /* Scan and realign trailing digit. */
+ if (i) *xip++ = 10 * ((*p != '.' ? *p : *++p) & 15), ex10--, dig++, p++;
+
+ /* Summarize rounding-effect of excess digits. */
+ if (dig > STRSCAN_MAXDIG) {
+ do {
+ if ((*p != '.' ? *p : *++p) != '0') { xip[-1] |= 1; break; }
+ p++;
+ } while (--dig > STRSCAN_MAXDIG);
+ dig = STRSCAN_MAXDIG;
+ } else { /* Simplify exponent. */
+ while (ex10 > 0 && dig <= 18) *xip++ = 0, ex10 -= 2, dig += 2;
+ }
+ } else { /* Only got zeros. */
+ ex10 = 0;
+ xi[0] = 0;
+ }
+
+ /* Fast path for numbers in integer format (but handles e.g. 1e6, too). */
+ if (dig <= 20 && ex10 == 0) {
+ uint8_t *xis;
+ uint64_t x = xi[0];
+ double n;
+ for (xis = xi+1; xis < xip; xis++) x = x * 100 + *xis;
+ if (!(dig == 20 && (xi[0] > 18 || (int64_t)x >= 0))) { /* No overflow? */
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT; /* Fast path for 32 bit integers. */
+ }
+ if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; goto plainnumber; }
+ /* fallthrough */
+ case STRSCAN_U32:
+ if ((x >> 32) != 0) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_U32;
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ return fmt;
+ default:
+ plainnumber: /* Fast path for plain numbers < 2^63. */
+ if ((int64_t)x < 0) break;
+ n = (double)(int64_t)x;
+ if (neg) n = -n;
+ o->n = n;
+ return fmt;
+ }
+ }
+ }
+
+ /* Slow non-integer path. */
+ if (fmt == STRSCAN_INT) {
+ if ((opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+ fmt = STRSCAN_NUM;
+ } else if (fmt > STRSCAN_INT) {
+ return STRSCAN_ERROR;
+ }
+ {
+ uint32_t hi = 0, lo = (uint32_t)(xip-xi);
+ int32_t ex2 = 0, idig = (int32_t)lo + (ex10 >> 1);
+
+ lj_assertX(lo > 0 && (ex10 & 1) == 0, "bad lo %d ex10 %d", lo, ex10);
+
+ /* Handle simple overflow/underflow. */
+ if (idig > 310/2) { if (neg) setminfV(o); else setpinfV(o); return fmt; }
+ else if (idig < -326/2) { o->n = neg ? -0.0 : 0.0; return fmt; }
+
+ /* Scale up until we have at least 17 or 18 integer part digits. */
+ while (idig < 9 && idig < DLEN(lo, hi)) {
+ uint32_t i, cy = 0;
+ ex2 -= 6;
+ for (i = DPREV(lo); ; i = DPREV(i)) {
+ uint32_t d = (xi[i] << 6) + cy;
+ cy = (((d >> 2) * 5243) >> 17); d = d - cy * 100; /* Div/mod 100. */
+ xi[i] = (uint8_t)d;
+ if (i == hi) break;
+ if (d == 0 && i == DPREV(lo)) lo = i;
+ }
+ if (cy) {
+ hi = DPREV(hi);
+ if (xi[DPREV(lo)] == 0) lo = DPREV(lo);
+ else if (hi == lo) { lo = DPREV(lo); xi[DPREV(lo)] |= xi[lo]; }
+ xi[hi] = (uint8_t)cy; idig++;
+ }
+ }
+
+ /* Scale down until no more than 17 or 18 integer part digits remain. */
+ while (idig > 9) {
+ uint32_t i = hi, cy = 0;
+ ex2 += 6;
+ do {
+ cy += xi[i];
+ xi[i] = (cy >> 6);
+ cy = 100 * (cy & 0x3f);
+ if (xi[i] == 0 && i == hi) hi = DNEXT(hi), idig--;
+ i = DNEXT(i);
+ } while (i != lo);
+ while (cy) {
+ if (hi == lo) { xi[DPREV(lo)] |= 1; break; }
+ xi[lo] = (cy >> 6); lo = DNEXT(lo);
+ cy = 100 * (cy & 0x3f);
+ }
+ }
+
+ /* Collect integer part digits and convert to rescaled double. */
+ {
+ uint64_t x = xi[hi];
+ uint32_t i;
+ for (i = DNEXT(hi); --idig > 0 && i != lo; i = DNEXT(i))
+ x = x * 100 + xi[i];
+ if (i == lo) {
+ while (--idig >= 0) x = x * 100;
+ } else { /* Gather round bit from remaining digits. */
+ x <<= 1; ex2--;
+ do {
+ if (xi[i]) { x |= 1; break; }
+ i = DNEXT(i);
+ } while (i != lo);
+ }
+ strscan_double(x, o, ex2, neg);
+ }
+ }
+ return fmt;
+}
+
+/* Parse binary number. */
+static StrScanFmt strscan_bin(const uint8_t *p, TValue *o,
+ StrScanFmt fmt, uint32_t opt,
+ int32_t ex2, int32_t neg, uint32_t dig)
+{
+ uint64_t x = 0;
+ uint32_t i;
+
+ if (ex2 || dig > 64) return STRSCAN_ERROR;
+
+ /* Scan binary digits. */
+ for (i = dig; i; i--, p++) {
+ if ((*p & ~1) != '0') return STRSCAN_ERROR;
+ x = (x << 1) | (*p & 1);
+ }
+
+ /* Format-specific handling. */
+ switch (fmt) {
+ case STRSCAN_INT:
+ if (!(opt & STRSCAN_OPT_TONUM) && x < 0x80000000u+neg) {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT; /* Fast path for 32 bit integers. */
+ }
+ if (!(opt & STRSCAN_OPT_C)) { fmt = STRSCAN_NUM; break; }
+ /* fallthrough */
+ case STRSCAN_U32:
+ if (dig > 32) return STRSCAN_ERROR;
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_U32;
+ case STRSCAN_I64:
+ case STRSCAN_U64:
+ o->u64 = neg ? (uint64_t)-(int64_t)x : x;
+ return fmt;
+ default:
+ break;
+ }
+
+ /* Reduce range, then convert to double. */
+ if ((x & U64x(c0000000,0000000))) { x = (x >> 2) | (x & 3); ex2 += 2; }
+ strscan_double(x, o, ex2, neg);
+ return fmt;
+}
+
+/* Scan string containing a number. Returns format. Returns value in o. */
+StrScanFmt lj_strscan_scan(const uint8_t *p, MSize len, TValue *o,
+ uint32_t opt)
+{
+ int32_t neg = 0;
+ const uint8_t *pe = p + len;
+
+ /* Remove leading space, parse sign and non-numbers. */
+ if (LJ_UNLIKELY(!lj_char_isdigit(*p))) {
+ while (lj_char_isspace(*p)) p++;
+ if (*p == '+' || *p == '-') neg = (*p++ == '-');
+ if (LJ_UNLIKELY(*p >= 'A')) { /* Parse "inf", "infinity" or "nan". */
+ TValue tmp;
+ setnanV(&tmp);
+ if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'f')) {
+ if (neg) setminfV(&tmp); else setpinfV(&tmp);
+ p += 3;
+ if (casecmp(p[0],'i') && casecmp(p[1],'n') && casecmp(p[2],'i') &&
+ casecmp(p[3],'t') && casecmp(p[4],'y')) p += 5;
+ } else if (casecmp(p[0],'n') && casecmp(p[1],'a') && casecmp(p[2],'n')) {
+ p += 3;
+ }
+ while (lj_char_isspace(*p)) p++;
+ if (*p || p < pe) return STRSCAN_ERROR;
+ o->u64 = tmp.u64;
+ return STRSCAN_NUM;
+ }
+ }
+
+ /* Parse regular number. */
+ {
+ StrScanFmt fmt = STRSCAN_INT;
+ int cmask = LJ_CHAR_DIGIT;
+ int base = (opt & STRSCAN_OPT_C) && *p == '0' ? 0 : 10;
+ const uint8_t *sp, *dp = NULL;
+ uint32_t dig = 0, hasdig = 0, x = 0;
+ int32_t ex = 0;
+
+ /* Determine base and skip leading zeros. */
+ if (LJ_UNLIKELY(*p <= '0')) {
+ if (*p == '0') {
+ if (casecmp(p[1], 'x'))
+ base = 16, cmask = LJ_CHAR_XDIGIT, p += 2;
+ else if (casecmp(p[1], 'b'))
+ base = 2, cmask = LJ_CHAR_DIGIT, p += 2;
+ }
+ for ( ; ; p++) {
+ if (*p == '0') {
+ hasdig = 1;
+ } else if (*p == '.') {
+ if (dp) return STRSCAN_ERROR;
+ dp = p;
+ } else {
+ break;
+ }
+ }
+ }
+
+ /* Preliminary digit and decimal point scan. */
+ for (sp = p; ; p++) {
+ if (LJ_LIKELY(lj_char_isa(*p, cmask))) {
+ x = x * 10 + (*p & 15); /* For fast path below. */
+ dig++;
+ } else if (*p == '.') {
+ if (dp) return STRSCAN_ERROR;
+ dp = p;
+ } else {
+ break;
+ }
+ }
+ if (!(hasdig | dig)) return STRSCAN_ERROR;
+
+ /* Handle decimal point. */
+ if (dp) {
+ if (base == 2) return STRSCAN_ERROR;
+ fmt = STRSCAN_NUM;
+ if (dig) {
+ ex = (int32_t)(dp-(p-1)); dp = p-1;
+ while (ex < 0 && *dp-- == '0') ex++, dig--; /* Skip trailing zeros. */
+ if (ex <= -STRSCAN_MAXEXP) return STRSCAN_ERROR;
+ if (base == 16) ex *= 4;
+ }
+ }
+
+ /* Parse exponent. */
+ if (base >= 10 && casecmp(*p, (uint32_t)(base == 16 ? 'p' : 'e'))) {
+ uint32_t xx;
+ int negx = 0;
+ fmt = STRSCAN_NUM; p++;
+ if (*p == '+' || *p == '-') negx = (*p++ == '-');
+ if (!lj_char_isdigit(*p)) return STRSCAN_ERROR;
+ xx = (*p++ & 15);
+ while (lj_char_isdigit(*p)) {
+ xx = xx * 10 + (*p & 15);
+ if (xx >= STRSCAN_MAXEXP) return STRSCAN_ERROR;
+ p++;
+ }
+ ex += negx ? -(int32_t)xx : (int32_t)xx;
+ }
+
+ /* Parse suffix. */
+ if (*p) {
+ /* I (IMAG), U (U32), LL (I64), ULL/LLU (U64), L (long), UL/LU (ulong). */
+ /* NYI: f (float). Not needed until cp_number() handles non-integers. */
+ if (casecmp(*p, 'i')) {
+ if (!(opt & STRSCAN_OPT_IMAG)) return STRSCAN_ERROR;
+ p++; fmt = STRSCAN_IMAG;
+ } else if (fmt == STRSCAN_INT) {
+ if (casecmp(*p, 'u')) p++, fmt = STRSCAN_U32;
+ if (casecmp(*p, 'l')) {
+ p++;
+ if (casecmp(*p, 'l')) p++, fmt += STRSCAN_I64 - STRSCAN_INT;
+ else if (!(opt & STRSCAN_OPT_C)) return STRSCAN_ERROR;
+ else if (sizeof(long) == 8) fmt += STRSCAN_I64 - STRSCAN_INT;
+ }
+ if (casecmp(*p, 'u') && (fmt == STRSCAN_INT || fmt == STRSCAN_I64))
+ p++, fmt += STRSCAN_U32 - STRSCAN_INT;
+ if ((fmt == STRSCAN_U32 && !(opt & STRSCAN_OPT_C)) ||
+ (fmt >= STRSCAN_I64 && !(opt & STRSCAN_OPT_LL)))
+ return STRSCAN_ERROR;
+ }
+ while (lj_char_isspace(*p)) p++;
+ if (*p) return STRSCAN_ERROR;
+ }
+ if (p < pe) return STRSCAN_ERROR;
+
+ /* Fast path for decimal 32 bit integers. */
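+ /* (x < 0x80000000u+neg admits -2147483648 but rejects +2147483648.) */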
+ if (fmt == STRSCAN_INT && base == 10 &&
+ (dig < 10 || (dig == 10 && *sp <= '2' && x < 0x80000000u+neg))) {
+ if ((opt & STRSCAN_OPT_TONUM)) {
+ o->n = neg ? -(double)x : (double)x;
+ return STRSCAN_NUM;
+ } else if (x == 0 && neg) {
+ o->n = -0.0;
+ return STRSCAN_NUM;
+ } else {
+ o->i = neg ? -(int32_t)x : (int32_t)x;
+ return STRSCAN_INT;
+ }
+ }
+
+ /* Dispatch to base-specific parser. */
+ if (base == 0 && !(fmt == STRSCAN_NUM || fmt == STRSCAN_IMAG))
+ return strscan_oct(sp, o, fmt, neg, dig);
+ if (base == 16)
+ fmt = strscan_hex(sp, o, fmt, opt, ex, neg, dig);
+ else if (base == 2)
+ fmt = strscan_bin(sp, o, fmt, opt, ex, neg, dig);
+ else
+ fmt = strscan_dec(sp, o, fmt, opt, ex, neg, dig);
+
+ /* Try to convert number to integer, if requested. */
+ if (fmt == STRSCAN_NUM && (opt & STRSCAN_OPT_TOINT) && !tvismzero(o)) {
+ double n = o->n;
+ int32_t i = lj_num2int(n);
+ if (n == (lua_Number)i) { o->i = i; return STRSCAN_INT; }
+ }
+ return fmt;
+ }
+}
+
+int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o)
+{
+ StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o,
+ STRSCAN_OPT_TONUM);
+ lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM, "bad scan format");
+ return (fmt != STRSCAN_ERROR);
+}
+
+#if LJ_DUALNUM
+int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o)
+{
+ StrScanFmt fmt = lj_strscan_scan((const uint8_t *)strdata(str), str->len, o,
+ STRSCAN_OPT_TOINT);
+ lj_assertX(fmt == STRSCAN_ERROR || fmt == STRSCAN_NUM || fmt == STRSCAN_INT,
+ "bad scan format");
+ if (fmt == STRSCAN_INT) setitype(o, LJ_TISNUM);
+ return (fmt != STRSCAN_ERROR);
+}
+#endif
+
+#undef DNEXT
+#undef DPREV
+#undef DLEN
+
diff --git a/libs/luajit-cmake/luajit/src/lj_strscan.h b/libs/luajit-cmake/luajit/src/lj_strscan.h
new file mode 100644
index 0000000..8ed3154
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_strscan.h
@@ -0,0 +1,40 @@
+/*
+** String scanning.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_STRSCAN_H
+#define _LJ_STRSCAN_H
+
+#include "lj_obj.h"
+
+/* Options for accepted/returned formats. */
+#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */
+#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */
+#define STRSCAN_OPT_IMAG 0x04
+#define STRSCAN_OPT_LL 0x08
+#define STRSCAN_OPT_C 0x10
+
+/* Returned format. */
+typedef enum {
+ STRSCAN_ERROR,
+ STRSCAN_NUM, STRSCAN_IMAG,
+ STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
+} StrScanFmt;
+
+LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, MSize len, TValue *o,
+ uint32_t opt);
+LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
+#if LJ_DUALNUM
+LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
+#else
+#define lj_strscan_number(s, o) lj_strscan_num((s), (o))
+#endif
+
+/* Check for number or convert string to number/int in-place (!). */
+static LJ_AINLINE int lj_strscan_numberobj(TValue *o)
+{
+ return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o));
+}
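+
+/* Example (illustrative): a TValue holding the string "0x2a" is replaced */
+/* in-place by the number 42 (or by the integer 42 under LJ_DUALNUM). */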
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_tab.c b/libs/luajit-cmake/luajit/src/lj_tab.c
new file mode 100644
index 0000000..c3609b3
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_tab.c
@@ -0,0 +1,693 @@
+/*
+** Table handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#define lj_tab_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_tab.h"
+
+/* -- Object hashing ------------------------------------------------------ */
+
+/* Hash an arbitrary key and return its anchor position in the hash table. */
+static Node *hashkey(const GCtab *t, cTValue *key)
+{
+ lj_assertX(!tvisint(key), "attempt to hash integer");
+ if (tvisstr(key))
+ return hashstr(t, strV(key));
+ else if (tvisnum(key))
+ return hashnum(t, key);
+ else if (tvisbool(key))
+ return hashmask(t, boolV(key));
+ else
+ return hashgcref(t, key->gcr);
+ /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
+}
+
+/* -- Table creation and destruction -------------------------------------- */
+
+/* Create new hash part for table. */
+static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
+{
+ uint32_t hsize;
+ Node *node;
+ lj_assertL(hbits != 0, "zero hash size");
+ if (hbits > LJ_MAX_HBITS)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ hsize = 1u << hbits;
+ node = lj_mem_newvec(L, hsize, Node);
+ setmref(t->node, node);
+ setfreetop(t, node, &node[hsize]);
+ t->hmask = hsize-1;
+}
+
+/*
+** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
+** A: Because alias analysis for C is _really_ tough.
+** Even state-of-the-art C compilers won't produce good code without this.
+*/
+
+/* Clear hash part of table. */
+static LJ_AINLINE void clearhpart(GCtab *t)
+{
+ uint32_t i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ lj_assertX(t->hmask != 0, "empty hash part");
+ for (i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ setmref(n->next, NULL);
+ setnilV(&n->key);
+ setnilV(&n->val);
+ }
+}
+
+/* Clear array part of table. */
+static LJ_AINLINE void clearapart(GCtab *t)
+{
+ uint32_t i, asize = t->asize;
+ TValue *array = tvref(t->array);
+ for (i = 0; i < asize; i++)
+ setnilV(&array[i]);
+}
+
+/* Create a new table. Note: the slots are not initialized (yet). */
+static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t;
+ /* First try to colocate the array part. */
+ if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
+ Node *nilnode;
+ lj_assertL((sizeof(GCtab) & 7) == 0, "bad GCtab size");
+ t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = (int8_t)asize;
+ setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
+ setgcrefnull(t->metatable);
+ t->asize = asize;
+ t->hmask = 0;
+ nilnode = &G(L)->nilnode;
+ setmref(t->node, nilnode);
+#if LJ_GC64
+ setmref(t->freetop, nilnode);
+#endif
+ } else { /* Otherwise separately allocate the array part. */
+ Node *nilnode;
+ t = lj_mem_newobj(L, GCtab);
+ t->gct = ~LJ_TTAB;
+ t->nomm = (uint8_t)~0;
+ t->colo = 0;
+ setmref(t->array, NULL);
+ setgcrefnull(t->metatable);
+ t->asize = 0; /* In case the array allocation fails. */
+ t->hmask = 0;
+ nilnode = &G(L)->nilnode;
+ setmref(t->node, nilnode);
+#if LJ_GC64
+ setmref(t->freetop, nilnode);
+#endif
+ if (asize > 0) {
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ setmref(t->array, lj_mem_newvec(L, asize, TValue));
+ t->asize = asize;
+ }
+ }
+ if (hbits)
+ newhpart(L, t, hbits);
+ return t;
+}
+
+/* Create a new table.
+**
+** IMPORTANT NOTE: The API differs from lua_createtable()!
+**
+** The array size is non-inclusive. E.g. asize=128 creates array slots
+** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
+** (slot 0 is wasted in this case).
+**
+** The hash size is given in hash bits. hbits=0 means no hash part.
+** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
+*/
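+/*
+** E.g. (illustrative): a table meant to hold keys 1..100 plus a few named
+** fields could be created with lj_tab_new(L, 101, 2), i.e. array slots
+** 0..100 and 4 hash slots.
+*/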
+GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
+{
+ GCtab *t = newtab(L, asize, hbits);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
+
+/* The API of this function conforms to lua_createtable(). */
+GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h)
+{
+ return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h));
+}
+
+#if LJ_HASJIT
+GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
+{
+ GCtab *t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
+ clearapart(t);
+ if (t->hmask > 0) clearhpart(t);
+ return t;
+}
+#endif
+
+/* Duplicate a table. */
+GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
+{
+ GCtab *t;
+ uint32_t asize, hmask;
+ t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
+ lj_assertL(kt->asize == t->asize && kt->hmask == t->hmask,
+ "mismatched size of table and template");
+ t->nomm = 0; /* Keys with metamethod names may be present. */
+ asize = kt->asize;
+ if (asize > 0) {
+ TValue *array = tvref(t->array);
+ TValue *karray = tvref(kt->array);
+ if (asize < 64) { /* An inlined loop beats memcpy for < 512 bytes. */
+ uint32_t i;
+ for (i = 0; i < asize; i++)
+ copyTV(L, &array[i], &karray[i]);
+ } else {
+ memcpy(array, karray, asize*sizeof(TValue));
+ }
+ }
+ hmask = kt->hmask;
+ if (hmask > 0) {
+ uint32_t i;
+ Node *node = noderef(t->node);
+ Node *knode = noderef(kt->node);
+ ptrdiff_t d = (char *)node - (char *)knode;
+ setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d));
+ for (i = 0; i <= hmask; i++) {
+ Node *kn = &knode[i];
+ Node *n = &node[i];
+ Node *next = nextnode(kn);
+ /* Don't use copyTV here, since it asserts on a copy of a dead key. */
+ n->val = kn->val; n->key = kn->key;
+ setmref(n->next, next == NULL ? next : (Node *)((char *)next + d));
+ }
+ }
+ return t;
+}
+
+/* Clear a table. */
+void LJ_FASTCALL lj_tab_clear(GCtab *t)
+{
+ clearapart(t);
+ if (t->hmask > 0) {
+ Node *node = noderef(t->node);
+ setfreetop(t, node, &node[t->hmask+1]);
+ clearhpart(t);
+ }
+}
+
+/* Free a table. */
+void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
+{
+ if (t->hmask > 0)
+ lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
+ if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo)
+ lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
+ else
+ lj_mem_freet(g, t);
+}
+
+/* -- Table resizing ------------------------------------------------------ */
+
+/* Resize a table to fit the new array/hash part sizes. */
+void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
+{
+ Node *oldnode = noderef(t->node);
+ uint32_t oldasize = t->asize;
+ uint32_t oldhmask = t->hmask;
+ if (asize > oldasize) { /* Array part grows? */
+ TValue *array;
+ uint32_t i;
+ if (asize > LJ_MAX_ASIZE)
+ lj_err_msg(L, LJ_ERR_TABOV);
+ if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
+ /* A colocated array must be separated and copied. */
+ TValue *oarray = tvref(t->array);
+ array = lj_mem_newvec(L, asize, TValue);
+ t->colo = (int8_t)(t->colo | 0x80); /* Mark as separated (colo < 0). */
+ for (i = 0; i < oldasize; i++)
+ copyTV(L, &array[i], &oarray[i]);
+ } else {
+ array = (TValue *)lj_mem_realloc(L, tvref(t->array),
+ oldasize*sizeof(TValue), asize*sizeof(TValue));
+ }
+ setmref(t->array, array);
+ t->asize = asize;
+ for (i = oldasize; i < asize; i++) /* Clear newly allocated slots. */
+ setnilV(&array[i]);
+ }
+ /* Create new (empty) hash part. */
+ if (hbits) {
+ newhpart(L, t, hbits);
+ clearhpart(t);
+ } else {
+ global_State *g = G(L);
+ setmref(t->node, &g->nilnode);
+#if LJ_GC64
+ setmref(t->freetop, &g->nilnode);
+#endif
+ t->hmask = 0;
+ }
+ if (asize < oldasize) { /* Array part shrinks? */
+ TValue *array = tvref(t->array);
+ uint32_t i;
+ t->asize = asize; /* Note: This 'shrinks' even colocated arrays. */
+ for (i = asize; i < oldasize; i++) /* Reinsert old array values. */
+ if (!tvisnil(&array[i]))
+ copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
+ /* Physically shrink only separated arrays. */
+ if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
+ setmref(t->array, lj_mem_realloc(L, array,
+ oldasize*sizeof(TValue), asize*sizeof(TValue)));
+ }
+ if (oldhmask > 0) { /* Reinsert pairs from old hash part. */
+ global_State *g;
+ uint32_t i;
+ for (i = 0; i <= oldhmask; i++) {
+ Node *n = &oldnode[i];
+ if (!tvisnil(&n->val))
+ copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
+ }
+ g = G(L);
+ lj_mem_freevec(g, oldnode, oldhmask+1, Node);
+ }
+}
+
+static uint32_t countint(cTValue *key, uint32_t *bins)
+{
+ lj_assertX(!tvisint(key), "bad integer key");
+ if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
+ bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static uint32_t countarray(const GCtab *t, uint32_t *bins)
+{
+ uint32_t na, b, i;
+ if (t->asize == 0) return 0;
+ for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
+ uint32_t n, top = 2u << b;
+ TValue *array;
+ if (top >= t->asize) {
+ top = t->asize-1;
+ if (i > top)
+ break;
+ }
+ array = tvref(t->array);
+ for (n = 0; i <= top; i++)
+ if (!tvisnil(&array[i]))
+ n++;
+ bins[b] += n;
+ na += n;
+ }
+ return na;
+}
+
+static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
+{
+ uint32_t total, na, i, hmask = t->hmask;
+ Node *node = noderef(t->node);
+ for (total = na = 0, i = 0; i <= hmask; i++) {
+ Node *n = &node[i];
+ if (!tvisnil(&n->val)) {
+ na += countint(&n->key, bins);
+ total++;
+ }
+ }
+ *narray += na;
+ return total;
+}
+
+static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
+{
+ uint32_t b, sum, na = 0, sz = 0, nn = *narray;
+ for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
+ if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
+ sz = (2u<<b)+1;
+ na = sum;
+ }
+ *narray = sz;
+ return na;
+}
+
+static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
+{
+ uint32_t bins[LJ_MAX_ABITS];
+ uint32_t total, asize, na, i;
+ for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
+ asize = countarray(t, bins);
+ total = 1 + asize;
+ total += counthash(t, bins, &asize);
+ asize += countint(ek, bins);
+ na = bestasize(bins, &asize);
+ total -= na;
+ lj_tab_resize(L, t, asize, hsize2hbits(total));
+}
+
+#if LJ_HASFFI
+void lj_tab_rehash(lua_State *L, GCtab *t)
+{
+ rehashtab(L, t, niltv(L));
+}
+#endif
+
+void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
+{
+ lj_tab_resize(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
+}
+
+/* -- Table getters ------------------------------------------------------- */
+
+cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_getstr(GCtab *t, const GCstr *key)
+{
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return NULL;
+}
+
+cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
+{
+ if (tvisstr(key)) {
+ cTValue *tv = lj_tab_getstr(t, strV(key));
+ if (tv)
+ return tv;
+ } else if (tvisint(key)) {
+ cTValue *tv = lj_tab_getint(t, intV(key));
+ if (tv)
+ return tv;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k) {
+ cTValue *tv = lj_tab_getint(t, k);
+ if (tv)
+ return tv;
+ } else {
+ goto genlookup; /* Else use the generic lookup. */
+ }
+ } else if (!tvisnil(key)) {
+ Node *n;
+ genlookup:
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ }
+ return niltv(L);
+}
+
+/* -- Table setters ------------------------------------------------------- */
+
+/* Insert new key. Use Brent's variation to optimize the chain length. */
+TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n = hashkey(t, key);
+ if (!tvisnil(&n->val) || t->hmask == 0) {
+ Node *nodebase = noderef(t->node);
+ Node *collide, *freenode = getfreetop(t, nodebase);
+ lj_assertL(freenode >= nodebase && freenode <= nodebase+t->hmask+1,
+ "bad freenode");
+ do {
+ if (freenode == nodebase) { /* No free node found? */
+ rehashtab(L, t, key); /* Rehash table. */
+ return lj_tab_set(L, t, key); /* Retry key insertion. */
+ }
+ } while (!tvisnil(&(--freenode)->key));
+ setfreetop(t, nodebase, freenode);
+ lj_assertL(freenode != &G(L)->nilnode, "store to fallback hash");
+ collide = hashkey(t, &n->key);
+ if (collide != n) { /* Colliding node not the main node? */
+ while (noderef(collide->next) != n) /* Find predecessor. */
+ collide = nextnode(collide);
+ setmref(collide->next, freenode); /* Relink chain. */
+ /* Copy colliding node into free node and free main node. */
+ freenode->val = n->val;
+ freenode->key = n->key;
+ freenode->next = n->next;
+ setmref(n->next, NULL);
+ setnilV(&n->val);
+ /* Rechain pseudo-resurrected string keys with colliding hashes. */
+ while (nextnode(freenode)) {
+ Node *nn = nextnode(freenode);
+ if (!tvisnil(&nn->val) && hashkey(t, &nn->key) == n) {
+ freenode->next = nn->next;
+ nn->next = n->next;
+ setmref(n->next, nn);
+ /*
+      ** Rechaining a resurrected string key creates a new dilemma:
+      ** another string key may have originally been resurrected via
+      ** _any_ of the previous nodes as a chain anchor, including a node
+      ** that had to be moved, which would make that key unreachable.
+      ** It's not feasible to check all previous nodes, so rechain any
+      ** string key that's currently in a non-main position.
+ */
+ while ((nn = nextnode(freenode))) {
+ if (!tvisnil(&nn->val)) {
+ Node *mn = hashkey(t, &nn->key);
+ if (mn != freenode && mn != nn) {
+ freenode->next = nn->next;
+ nn->next = mn->next;
+ setmref(mn->next, nn);
+ } else {
+ freenode = nn;
+ }
+ } else {
+ freenode = nn;
+ }
+ }
+ break;
+ } else {
+ freenode = nn;
+ }
+ }
+ } else { /* Otherwise use free node. */
+ setmrefr(freenode->next, n->next); /* Insert into chain. */
+ setmref(n->next, freenode);
+ n = freenode;
+ }
+ }
+ n->key.u64 = key->u64;
+ if (LJ_UNLIKELY(tvismzero(&n->key)))
+ n->key.u64 = 0;
+ lj_gc_anybarriert(L, t);
+ lj_assertL(tvisnil(&n->val), "new hash slot is not empty");
+ return &n->val;
+}
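+
+/* Illustration of the collision handling above: if the new key hashes to
+** a node occupied by a key whose own main position is elsewhere, that
+** colliding key is moved to a free node and relinked from its predecessor,
+** so the new key always ends up in its main position:
+**
+**   main(k1) = main(k2) = n0, main(k3) = n1, n1 currently holds k2
+**   inserting k3: k2 moves to a free node, n1 receives k3
+*/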
+
+TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
+{
+ TValue k;
+ Node *n;
+ k.n = (lua_Number)key;
+ n = hashnum(t, &k);
+ do {
+ if (tvisnum(&n->key) && n->key.n == k.n)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_setstr(lua_State *L, GCtab *t, const GCstr *key)
+{
+ TValue k;
+ Node *n = hashstr(t, key);
+ do {
+ if (tvisstr(&n->key) && strV(&n->key) == key)
+ return &n->val;
+ } while ((n = nextnode(n)));
+ setstrV(L, &k, key);
+ return lj_tab_newkey(L, t, &k);
+}
+
+TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
+{
+ Node *n;
+ t->nomm = 0; /* Invalidate negative metamethod cache. */
+ if (tvisstr(key)) {
+ return lj_tab_setstr(L, t, strV(key));
+ } else if (tvisint(key)) {
+ return lj_tab_setint(L, t, intV(key));
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if (nk == (lua_Number)k)
+ return lj_tab_setint(L, t, k);
+ if (tvisnan(key))
+ lj_err_msg(L, LJ_ERR_NANIDX);
+ /* Else use the generic lookup. */
+ } else if (tvisnil(key)) {
+ lj_err_msg(L, LJ_ERR_NILIDX);
+ }
+ n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return &n->val;
+ } while ((n = nextnode(n)));
+ return lj_tab_newkey(L, t, key);
+}
+
+/* -- Table traversal ----------------------------------------------------- */
+
+/* Table traversal indexes:
+**
+** Array key index: [0 .. t->asize-1]
+** Hash key index: [t->asize .. t->asize+t->hmask]
+** Invalid key: ~0
+*/
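+
+/* E.g. with asize=4 and hmask=3: array keys 0..3 traverse as indexes 0..3,
+** the four hash nodes as indexes 4..7, and a nil key (index 0) starts the
+** traversal.
+*/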
+
+/* Get the successor traversal index of a key. */
+uint32_t LJ_FASTCALL lj_tab_keyindex(GCtab *t, cTValue *key)
+{
+ TValue tmp;
+ if (tvisint(key)) {
+ int32_t k = intV(key);
+ if ((uint32_t)k < t->asize)
+ return (uint32_t)k + 1;
+ setnumV(&tmp, (lua_Number)k);
+ key = &tmp;
+ } else if (tvisnum(key)) {
+ lua_Number nk = numV(key);
+ int32_t k = lj_num2int(nk);
+ if ((uint32_t)k < t->asize && nk == (lua_Number)k)
+ return (uint32_t)k + 1;
+ }
+ if (!tvisnil(key)) {
+ Node *n = hashkey(t, key);
+ do {
+ if (lj_obj_equal(&n->key, key))
+ return t->asize + (uint32_t)((n+1) - noderef(t->node));
+ } while ((n = nextnode(n)));
+ if (key->u32.hi == LJ_KEYINDEX) /* Despecialized ITERN while running. */
+ return key->u32.lo;
+ return ~0u; /* Invalid key to next. */
+ }
+ return 0; /* A nil key starts the traversal. */
+}
+
+/* Get the next key/value pair of a table traversal. */
+int lj_tab_next(GCtab *t, cTValue *key, TValue *o)
+{
+ uint32_t idx = lj_tab_keyindex(t, key); /* Find successor index of key. */
+ /* First traverse the array part. */
+ for (; idx < t->asize; idx++) {
+ cTValue *a = arrayslot(t, idx);
+ if (LJ_LIKELY(!tvisnil(a))) {
+ setintV(o, idx);
+ o[1] = *a;
+ return 1;
+ }
+ }
+ idx -= t->asize;
+ /* Then traverse the hash part. */
+ for (; idx <= t->hmask; idx++) {
+ Node *n = &noderef(t->node)[idx];
+ if (!tvisnil(&n->val)) {
+ o[0] = n->key;
+ o[1] = n->val;
+ return 1;
+ }
+ }
+ return (int32_t)idx < 0 ? -1 : 0; /* Invalid key or end of traversal. */
+}
+
+/* -- Table length calculation -------------------------------------------- */
+
+/* Compute table length. Slow path with mixed array/hash lookups. */
+LJ_NOINLINE static MSize tab_len_slow(GCtab *t, size_t hi)
+{
+ cTValue *tv;
+ size_t lo = hi;
+ hi++;
+ /* Widening search for an upper bound. */
+ while ((tv = lj_tab_getint(t, (int32_t)hi)) && !tvisnil(tv)) {
+ lo = hi;
+ hi += hi;
+ if (hi > (size_t)(INT_MAX-2)) { /* Punt and do a linear search. */
+ lo = 1;
+ while ((tv = lj_tab_getint(t, (int32_t)lo)) && !tvisnil(tv)) lo++;
+ return (MSize)(lo - 1);
+ }
+ }
+ /* Binary search to find a non-nil to nil transition. */
+ while (hi - lo > 1) {
+ size_t mid = (lo+hi) >> 1;
+ cTValue *tvb = lj_tab_getint(t, (int32_t)mid);
+ if (tvb && !tvisnil(tvb)) lo = mid; else hi = mid;
+ }
+ return (MSize)lo;
+}
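+
+/* E.g. entered with hi=3 on a table where t[4] is non-nil, the widening
+** phase probes keys 4, 8, 16, ... until it hits a nil, then the binary
+** search narrows [lo, hi) to a non-nil-to-nil border. For a table with
+** holes, any such border is a valid result of the length operator.
+*/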
+
+/* Compute table length. Fast path. */
+MSize LJ_FASTCALL lj_tab_len(GCtab *t)
+{
+ size_t hi = (size_t)t->asize;
+ if (hi) hi--;
+ /* In a growing array the last array element is very likely nil. */
+ if (hi > 0 && LJ_LIKELY(tvisnil(arrayslot(t, hi)))) {
+ /* Binary search to find a non-nil to nil transition in the array. */
+ size_t lo = 0;
+ while (hi - lo > 1) {
+ size_t mid = (lo+hi) >> 1;
+ if (tvisnil(arrayslot(t, mid))) hi = mid; else lo = mid;
+ }
+ return (MSize)lo;
+ }
+ /* Without a hash part, there's an implicit nil after the last element. */
+ return t->hmask ? tab_len_slow(t, hi) : (MSize)hi;
+}
+
+#if LJ_HASJIT
+/* Verify hinted table length or compute it. */
+MSize LJ_FASTCALL lj_tab_len_hint(GCtab *t, size_t hint)
+{
+ size_t asize = (size_t)t->asize;
+ cTValue *tv = arrayslot(t, hint);
+ if (LJ_LIKELY(hint+1 < asize)) {
+ if (LJ_LIKELY(!tvisnil(tv) && tvisnil(tv+1))) return (MSize)hint;
+ } else if (hint+1 <= asize && LJ_LIKELY(t->hmask == 0) && !tvisnil(tv)) {
+ return (MSize)hint;
+ }
+ return lj_tab_len(t);
+}
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lj_tab.h b/libs/luajit-cmake/luajit/src/lj_tab.h
new file mode 100644
index 0000000..2a3f76b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_tab.h
@@ -0,0 +1,96 @@
+/*
+** Table handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TAB_H
+#define _LJ_TAB_H
+
+#include "lj_obj.h"
+
+/* Hash constants. Tuned using a brute force search. */
+#define HASH_BIAS (-0x04c11db7)
+#define HASH_ROT1 14
+#define HASH_ROT2 5
+#define HASH_ROT3 13
+
+/* Scramble the bits of numbers and pointers. */
+static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
+{
+#if LJ_TARGET_X86ORX64
+ /* Prefer variant that compiles well for a 2-operand CPU. */
+ lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
+ lo -= hi; hi = lj_rol(hi, HASH_ROT2);
+ hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
+#else
+ lo ^= hi;
+ lo = lo - lj_rol(hi, HASH_ROT1);
+ hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2);
+ hi = hi - lj_rol(lo, HASH_ROT3);
+#endif
+ return hi;
+}
+
+/* Hash values are masked with the table hash mask and used as an index. */
+static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
+{
+ Node *n = noderef(t->node);
+ return &n[hash & t->hmask];
+}
+
+/* String IDs are generated when a string is interned. */
+#define hashstr(t, s) hashmask(t, (s)->sid)
+
+#define hashlohi(t, lo, hi) hashmask((t), hashrot((lo), (hi)))
+#define hashnum(t, o) hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
+#if LJ_GC64
+#define hashgcref(t, r) \
+ hashlohi((t), (uint32_t)gcrefu(r), (uint32_t)(gcrefu(r) >> 32))
+#else
+#define hashgcref(t, r) hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
+#endif
+
+#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
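+
+/* E.g. hsize2hbits(1) = 1 and hsize2hbits(5) = 1+lj_fls(4) = 3, i.e. a
+** request for 5 hash slots rounds up to 2^3 = 8 nodes, while
+** hsize2hbits(0) = 0 requests no hash part at all.
+*/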
+
+LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
+LJ_FUNC GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h);
+#if LJ_HASJIT
+LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
+#endif
+LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
+LJ_FUNC void LJ_FASTCALL lj_tab_clear(GCtab *t);
+LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
+#if LJ_HASFFI
+LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t);
+#endif
+LJ_FUNC void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits);
+LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
+
+/* Caveat: all getters except lj_tab_get() can return NULL! */
+
+LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key);
+LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, const GCstr *key);
+LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
+
+/* Caveat: all setters require a write barrier for the stored value. */
+
+LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
+LJ_FUNCA TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
+LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, const GCstr *key);
+LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
+
+#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize)
+#define arrayslot(t, i) (&tvref((t)->array)[(i)])
+#define lj_tab_getint(t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key)))
+#define lj_tab_setint(L, t, key) \
+ (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key)))
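+
+/* E.g. on a table with asize=8, an integer access dispatches like this:
+**
+**   cTValue *v = lj_tab_getint(t, 3);    // in range: arrayslot(t, 3)
+**   cTValue *w = lj_tab_getint(t, 100);  // out of range: lj_tab_getinth()
+*/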
+
+LJ_FUNC uint32_t LJ_FASTCALL lj_tab_keyindex(GCtab *t, cTValue *key);
+LJ_FUNCA int lj_tab_next(GCtab *t, cTValue *key, TValue *o);
+LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
+#if LJ_HASJIT
+LJ_FUNC MSize LJ_FASTCALL lj_tab_len_hint(GCtab *t, size_t hint);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target.h b/libs/luajit-cmake/luajit/src/lj_target.h
new file mode 100644
index 0000000..1971692
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target.h
@@ -0,0 +1,165 @@
+/*
+** Definitions for target CPU.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_H
+#define _LJ_TARGET_H
+
+#include "lj_def.h"
+#include "lj_arch.h"
+
+/* -- Registers and spill slots ------------------------------------------- */
+
+/* Register type (uint8_t in ir->r). */
+typedef uint32_t Reg;
+
+/* The hi-bit is NOT set for an allocated register. This means the value
+** can be directly used without masking. The hi-bit is set for a register
+** allocation hint or for RID_INIT, RID_SINK or RID_SUNK.
+*/
+#define RID_NONE 0x80
+#define RID_MASK 0x7f
+#define RID_INIT (RID_NONE|RID_MASK)
+#define RID_SINK (RID_INIT-1)
+#define RID_SUNK (RID_INIT-2)
+
+#define ra_noreg(r) ((r) & RID_NONE)
+#define ra_hasreg(r) (!((r) & RID_NONE))
+
+/* The ra_hashint() macro assumes a previous test for ra_noreg(). */
+#define ra_hashint(r) ((r) < RID_SUNK)
+#define ra_gethint(r) ((Reg)((r) & RID_MASK))
+#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE)
+#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0)
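+
+/* E.g. storing a register hint and reading it back later (a sketch, with
+** RID_RET standing in for any target register ID):
+**
+**   uint8_t rr;
+**   ra_sethint(rr, RID_RET);  // stores RID_RET|RID_NONE
+**   Reg pref = ra_hashint(rr) ? ra_gethint(rr) : RID_NONE;  // RID_RET
+*/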
+
+/* Spill slot 0 means no spill slot has been allocated. */
+#define SPS_NONE 0
+
+#define ra_hasspill(s) ((s) != SPS_NONE)
+
+/* Combined register and spill slot (uint16_t in ir->prev). */
+typedef uint32_t RegSP;
+
+#define REGSP(r, s) ((r) + ((s) << 8))
+#define REGSP_HINT(r) ((r)|RID_NONE)
+#define REGSP_INIT REGSP(RID_INIT, 0)
+
+#define regsp_reg(rs) ((rs) & 255)
+#define regsp_spill(rs) ((rs) >> 8)
+#define regsp_used(rs) \
+ (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0))
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Bitset for registers. 32 registers suffice for most architectures.
+** Note that one set holds bits for both GPRs and FPRs.
+*/
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS || LJ_TARGET_ARM64
+typedef uint64_t RegSet;
+#else
+typedef uint32_t RegSet;
+#endif
+
+#define RID2RSET(r) (((RegSet)1) << (r))
+#define RSET_EMPTY ((RegSet)0)
+#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo))
+
+#define rset_test(rs, r) ((int)((rs) >> (r)) & 1)
+#define rset_set(rs, r) (rs |= RID2RSET(r))
+#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
+#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS || LJ_TARGET_ARM64
+#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
+#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
+#else
+#define rset_picktop(rs) ((Reg)lj_fls(rs))
+#define rset_pickbot(rs) ((Reg)lj_ffs(rs))
+#endif
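+
+/* E.g. building and consuming a working set of registers (a sketch; the
+** RID_* names come from the target headers below):
+**
+**   RegSet work = RSET_RANGE(RID_MIN_GPR, RID_MIN_GPR+4);  // four GPRs
+**   rset_clear(work, RID_MIN_GPR);  // retire one register
+**   Reg r = rset_pickbot(work);     // lowest remaining ID
+**   int live = rset_test(work, r);  // 1: r is still in the set
+*/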
+
+/* -- Register allocation cost -------------------------------------------- */
+
+/* The register allocation heuristic keeps track of the cost for allocating
+** a specific register:
+**
+** A free register (obviously) has a cost of 0 and a 1-bit in the free mask.
+**
+** An already allocated register has the (non-zero) IR reference in the lowest
+** bits and the result of a blended cost-model in the higher bits.
+**
+** The allocator first checks the free mask for a hit. Otherwise an (unrolled)
+** linear search for the minimum cost is used. The search doesn't need to
+** keep track of the position of the minimum, which makes it very fast.
+** The lowest bits of the minimum cost show the desired IR reference whose
+** register is the one to evict.
+**
+** Without the cost-model this degenerates to the standard heuristics for
+** (reverse) linear-scan register allocation. Since code generation is done
+** in reverse, a live interval extends from the last use to the first def.
+** For an SSA IR the IR reference is the first (and only) def and thus
+** trivially marks the end of the interval. The LSRA heuristic says to pick
+** the register whose live interval has the furthest extent, i.e. the lowest
+** IR reference in our case.
+**
+** A cost-model should take into account other factors, like spill-cost and
+** restore- or rematerialization-cost, which depend on the kind of instruction.
+** E.g. constants have zero spill costs, variant instructions have higher
+** costs than invariants and PHIs should preferably never be spilled.
+**
+** Here's a first cut at a simple but effective blended cost-model for R-LSRA:
+** - Due to careful design of the IR, constants already have lower IR
+** references than invariants and invariants have lower IR references
+** than variants.
+** - The cost in the upper 16 bits is the sum of the IR reference and a
+** weighted score. The score currently only takes into account whether
+** the IRT_ISPHI bit is set in the instruction type.
+** - The PHI weight is the minimum distance (in IR instructions) a PHI
+** reference has to be further apart from a non-PHI reference to be spilled.
+** - It should be a power of two (for speed) and must be between 2 and 32768.
+** Good values for the PHI weight seem to be between 40 and 150.
+** - Further study is required.
+*/
+#define REGCOST_PHI_WEIGHT 64
+
+/* Cost for allocating a specific register. */
+typedef uint32_t RegCost;
+
+/* Note: assumes 16 bit IRRef1. */
+#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16))
+#define regcost_ref(rc) ((IRRef1)(rc))
+
+#define REGCOST_T(t) \
+ ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI))
+#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t)))
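+
+/* E.g. with the default PHI weight of 64, a non-PHI at IR ref 100 gets
+** cost REGCOST(100, 100), while a PHI at the same ref gets
+** REGCOST(164, 100). Since the minimum cost is evicted, the PHI is only
+** spilled if its reference is at least 64 below a competing non-PHI ref.
+*/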
+
+/* -- Target-specific definitions ----------------------------------------- */
+
+#if LJ_TARGET_X86ORX64
+#include "lj_target_x86.h"
+#elif LJ_TARGET_ARM
+#include "lj_target_arm.h"
+#elif LJ_TARGET_ARM64
+#include "lj_target_arm64.h"
+#elif LJ_TARGET_PPC
+#include "lj_target_ppc.h"
+#elif LJ_TARGET_MIPS
+#include "lj_target_mips.h"
+#else
+#error "Missing include for target CPU"
+#endif
+
+#ifdef EXITSTUBS_PER_GROUP
+/* Return the address of an exit stub. */
+static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
+{
+ lj_assertX(group[exitno / EXITSTUBS_PER_GROUP] != NULL,
+ "exit stub group for exit %d uninitialized", exitno);
+ return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
+ EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_addr(J, exitno) \
+ ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno)))
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target_arm.h b/libs/luajit-cmake/luajit/src/lj_target_arm.h
new file mode 100644
index 0000000..48f487a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target_arm.h
@@ -0,0 +1,271 @@
+/*
+** Definitions for ARM CPUs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_ARM_H
+#define _LJ_TARGET_ARM_H
+
+/* -- Register IDs --------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC)
+#if LJ_SOFTFP
+#define FPRDEF(_)
+#else
+#define FPRDEF(_) \
+ _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
+ _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15)
+#endif
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_LR,
+
+ /* Calling conventions. */
+ RID_RET = RID_R0,
+ RID_RETLO = RID_R0,
+ RID_RETHI = RID_R1,
+#if LJ_SOFTFP
+ RID_FPRET = RID_R0,
+#else
+ RID_FPRET = RID_D0,
+#endif
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R9, /* Interpreter BASE. */
+ RID_LPC = RID_R6, /* Interpreter PC. */
+ RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R8, /* Interpreter L. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_PC+1,
+ RID_MIN_FPR = RID_MAX_GPR,
+#if LJ_SOFTFP
+ RID_MAX_FPR = RID_MIN_FPR,
+#else
+ RID_MAX_FPR = RID_D15+1,
+#endif
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except sp, lr and pc. */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1))
+#define RSET_GPREVEN \
+ (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \
+ RID2RSET(RID_R8)|RID2RSET(RID_R10))
+#define RSET_GPRODD \
+ (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \
+ RID2RSET(RID_R9)|RID2RSET(RID_R11))
+#if LJ_SOFTFP
+#define RSET_FPR 0
+#else
+#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
+#endif
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+/* ABI-specific register sets. lr is an implicit scratch register. */
+#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12))
+#ifdef __APPLE__
+#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9))
+#else
+#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_
+#endif
+#if LJ_SOFTFP
+#define RSET_SCRATCH_FPR 0
+#else
+#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1))
+#endif
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R0
+#define REGARG_LASTGPR RID_R3
+#define REGARG_NUMGPR 4
+#if LJ_ABI_SOFTFP
+#define REGARG_FIRSTFPR 0
+#define REGARG_LASTFPR 0
+#define REGARG_NUMFPR 0
+#else
+#define REGARG_FIRSTFPR RID_D0
+#define REGARG_LASTFPR RID_D7
+#define REGARG_NUMFPR 8
+#endif
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#define SPS_FIXED 2
+#define SPS_FIRST 2
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+#if !LJ_SOFTFP
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+#endif
+ int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* PC after instruction that caused an exit. Used to find the trace number. */
+#define EXITSTATE_PCREG RID_PC
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+#define EXITSTUB_SPACING 4
+#define EXITSTUBS_PER_GROUP 32
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28))
+#define ARMF_N(r) ((r) << 16)
+#define ARMF_D(r) ((r) << 12)
+#define ARMF_S(r) ((r) << 8)
+#define ARMF_M(r) (r)
+#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7))
+#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r))
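+
+/* E.g. a conditional "addeq r0, r1, r2" is composed from these fields,
+** roughly as the assembler does it:
+**
+**   ARMIns ai = ARMF_CC(ARMI_ADD, CC_EQ) |
+**               ARMF_D(RID_R0) | ARMF_N(RID_R1) | ARMF_M(RID_R2);
+*/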
+
+typedef enum ARMIns {
+ ARMI_CCAL = 0xe0000000,
+  ARMI_S = 0x00100000,
+ ARMI_K12 = 0x02000000,
+ ARMI_KNEG = 0x00200000,
+ ARMI_LS_W = 0x00200000,
+ ARMI_LS_U = 0x00800000,
+ ARMI_LS_P = 0x01000000,
+ ARMI_LS_R = 0x02000000,
+ ARMI_LSX_I = 0x00400000,
+
+ ARMI_AND = 0xe0000000,
+ ARMI_EOR = 0xe0200000,
+ ARMI_SUB = 0xe0400000,
+ ARMI_RSB = 0xe0600000,
+ ARMI_ADD = 0xe0800000,
+ ARMI_ADC = 0xe0a00000,
+ ARMI_SBC = 0xe0c00000,
+ ARMI_RSC = 0xe0e00000,
+ ARMI_TST = 0xe1100000,
+ ARMI_TEQ = 0xe1300000,
+ ARMI_CMP = 0xe1500000,
+ ARMI_CMN = 0xe1700000,
+ ARMI_ORR = 0xe1800000,
+ ARMI_MOV = 0xe1a00000,
+ ARMI_BIC = 0xe1c00000,
+ ARMI_MVN = 0xe1e00000,
+
+ ARMI_NOP = 0xe1a00000,
+
+ ARMI_MUL = 0xe0000090,
+ ARMI_SMULL = 0xe0c00090,
+
+ ARMI_LDR = 0xe4100000,
+ ARMI_LDRB = 0xe4500000,
+ ARMI_LDRH = 0xe01000b0,
+ ARMI_LDRSB = 0xe01000d0,
+ ARMI_LDRSH = 0xe01000f0,
+ ARMI_LDRD = 0xe00000d0,
+ ARMI_STR = 0xe4000000,
+ ARMI_STRB = 0xe4400000,
+ ARMI_STRH = 0xe00000b0,
+ ARMI_STRD = 0xe00000f0,
+ ARMI_PUSH = 0xe92d0000,
+
+ ARMI_B = 0xea000000,
+ ARMI_BL = 0xeb000000,
+ ARMI_BLX = 0xfa000000,
+ ARMI_BLXr = 0xe12fff30,
+
+ /* ARMv6 */
+ ARMI_REV = 0xe6bf0f30,
+ ARMI_SXTB = 0xe6af0070,
+ ARMI_SXTH = 0xe6bf0070,
+ ARMI_UXTB = 0xe6ef0070,
+ ARMI_UXTH = 0xe6ff0070,
+
+ /* ARMv6T2 */
+ ARMI_MOVW = 0xe3000000,
+ ARMI_MOVT = 0xe3400000,
+ ARMI_BFI = 0xe7c00010,
+
+ /* VFP */
+ ARMI_VMOV_D = 0xeeb00b40,
+ ARMI_VMOV_S = 0xeeb00a40,
+ ARMI_VMOVI_D = 0xeeb00b00,
+
+ ARMI_VMOV_R_S = 0xee100a10,
+ ARMI_VMOV_S_R = 0xee000a10,
+ ARMI_VMOV_RR_D = 0xec500b10,
+ ARMI_VMOV_D_RR = 0xec400b10,
+
+ ARMI_VADD_D = 0xee300b00,
+ ARMI_VSUB_D = 0xee300b40,
+ ARMI_VMUL_D = 0xee200b00,
+ ARMI_VMLA_D = 0xee000b00,
+ ARMI_VMLS_D = 0xee000b40,
+ ARMI_VNMLS_D = 0xee100b00,
+ ARMI_VDIV_D = 0xee800b00,
+
+ ARMI_VABS_D = 0xeeb00bc0,
+ ARMI_VNEG_D = 0xeeb10b40,
+ ARMI_VSQRT_D = 0xeeb10bc0,
+
+ ARMI_VCMP_D = 0xeeb40b40,
+ ARMI_VCMPZ_D = 0xeeb50b40,
+
+ ARMI_VMRS = 0xeef1fa10,
+
+ ARMI_VCVT_S32_F32 = 0xeebd0ac0,
+ ARMI_VCVT_S32_F64 = 0xeebd0bc0,
+ ARMI_VCVT_U32_F32 = 0xeebc0ac0,
+ ARMI_VCVT_U32_F64 = 0xeebc0bc0,
+ ARMI_VCVT_F32_S32 = 0xeeb80ac0,
+ ARMI_VCVT_F64_S32 = 0xeeb80bc0,
+ ARMI_VCVT_F32_U32 = 0xeeb80a40,
+ ARMI_VCVT_F64_U32 = 0xeeb80b40,
+ ARMI_VCVT_F32_F64 = 0xeeb70bc0,
+ ARMI_VCVT_F64_F32 = 0xeeb70ac0,
+
+ ARMI_VLDR_S = 0xed100a00,
+ ARMI_VLDR_D = 0xed100b00,
+ ARMI_VSTR_S = 0xed000a00,
+ ARMI_VSTR_D = 0xed000b00,
+} ARMIns;
+
+typedef enum ARMShift {
+ ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR
+} ARMShift;
+
+/* ARM condition codes. */
+typedef enum ARMCC {
+ CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
+ CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
+ CC_HS = CC_CS, CC_LO = CC_CC
+} ARMCC;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target_arm64.h b/libs/luajit-cmake/luajit/src/lj_target_arm64.h
new file mode 100644
index 0000000..d45af2e
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target_arm64.h
@@ -0,0 +1,336 @@
+/*
+** Definitions for ARM64 CPUs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_ARM64_H
+#define _LJ_TARGET_ARM64_H
+
+/* -- Register IDs --------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(X0) _(X1) _(X2) _(X3) _(X4) _(X5) _(X6) _(X7) \
+ _(X8) _(X9) _(X10) _(X11) _(X12) _(X13) _(X14) _(X15) \
+ _(X16) _(X17) _(X18) _(X19) _(X20) _(X21) _(X22) _(X23) \
+ _(X24) _(X25) _(X26) _(X27) _(X28) _(FP) _(LR) _(SP)
+#define FPRDEF(_) \
+ _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
+ _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15) \
+ _(D16) _(D17) _(D18) _(D19) _(D20) _(D21) _(D22) _(D23) \
+ _(D24) _(D25) _(D26) _(D27) _(D28) _(D29) _(D30) _(D31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_LR,
+ RID_ZERO = RID_SP,
+
+ /* Calling conventions. */
+ RID_RET = RID_X0,
+ RID_RETLO = RID_X0,
+ RID_RETHI = RID_X1,
+ RID_FPRET = RID_D0,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_X19, /* Interpreter BASE. */
+ RID_LPC = RID_X21, /* Interpreter PC. */
+ RID_GL = RID_X22, /* Interpreter GL. */
+ RID_LREG = RID_X23, /* Interpreter L. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_X0,
+ RID_MAX_GPR = RID_SP+1,
+ RID_MIN_FPR = RID_MAX_GPR,
+ RID_MAX_FPR = RID_D31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_X0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except for x18, fp, lr, sp and GL (x22). */
+#define RSET_FIXED \
+ (RID2RSET(RID_X18)|RID2RSET(RID_FP)|RID2RSET(RID_LR)|RID2RSET(RID_SP)|\
+ RID2RSET(RID_GL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+/* lr is an implicit scratch register. */
+#define RSET_SCRATCH_GPR (RSET_RANGE(RID_X0, RID_X17+1))
+#define RSET_SCRATCH_FPR \
+ (RSET_RANGE(RID_D0, RID_D7+1)|RSET_RANGE(RID_D16, RID_D31+1))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_X0
+#define REGARG_LASTGPR RID_X7
+#define REGARG_NUMGPR 8
+#define REGARG_FIRSTFPR RID_D0
+#define REGARG_LASTFPR RID_D7
+#define REGARG_NUMFPR 8
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the vm_arm64.dasc file.
+** Pre-allocate some slots to avoid sp adjust in every root trace.
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#define SPS_FIXED 4
+#define SPS_FIRST 2
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
+{
+ while (*p == (LJ_LE ? 0xd503201f : 0x1f2003d5)) p++; /* Skip A64I_NOP. */
+ return p + 3 + exitno;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
+#if LJ_BE
+#define A64I_LE(x) (lj_bswap(x))
+#else
+#define A64I_LE(x) (x)
+#endif
+
+/* Instruction fields. */
+#define A64F_D(r) (r)
+#define A64F_N(r) ((r) << 5)
+#define A64F_A(r) ((r) << 10)
+#define A64F_M(r) ((r) << 16)
+#define A64F_IMMS(x) ((x) << 10)
+#define A64F_IMMR(x) ((x) << 16)
+#define A64F_U16(x) ((x) << 5)
+#define A64F_U12(x) ((x) << 10)
+#define A64F_S26(x) (((uint32_t)(x) & 0x03ffffffu))
+#define A64F_S19(x) (((uint32_t)(x) & 0x7ffffu) << 5)
+#define A64F_S14(x) (((uint32_t)(x) & 0x3fffu) << 5)
+#define A64F_S9(x) ((x) << 12)
+#define A64F_BIT(x) ((x) << 19)
+#define A64F_SH(sh, x) (((sh) << 22) | ((x) << 10))
+#define A64F_EX(ex) (A64I_EX | ((ex) << 13))
+#define A64F_EXSH(ex,x) (A64I_EX | ((ex) << 13) | ((x) << 10))
+#define A64F_FP8(x) ((x) << 13)
+#define A64F_CC(cc) ((cc) << 12)
+#define A64F_LSL16(x) (((x) / 16) << 21)
+#define A64F_BSH(sh) ((sh) << 10)
+
+/* Check for valid field range. */
+#define A64F_S_OK(x, b) ((((x) + (1 << (b-1))) >> (b)) == 0)
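+
+/* E.g. "add x0, x1, #16" can be composed from these fields, roughly as
+** the assembler does it (A64I_K12 is XORed in to select the immediate
+** form), and A64F_S_OK checks a signed displacement against a field width:
+**
+**   A64Ins ai = (A64I_ADDx ^ A64I_K12) |
+**               A64F_D(RID_X0) | A64F_N(RID_X1) | A64F_U12(16);
+**   int ok = A64F_S_OK(delta, 19);  // delta = branch offset for a B.cond
+*/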
+
+typedef enum A64Ins {
+ A64I_S = 0x20000000,
+ A64I_X = 0x80000000,
+ A64I_EX = 0x00200000,
+ A64I_ON = 0x00200000,
+ A64I_K12 = 0x1a000000,
+ A64I_K13 = 0x18000000,
+ A64I_LS_U = 0x01000000,
+ A64I_LS_S = 0x00800000,
+ A64I_LS_R = 0x01200800,
+ A64I_LS_SH = 0x00001000,
+ A64I_LS_UXTWx = 0x00004000,
+ A64I_LS_SXTWx = 0x0000c000,
+ A64I_LS_SXTXx = 0x0000e000,
+ A64I_LS_LSLx = 0x00006000,
+
+ A64I_ADDw = 0x0b000000,
+ A64I_ADDx = 0x8b000000,
+ A64I_ADDSw = 0x2b000000,
+ A64I_ADDSx = 0xab000000,
+ A64I_NEGw = 0x4b0003e0,
+ A64I_NEGx = 0xcb0003e0,
+ A64I_SUBw = 0x4b000000,
+ A64I_SUBx = 0xcb000000,
+ A64I_SUBSw = 0x6b000000,
+ A64I_SUBSx = 0xeb000000,
+
+ A64I_MULw = 0x1b007c00,
+ A64I_MULx = 0x9b007c00,
+ A64I_SMULL = 0x9b207c00,
+
+ A64I_ANDw = 0x0a000000,
+ A64I_ANDx = 0x8a000000,
+ A64I_ANDSw = 0x6a000000,
+ A64I_ANDSx = 0xea000000,
+ A64I_EORw = 0x4a000000,
+ A64I_EORx = 0xca000000,
+ A64I_ORRw = 0x2a000000,
+ A64I_ORRx = 0xaa000000,
+ A64I_TSTw = 0x6a00001f,
+ A64I_TSTx = 0xea00001f,
+
+ A64I_CMPw = 0x6b00001f,
+ A64I_CMPx = 0xeb00001f,
+ A64I_CMNw = 0x2b00001f,
+ A64I_CMNx = 0xab00001f,
+ A64I_CCMPw = 0x7a400000,
+ A64I_CCMPx = 0xfa400000,
+ A64I_CSELw = 0x1a800000,
+ A64I_CSELx = 0x9a800000,
+
+ A64I_ASRw = 0x13007c00,
+ A64I_ASRx = 0x9340fc00,
+ A64I_LSLx = 0xd3400000,
+ A64I_LSRx = 0xd340fc00,
+ A64I_SHRw = 0x1ac02000,
+ A64I_SHRx = 0x9ac02000, /* lsl/lsr/asr/ror x0, x0, x0 */
+ A64I_REVw = 0x5ac00800,
+ A64I_REVx = 0xdac00c00,
+
+ A64I_EXTRw = 0x13800000,
+ A64I_EXTRx = 0x93c00000,
+ A64I_BFMw = 0x33000000,
+ A64I_BFMx = 0xb3400000,
+ A64I_SBFMw = 0x13000000,
+ A64I_SBFMx = 0x93400000,
+ A64I_SXTBw = 0x13001c00,
+ A64I_SXTHw = 0x13003c00,
+ A64I_SXTW = 0x93407c00,
+ A64I_UBFMw = 0x53000000,
+ A64I_UBFMx = 0xd3400000,
+ A64I_UXTBw = 0x53001c00,
+ A64I_UXTHw = 0x53003c00,
+
+ A64I_MOVw = 0x2a0003e0,
+ A64I_MOVx = 0xaa0003e0,
+ A64I_MVNw = 0x2a2003e0,
+ A64I_MVNx = 0xaa2003e0,
+ A64I_MOVKw = 0x72800000,
+ A64I_MOVKx = 0xf2800000,
+ A64I_MOVZw = 0x52800000,
+ A64I_MOVZx = 0xd2800000,
+ A64I_MOVNw = 0x12800000,
+ A64I_MOVNx = 0x92800000,
+
+ A64I_LDRB = 0x39400000,
+ A64I_LDRH = 0x79400000,
+ A64I_LDRw = 0xb9400000,
+ A64I_LDRx = 0xf9400000,
+ A64I_LDRLw = 0x18000000,
+ A64I_LDRLx = 0x58000000,
+ A64I_STRB = 0x39000000,
+ A64I_STRH = 0x79000000,
+ A64I_STRw = 0xb9000000,
+ A64I_STRx = 0xf9000000,
+ A64I_STPw = 0x29000000,
+ A64I_STPx = 0xa9000000,
+ A64I_LDPw = 0x29400000,
+ A64I_LDPx = 0xa9400000,
+
+ A64I_B = 0x14000000,
+ A64I_BCC = 0x54000000,
+ A64I_BL = 0x94000000,
+ A64I_BR = 0xd61f0000,
+ A64I_BLR = 0xd63f0000,
+ A64I_TBZ = 0x36000000,
+ A64I_TBNZ = 0x37000000,
+ A64I_CBZ = 0x34000000,
+ A64I_CBNZ = 0x35000000,
+
+ A64I_NOP = 0xd503201f,
+
+ /* FP */
+ A64I_FADDd = 0x1e602800,
+ A64I_FSUBd = 0x1e603800,
+ A64I_FMADDd = 0x1f400000,
+ A64I_FMSUBd = 0x1f408000,
+ A64I_FNMADDd = 0x1f600000,
+ A64I_FNMSUBd = 0x1f608000,
+ A64I_FMULd = 0x1e600800,
+ A64I_FDIVd = 0x1e601800,
+ A64I_FNEGd = 0x1e614000,
+ A64I_FABS = 0x1e60c000,
+ A64I_FSQRTd = 0x1e61c000,
+ A64I_LDRs = 0xbd400000,
+ A64I_LDRd = 0xfd400000,
+ A64I_STRs = 0xbd000000,
+ A64I_STRd = 0xfd000000,
+ A64I_LDPs = 0x2d400000,
+ A64I_LDPd = 0x6d400000,
+ A64I_STPs = 0x2d000000,
+ A64I_STPd = 0x6d000000,
+ A64I_FCMPd = 0x1e602000,
+ A64I_FCMPZd = 0x1e602008,
+ A64I_FCSELd = 0x1e600c00,
+ A64I_FRINTMd = 0x1e654000,
+ A64I_FRINTPd = 0x1e64c000,
+ A64I_FRINTZd = 0x1e65c000,
+
+ A64I_FCVT_F32_F64 = 0x1e624000,
+ A64I_FCVT_F64_F32 = 0x1e22c000,
+ A64I_FCVT_F32_S32 = 0x1e220000,
+ A64I_FCVT_F64_S32 = 0x1e620000,
+ A64I_FCVT_F32_U32 = 0x1e230000,
+ A64I_FCVT_F64_U32 = 0x1e630000,
+ A64I_FCVT_F32_S64 = 0x9e220000,
+ A64I_FCVT_F64_S64 = 0x9e620000,
+ A64I_FCVT_F32_U64 = 0x9e230000,
+ A64I_FCVT_F64_U64 = 0x9e630000,
+ A64I_FCVT_S32_F64 = 0x1e780000,
+ A64I_FCVT_S32_F32 = 0x1e380000,
+ A64I_FCVT_U32_F64 = 0x1e790000,
+ A64I_FCVT_U32_F32 = 0x1e390000,
+ A64I_FCVT_S64_F64 = 0x9e780000,
+ A64I_FCVT_S64_F32 = 0x9e380000,
+ A64I_FCVT_U64_F64 = 0x9e790000,
+ A64I_FCVT_U64_F32 = 0x9e390000,
+
+ A64I_FMOV_S = 0x1e204000,
+ A64I_FMOV_D = 0x1e604000,
+ A64I_FMOV_R_S = 0x1e260000,
+ A64I_FMOV_S_R = 0x1e270000,
+ A64I_FMOV_R_D = 0x9e660000,
+ A64I_FMOV_D_R = 0x9e670000,
+ A64I_FMOV_DI = 0x1e601000,
+} A64Ins;
+
+typedef enum A64Shift {
+ A64SH_LSL, A64SH_LSR, A64SH_ASR, A64SH_ROR
+} A64Shift;
+
+typedef enum A64Extend {
+ A64EX_UXTB, A64EX_UXTH, A64EX_UXTW, A64EX_UXTX,
+ A64EX_SXTB, A64EX_SXTH, A64EX_SXTW, A64EX_SXTX,
+} A64Extend;
+
+/* ARM condition codes. */
+typedef enum A64CC {
+ CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
+ CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
+ CC_HS = CC_CS, CC_LO = CC_CC
+} A64CC;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target_mips.h b/libs/luajit-cmake/luajit/src/lj_target_mips.h
new file mode 100644
index 0000000..da72d61
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target_mips.h
@@ -0,0 +1,417 @@
+/*
+** Definitions for MIPS CPUs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_MIPS_H
+#define _LJ_TARGET_MIPS_H
+
+/* -- Register IDs --------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
+#if LJ_SOFTFP
+#define FPRDEF(_)
+#else
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#endif
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_ZERO = RID_R0,
+ RID_TMP = RID_RA,
+ RID_GP = RID_R28,
+
+ /* Calling conventions. */
+ RID_RET = RID_R2,
+#if LJ_LE
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R2,
+#else
+ RID_RETHI = RID_R2,
+ RID_RETLO = RID_R3,
+#endif
+#if LJ_SOFTFP
+ RID_FPRET = RID_R2,
+#else
+ RID_FPRET = RID_F0,
+#endif
+ RID_CFUNCADDR = RID_R25,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R16, /* Interpreter BASE. */
+ RID_LPC = RID_R18, /* Interpreter PC. */
+ RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R20, /* Interpreter L. */
+ RID_JGL = RID_R30, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_RA+1,
+ RID_MIN_FPR = RID_MAX_GPR,
+#if LJ_SOFTFP
+ RID_MAX_FPR = RID_MIN_FPR,
+#else
+ RID_MAX_FPR = RID_F31+1,
+#endif
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+  RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* MIPS32: only even regs are used. */
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2, JGL and GP. */
+#define RSET_FIXED \
+ (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
+ RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)|RID2RSET(RID_GP))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#if LJ_SOFTFP
+#define RSET_FPR 0
+#else
+#if LJ_32
+#define RSET_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
+ RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
+#else
+#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
+#endif
+#endif
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR \
+ (RSET_RANGE(RID_R1, RID_R15+1)|\
+ RID2RSET(RID_R24)|RID2RSET(RID_R25))
+#if LJ_SOFTFP
+#define RSET_SCRATCH_FPR 0
+#else
+#if LJ_32
+#define RSET_SCRATCH_FPR \
+ (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
+ RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
+ RID2RSET(RID_F16)|RID2RSET(RID_F18))
+#else
+#define RSET_SCRATCH_FPR RSET_RANGE(RID_F0, RID_F24)
+#endif
+#endif
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R4
+#if LJ_32
+#define REGARG_LASTGPR RID_R7
+#define REGARG_NUMGPR 4
+#else
+#define REGARG_LASTGPR RID_R11
+#define REGARG_NUMGPR 8
+#endif
+#if LJ_ABI_SOFTFP
+#define REGARG_FIRSTFPR 0
+#define REGARG_LASTFPR 0
+#define REGARG_NUMFPR 0
+#else
+#define REGARG_FIRSTFPR RID_F12
+#if LJ_32
+#define REGARG_LASTFPR RID_F14
+#define REGARG_NUMFPR 2
+#else
+#define REGARG_LASTFPR RID_F19
+#define REGARG_NUMFPR 8
+#endif
+#endif
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+*/
+#if LJ_32
+#define SPS_FIXED 5
+#else
+#define SPS_FIXED 4
+#endif
+#define SPS_FIRST 4
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+#if !LJ_SOFTFP
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+#endif
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
+{
+ while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */
+ return p;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define MIPSF_S(r) ((r) << 21)
+#define MIPSF_T(r) ((r) << 16)
+#define MIPSF_D(r) ((r) << 11)
+#define MIPSF_R(r) ((r) << 21)
+#define MIPSF_H(r) ((r) << 16)
+#define MIPSF_G(r) ((r) << 11)
+#define MIPSF_F(r) ((r) << 6)
+#define MIPSF_A(n) ((n) << 6)
+#define MIPSF_M(n) ((n) << 11)
+#define MIPSF_L(n) ((n) << 6)
+
+typedef enum MIPSIns {
+ MIPSI_D = 0x38,
+ MIPSI_DV = 0x10,
+ MIPSI_D32 = 0x3c,
+ /* Integer instructions. */
+ MIPSI_MOVE = 0x00000025,
+ MIPSI_NOP = 0x00000000,
+
+ MIPSI_LI = 0x24000000,
+ MIPSI_LU = 0x34000000,
+ MIPSI_LUI = 0x3c000000,
+
+ MIPSI_AND = 0x00000024,
+ MIPSI_ANDI = 0x30000000,
+ MIPSI_OR = 0x00000025,
+ MIPSI_ORI = 0x34000000,
+ MIPSI_XOR = 0x00000026,
+ MIPSI_XORI = 0x38000000,
+ MIPSI_NOR = 0x00000027,
+
+ MIPSI_SLT = 0x0000002a,
+ MIPSI_SLTU = 0x0000002b,
+ MIPSI_SLTI = 0x28000000,
+ MIPSI_SLTIU = 0x2c000000,
+
+ MIPSI_ADDU = 0x00000021,
+ MIPSI_ADDIU = 0x24000000,
+ MIPSI_SUB = 0x00000022,
+ MIPSI_SUBU = 0x00000023,
+
+#if !LJ_TARGET_MIPSR6
+ MIPSI_MUL = 0x70000002,
+ MIPSI_DIV = 0x0000001a,
+ MIPSI_DIVU = 0x0000001b,
+
+ MIPSI_MOVZ = 0x0000000a,
+ MIPSI_MOVN = 0x0000000b,
+ MIPSI_MFHI = 0x00000010,
+ MIPSI_MFLO = 0x00000012,
+ MIPSI_MULT = 0x00000018,
+#else
+ MIPSI_MUL = 0x00000098,
+ MIPSI_MUH = 0x000000d8,
+ MIPSI_DIV = 0x0000009a,
+ MIPSI_DIVU = 0x0000009b,
+
+ MIPSI_SELEQZ = 0x00000035,
+ MIPSI_SELNEZ = 0x00000037,
+#endif
+
+ MIPSI_SLL = 0x00000000,
+ MIPSI_SRL = 0x00000002,
+ MIPSI_SRA = 0x00000003,
+ MIPSI_ROTR = 0x00200002, /* MIPSXXR2 */
+ MIPSI_DROTR = 0x0020003a,
+ MIPSI_DROTR32 = 0x0020003e,
+ MIPSI_SLLV = 0x00000004,
+ MIPSI_SRLV = 0x00000006,
+ MIPSI_SRAV = 0x00000007,
+ MIPSI_ROTRV = 0x00000046, /* MIPSXXR2 */
+ MIPSI_DROTRV = 0x00000056,
+
+ MIPSI_INS = 0x7c000004, /* MIPSXXR2 */
+
+ MIPSI_SEB = 0x7c000420, /* MIPSXXR2 */
+ MIPSI_SEH = 0x7c000620, /* MIPSXXR2 */
+ MIPSI_WSBH = 0x7c0000a0, /* MIPSXXR2 */
+ MIPSI_DSBH = 0x7c0000a4,
+
+ MIPSI_B = 0x10000000,
+ MIPSI_J = 0x08000000,
+ MIPSI_JAL = 0x0c000000,
+#if !LJ_TARGET_MIPSR6
+ MIPSI_JALX = 0x74000000,
+ MIPSI_JR = 0x00000008,
+#else
+ MIPSI_JR = 0x00000009,
+ MIPSI_BALC = 0xe8000000,
+#endif
+ MIPSI_JALR = 0x0000f809,
+
+ MIPSI_BEQ = 0x10000000,
+ MIPSI_BNE = 0x14000000,
+ MIPSI_BLEZ = 0x18000000,
+ MIPSI_BGTZ = 0x1c000000,
+ MIPSI_BLTZ = 0x04000000,
+ MIPSI_BGEZ = 0x04010000,
+
+ /* Load/store instructions. */
+ MIPSI_LW = 0x8c000000,
+ MIPSI_LD = 0xdc000000,
+ MIPSI_SW = 0xac000000,
+ MIPSI_SD = 0xfc000000,
+ MIPSI_LB = 0x80000000,
+ MIPSI_SB = 0xa0000000,
+ MIPSI_LH = 0x84000000,
+ MIPSI_SH = 0xa4000000,
+ MIPSI_LBU = 0x90000000,
+ MIPSI_LHU = 0x94000000,
+ MIPSI_LWC1 = 0xc4000000,
+ MIPSI_SWC1 = 0xe4000000,
+ MIPSI_LDC1 = 0xd4000000,
+ MIPSI_SDC1 = 0xf4000000,
+
+ /* MIPS64 instructions. */
+ MIPSI_DADD = 0x0000002c,
+ MIPSI_DADDU = 0x0000002d,
+ MIPSI_DADDIU = 0x64000000,
+ MIPSI_DSUB = 0x0000002e,
+ MIPSI_DSUBU = 0x0000002f,
+#if !LJ_TARGET_MIPSR6
+ MIPSI_DDIV = 0x0000001e,
+ MIPSI_DDIVU = 0x0000001f,
+ MIPSI_DMULT = 0x0000001c,
+ MIPSI_DMULTU = 0x0000001d,
+#else
+ MIPSI_DDIV = 0x0000009e,
+ MIPSI_DMOD = 0x000000de,
+ MIPSI_DDIVU = 0x0000009f,
+ MIPSI_DMODU = 0x000000df,
+ MIPSI_DMUL = 0x0000009c,
+ MIPSI_DMUH = 0x000000dc,
+#endif
+
+ MIPSI_DSLL = 0x00000038,
+ MIPSI_DSRL = 0x0000003a,
+ MIPSI_DSLLV = 0x00000014,
+ MIPSI_DSRLV = 0x00000016,
+ MIPSI_DSRA = 0x0000003b,
+ MIPSI_DSRAV = 0x00000017,
+ MIPSI_DSRA32 = 0x0000003f,
+ MIPSI_DSLL32 = 0x0000003c,
+ MIPSI_DSRL32 = 0x0000003e,
+ MIPSI_DSHD = 0x7c000164,
+
+ MIPSI_AADDU = LJ_32 ? MIPSI_ADDU : MIPSI_DADDU,
+ MIPSI_AADDIU = LJ_32 ? MIPSI_ADDIU : MIPSI_DADDIU,
+ MIPSI_ASUBU = LJ_32 ? MIPSI_SUBU : MIPSI_DSUBU,
+ MIPSI_AL = LJ_32 ? MIPSI_LW : MIPSI_LD,
+ MIPSI_AS = LJ_32 ? MIPSI_SW : MIPSI_SD,
+#if LJ_TARGET_MIPSR6
+ MIPSI_LSA = 0x00000005,
+ MIPSI_DLSA = 0x00000015,
+ MIPSI_ALSA = LJ_32 ? MIPSI_LSA : MIPSI_DLSA,
+#endif
+
+ /* Extract/insert instructions. */
+ MIPSI_DEXTM = 0x7c000001,
+ MIPSI_DEXTU = 0x7c000002,
+ MIPSI_DEXT = 0x7c000003,
+ MIPSI_DINSM = 0x7c000005,
+ MIPSI_DINSU = 0x7c000006,
+ MIPSI_DINS = 0x7c000007,
+
+ MIPSI_FLOOR_D = 0x4620000b,
+
+ /* FP instructions. */
+ MIPSI_MOV_S = 0x46000006,
+ MIPSI_MOV_D = 0x46200006,
+#if !LJ_TARGET_MIPSR6
+ MIPSI_MOVT_D = 0x46210011,
+ MIPSI_MOVF_D = 0x46200011,
+#else
+  MIPSI_MIN_D = 0x4620001c,
+  MIPSI_MAX_D = 0x4620001e,
+ MIPSI_SEL_D = 0x46200010,
+#endif
+
+ MIPSI_ABS_D = 0x46200005,
+ MIPSI_NEG_D = 0x46200007,
+
+ MIPSI_ADD_D = 0x46200000,
+ MIPSI_SUB_D = 0x46200001,
+ MIPSI_MUL_D = 0x46200002,
+ MIPSI_DIV_D = 0x46200003,
+ MIPSI_SQRT_D = 0x46200004,
+
+ MIPSI_ADD_S = 0x46000000,
+ MIPSI_SUB_S = 0x46000001,
+
+ MIPSI_CVT_D_S = 0x46000021,
+ MIPSI_CVT_W_S = 0x46000024,
+ MIPSI_CVT_S_D = 0x46200020,
+ MIPSI_CVT_W_D = 0x46200024,
+ MIPSI_CVT_S_W = 0x46800020,
+ MIPSI_CVT_D_W = 0x46800021,
+ MIPSI_CVT_S_L = 0x46a00020,
+ MIPSI_CVT_D_L = 0x46a00021,
+
+ MIPSI_TRUNC_W_S = 0x4600000d,
+ MIPSI_TRUNC_W_D = 0x4620000d,
+ MIPSI_TRUNC_L_S = 0x46000009,
+ MIPSI_TRUNC_L_D = 0x46200009,
+ MIPSI_FLOOR_W_S = 0x4600000f,
+ MIPSI_FLOOR_W_D = 0x4620000f,
+
+ MIPSI_MFC1 = 0x44000000,
+ MIPSI_MTC1 = 0x44800000,
+ MIPSI_DMTC1 = 0x44a00000,
+ MIPSI_DMFC1 = 0x44200000,
+
+#if !LJ_TARGET_MIPSR6
+ MIPSI_BC1F = 0x45000000,
+ MIPSI_BC1T = 0x45010000,
+ MIPSI_C_EQ_D = 0x46200032,
+ MIPSI_C_OLT_S = 0x46000034,
+ MIPSI_C_OLT_D = 0x46200034,
+ MIPSI_C_ULT_D = 0x46200035,
+ MIPSI_C_OLE_D = 0x46200036,
+ MIPSI_C_ULE_D = 0x46200037,
+#else
+ MIPSI_BC1EQZ = 0x45200000,
+ MIPSI_BC1NEZ = 0x45a00000,
+ MIPSI_CMP_EQ_D = 0x46a00002,
+ MIPSI_CMP_LT_S = 0x46800004,
+ MIPSI_CMP_LT_D = 0x46a00004,
+#endif
+
+} MIPSIns;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target_ppc.h b/libs/luajit-cmake/luajit/src/lj_target_ppc.h
new file mode 100644
index 0000000..bc9802a
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target_ppc.h
@@ -0,0 +1,280 @@
+/*
+** Definitions for PPC CPUs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_PPC_H
+#define _LJ_TARGET_PPC_H
+
+/* -- Register IDs --------------------------------------------------------- */
+
+#define GPRDEF(_) \
+ _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \
+ _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \
+ _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
+ _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31)
+#define FPRDEF(_) \
+ _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
+ _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
+ _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
+ _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
+#define VRIDDEF(_)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_TMP = RID_R0,
+
+ /* Calling conventions. */
+ RID_RET = RID_R3,
+ RID_RETHI = RID_R3,
+ RID_RETLO = RID_R4,
+ RID_FPRET = RID_F1,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_R14, /* Interpreter BASE. */
+ RID_LPC = RID_R16, /* Interpreter PC. */
+ RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */
+ RID_LREG = RID_R18, /* Interpreter L. */
+ RID_JGL = RID_R31, /* On-trace: global_State + 32768. */
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_R0,
+ RID_MAX_GPR = RID_R31+1,
+ RID_MIN_FPR = RID_F0,
+ RID_MAX_FPR = RID_F31+1,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+#define RID_NUM_KREF RID_NUM_GPR
+#define RID_MIN_KREF RID_R0
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */
+#define RSET_FIXED \
+ (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\
+ RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
+#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1))
+#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1))
+#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
+#define REGARG_FIRSTGPR RID_R3
+#define REGARG_LASTGPR RID_R10
+#define REGARG_NUMGPR 8
+#define REGARG_FIRSTFPR RID_F1
+#define REGARG_LASTFPR RID_F8
+#define REGARG_NUMFPR 8
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use.
+** [sp+12] tmplo word \
+** [sp+ 8] tmphi word / tmp dword, parameter area for callee
+** [sp+ 4] tmpw, LR of callee
+** [sp+ 0] stack chain
+*/
+#define SPS_FIXED 7
+#define SPS_FIRST 4
+
+/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */
+#define SPOFS_TMPW 4
+#define SPOFS_TMP 8
+#define SPOFS_TMPHI 8
+#define SPOFS_TMPLO 12
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Highest exit + 1 indicates stack check. */
+#define EXITSTATE_CHECKEXIT 1
+
+/* Return the address of a per-trace exit stub. */
+static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
+{
+ while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */
+ return p + 3 + exitno;
+}
+/* Avoid dependence on lj_jit.h if only including lj_target.h. */
+#define exitstub_trace_addr(T, exitno) \
+ exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
+
+/* -- Instructions -------------------------------------------------------- */
+
+/* Instruction fields. */
+#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22))
+#define PPCF_T(r) ((r) << 21)
+#define PPCF_A(r) ((r) << 16)
+#define PPCF_B(r) ((r) << 11)
+#define PPCF_C(r) ((r) << 6)
+#define PPCF_MB(n) ((n) << 6)
+#define PPCF_ME(n) ((n) << 1)
+#define PPCF_Y 0x00200000
+#define PPCF_DOT 0x00000001
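+
+/* E.g. "add r3, r4, r5" composed from these fields:
+**
+**   PPCIns pi = PPCI_ADD | PPCF_T(RID_R3) | PPCF_A(RID_R4) | PPCF_B(RID_R5);
+*/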
+
+typedef enum PPCIns {
+ /* Integer instructions. */
+ PPCI_MR = 0x7c000378,
+ PPCI_NOP = 0x60000000,
+
+ PPCI_LI = 0x38000000,
+ PPCI_LIS = 0x3c000000,
+
+ PPCI_ADD = 0x7c000214,
+ PPCI_ADDC = 0x7c000014,
+ PPCI_ADDO = 0x7c000614,
+ PPCI_ADDE = 0x7c000114,
+ PPCI_ADDZE = 0x7c000194,
+ PPCI_ADDME = 0x7c0001d4,
+ PPCI_ADDI = 0x38000000,
+ PPCI_ADDIS = 0x3c000000,
+ PPCI_ADDIC = 0x30000000,
+ PPCI_ADDICDOT = 0x34000000,
+
+ PPCI_SUBF = 0x7c000050,
+ PPCI_SUBFC = 0x7c000010,
+ PPCI_SUBFO = 0x7c000450,
+ PPCI_SUBFE = 0x7c000110,
+ PPCI_SUBFZE = 0x7c000190,
+ PPCI_SUBFME = 0x7c0001d0,
+ PPCI_SUBFIC = 0x20000000,
+
+ PPCI_NEG = 0x7c0000d0,
+
+ PPCI_AND = 0x7c000038,
+ PPCI_ANDC = 0x7c000078,
+ PPCI_NAND = 0x7c0003b8,
+ PPCI_ANDIDOT = 0x70000000,
+ PPCI_ANDISDOT = 0x74000000,
+
+ PPCI_OR = 0x7c000378,
+ PPCI_NOR = 0x7c0000f8,
+ PPCI_ORI = 0x60000000,
+ PPCI_ORIS = 0x64000000,
+
+ PPCI_XOR = 0x7c000278,
+ PPCI_EQV = 0x7c000238,
+ PPCI_XORI = 0x68000000,
+ PPCI_XORIS = 0x6c000000,
+
+ PPCI_CMPW = 0x7c000000,
+ PPCI_CMPLW = 0x7c000040,
+ PPCI_CMPWI = 0x2c000000,
+ PPCI_CMPLWI = 0x28000000,
+
+ PPCI_MULLW = 0x7c0001d6,
+ PPCI_MULLI = 0x1c000000,
+ PPCI_MULLWO = 0x7c0005d6,
+
+ PPCI_EXTSB = 0x7c000774,
+ PPCI_EXTSH = 0x7c000734,
+
+ PPCI_SLW = 0x7c000030,
+ PPCI_SRW = 0x7c000430,
+ PPCI_SRAW = 0x7c000630,
+ PPCI_SRAWI = 0x7c000670,
+
+ PPCI_RLWNM = 0x5c000000,
+ PPCI_RLWINM = 0x54000000,
+ PPCI_RLWIMI = 0x50000000,
+
+ PPCI_B = 0x48000000,
+ PPCI_BL = 0x48000001,
+ PPCI_BC = 0x40800000,
+ PPCI_BCL = 0x40800001,
+ PPCI_BCTR = 0x4e800420,
+ PPCI_BCTRL = 0x4e800421,
+
+ PPCI_CRANDC = 0x4c000102,
+ PPCI_CRXOR = 0x4c000182,
+ PPCI_CRAND = 0x4c000202,
+ PPCI_CREQV = 0x4c000242,
+ PPCI_CRORC = 0x4c000342,
+ PPCI_CROR = 0x4c000382,
+
+ PPCI_MFLR = 0x7c0802a6,
+ PPCI_MTCTR = 0x7c0903a6,
+
+ PPCI_MCRXR = 0x7c000400,
+
+ /* Load/store instructions. */
+ PPCI_LWZ = 0x80000000,
+ PPCI_LBZ = 0x88000000,
+ PPCI_STW = 0x90000000,
+ PPCI_STB = 0x98000000,
+ PPCI_LHZ = 0xa0000000,
+ PPCI_LHA = 0xa8000000,
+ PPCI_STH = 0xb0000000,
+
+ PPCI_STWU = 0x94000000,
+
+ PPCI_LFS = 0xc0000000,
+ PPCI_LFD = 0xc8000000,
+ PPCI_STFS = 0xd0000000,
+ PPCI_STFD = 0xd8000000,
+
+ PPCI_LWZX = 0x7c00002e,
+ PPCI_LBZX = 0x7c0000ae,
+ PPCI_STWX = 0x7c00012e,
+ PPCI_STBX = 0x7c0001ae,
+ PPCI_LHZX = 0x7c00022e,
+ PPCI_LHAX = 0x7c0002ae,
+ PPCI_STHX = 0x7c00032e,
+
+ PPCI_LWBRX = 0x7c00042c,
+ PPCI_STWBRX = 0x7c00052c,
+
+ PPCI_LFSX = 0x7c00042e,
+ PPCI_LFDX = 0x7c0004ae,
+ PPCI_STFSX = 0x7c00052e,
+ PPCI_STFDX = 0x7c0005ae,
+
+ /* FP instructions. */
+ PPCI_FMR = 0xfc000090,
+ PPCI_FNEG = 0xfc000050,
+ PPCI_FABS = 0xfc000210,
+
+ PPCI_FRSP = 0xfc000018,
+ PPCI_FCTIWZ = 0xfc00001e,
+
+ PPCI_FADD = 0xfc00002a,
+ PPCI_FSUB = 0xfc000028,
+ PPCI_FMUL = 0xfc000032,
+ PPCI_FDIV = 0xfc000024,
+ PPCI_FSQRT = 0xfc00002c,
+
+ PPCI_FMADD = 0xfc00003a,
+ PPCI_FMSUB = 0xfc000038,
+ PPCI_FNMSUB = 0xfc00003c,
+
+ PPCI_FCMPU = 0xfc000000,
+ PPCI_FSEL = 0xfc00002e,
+} PPCIns;
+
+typedef enum PPCCC {
+ CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO
+} PPCCC;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_target_x86.h b/libs/luajit-cmake/luajit/src/lj_target_x86.h
new file mode 100644
index 0000000..69cb8ca
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_target_x86.h
@@ -0,0 +1,357 @@
+/*
+** Definitions for x86 and x64 CPUs.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TARGET_X86_H
+#define _LJ_TARGET_X86_H
+
+/* -- Register IDs -------------------------------------------------------- */
+
+#if LJ_64
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \
+ _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \
+ _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15)
+#else
+#define GPRDEF(_) \
+ _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI)
+#define FPRDEF(_) \
+ _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
+#endif
+#define VRIDDEF(_) \
+ _(MRM) _(RIP)
+
+#define RIDENUM(name) RID_##name,
+
+enum {
+ GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
+ FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
+ RID_MAX,
+ RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
+ RID_RIP = RID_MAX+5, /* Pseudo-id for RIP (x64 only), rm bits = 5. */
+
+ /* Calling conventions. */
+ RID_SP = RID_ESP,
+ RID_RET = RID_EAX,
+#if LJ_64
+ RID_FPRET = RID_XMM0,
+#endif
+ RID_RETLO = RID_EAX,
+ RID_RETHI = RID_EDX,
+
+ /* These definitions must match with the *.dasc file(s): */
+ RID_BASE = RID_EDX, /* Interpreter BASE. */
+#if LJ_64 && !LJ_ABI_WIN
+ RID_LPC = RID_EBX, /* Interpreter PC. */
+ RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */
+#else
+ RID_LPC = RID_ESI, /* Interpreter PC. */
+ RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */
+#endif
+
+ /* Register ranges [min, max) and number of registers. */
+ RID_MIN_GPR = RID_EAX,
+ RID_MIN_FPR = RID_XMM0,
+ RID_MAX_GPR = RID_MIN_FPR,
+ RID_MAX_FPR = RID_MAX,
+ RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
+ RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
+};
+
+/* -- Register sets ------------------------------------------------------- */
+
+/* Make use of all registers, except the stack pointer (and maybe DISPATCH). */
+#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) \
+ - RID2RSET(RID_ESP) \
+ - LJ_GC64*RID2RSET(RID_DISPATCH))
+#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
+#define RSET_ALL (RSET_GPR|RSET_FPR)
+#define RSET_INIT RSET_ALL
+
+#if LJ_64
+/* Note: this requires the use of FORCE_REX! */
+#define RSET_GPR8 RSET_GPR
+#else
+#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1))
+#endif
+
+/* ABI-specific register sets. */
+#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX))
+#if LJ_64
+#if LJ_ABI_WIN
+/* Windows x64 ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1))
+#define REGARG_GPRS \
+ (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5))
+#define REGARG_NUMGPR 4
+#define REGARG_NUMFPR 4
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM3
+#define STACKARG_OFS (4*8)
+#else
+/* The rest of the civilized x64 world has a common ABI. */
+#define RSET_SCRATCH \
+ (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR)
+#define REGARG_GPRS \
+ (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \
+ <<5))<<5))<<5))<<5))<<5))
+#define REGARG_NUMGPR 6
+#define REGARG_NUMFPR 8
+#define REGARG_FIRSTFPR RID_XMM0
+#define REGARG_LASTFPR RID_XMM7
+#define STACKARG_OFS 0
+#endif
+#else
+/* Common x86 ABI. */
+#define RSET_SCRATCH (RSET_ACD|RSET_FPR)
+#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */
+#define REGARG_NUMGPR 2 /* Fastcall only. */
+#define REGARG_NUMFPR 0
+#define STACKARG_OFS 0
+#endif
+
+#if LJ_64
+/* Prefer the low 8 regs of each type to reduce REX prefixes. */
+#undef rset_picktop
+#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18)
+#endif
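+
+/* Sketch of the trick: lj_fls() alone would prefer the highest register
+** number. Byte-swapping the register set first moves the low byte (regs
+** 0-7) into the most significant position, and XORing the resulting bit
+** index with 0x18 swaps the byte index back while keeping the bit within
+** the byte, e.g. a set containing EDI and R15D picks EDI.
+*/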
+
+/* -- Spill slots --------------------------------------------------------- */
+
+/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
+**
+** SPS_FIXED: Available fixed spill slots in interpreter frame.
+** This definition must match with the *.dasc file(s).
+**
+** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
+*/
+#if LJ_64
+#if LJ_ABI_WIN
+#define SPS_FIXED (4*2)
+#define SPS_FIRST (4*2) /* Don't use callee register save area. */
+#else
+#if LJ_GC64
+#define SPS_FIXED 2
+#else
+#define SPS_FIXED 4
+#endif
+#define SPS_FIRST 2
+#endif
+#else
+#define SPS_FIXED 6
+#define SPS_FIRST 2
+#endif
+
+#define SPOFS_TMP 0
+
+#define sps_scale(slot) (4 * (int32_t)(slot))
+#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
+
+/* -- Exit state ---------------------------------------------------------- */
+
+/* This definition must match with the *.dasc file(s). */
+typedef struct {
+ lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
+ intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
+ int32_t spill[256]; /* Spill slots. */
+} ExitState;
+
+/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
+#define EXITSTUB_SPACING (2+2)
+#define EXITSTUBS_PER_GROUP 32
+
+#define EXITTRACE_VMSTATE 1 /* g->vmstate has traceno on exit. */
+
+/* -- x86 ModRM operand encoding ------------------------------------------ */
+
+typedef enum {
+ XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0,
+ XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0,
+ XM_MASK = 0xc0
+} x86Mode;
+
+/* Structure to hold variable ModRM operand. */
+typedef struct {
+ int32_t ofs; /* Offset. */
+ uint8_t base; /* Base register or RID_NONE. */
+ uint8_t idx; /* Index register or RID_NONE. */
+ uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */
+} x86ModRM;
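+
+/* Example: the memory operand [ebx+ecx*4+0x10] would be described with
+** base = RID_EBX, idx = RID_ECX, scale = XM_SCALE4 and ofs = 0x10, from
+** which the assembler emits the matching ModRM (and SIB) bytes.
+*/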
+
+/* -- Opcodes ------------------------------------------------------------- */
+
+/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */
+#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24)))
+#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24)))
+#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24)))
+#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24)))
+#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24)))
+#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
+#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
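+
+/* Decoding example: the low byte stores -(length+1) as a signed byte and
+** the opcode bytes sit above it. XO_(8b) = 0x8b0000fe is the one-byte
+** opcode 8b (0xfe = -2), XO_0f(b6) = 0xb60f00fd is the two-byte opcode
+** 0f b6 (0xfd = -3). One uint32_t thus carries both the bytes and the
+** length of a variable-length opcode.
+*/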
+
+#define XV_660f38(o) ((uint32_t)(0x79e2c4 + (0x##o<<24)))
+#define XV_f20f38(o) ((uint32_t)(0x7be2c4 + (0x##o<<24)))
+#define XV_f20f3a(o) ((uint32_t)(0x7be3c4 + (0x##o<<24)))
+#define XV_f30f38(o) ((uint32_t)(0x7ae2c4 + (0x##o<<24)))
+
+/* This list of x86 opcodes is not intended to be complete. Opcodes are only
+** included when needed. Take a look at DynASM or jit.dis_x86 to see the
+** whole mess.
+*/
+typedef enum {
+ /* Fixed length opcodes. XI_* prefix. */
+ XI_O16 = 0x66,
+ XI_NOP = 0x90,
+ XI_XCHGa = 0x90,
+ XI_CALL = 0xe8,
+ XI_JMP = 0xe9,
+ XI_JMPs = 0xeb,
+ XI_PUSH = 0x50, /* Really 50+r. */
+ XI_JCCs = 0x70, /* Really 7x. */
+ XI_JCCn = 0x80, /* Really 0f8x. */
+ XI_LEA = 0x8d,
+ XI_MOVrib = 0xb0, /* Really b0+r. */
+ XI_MOVri = 0xb8, /* Really b8+r. */
+ XI_ARITHib = 0x80,
+ XI_ARITHi = 0x81,
+ XI_ARITHi8 = 0x83,
+ XI_PUSHi8 = 0x6a,
+ XI_TESTb = 0x84,
+ XI_TEST = 0x85,
+ XI_INT3 = 0xcc,
+ XI_MOVmi = 0xc7,
+ XI_GROUP5 = 0xff,
+
+ /* Note: little-endian byte-order! */
+ XI_FLDZ = 0xeed9,
+ XI_FLD1 = 0xe8d9,
+ XI_FDUP = 0xc0d9, /* Really fld st0. */
+ XI_FPOP = 0xd8dd, /* Really fstp st0. */
+ XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
+ XI_FRNDINT = 0xfcd9,
+ XI_FSCALE = 0xfdd9,
+ XI_FYL2X = 0xf1d9,
+
+ /* VEX-encoded instructions. XV_* prefix. */
+ XV_RORX = XV_f20f3a(f0),
+ XV_SARX = XV_f30f38(f7),
+ XV_SHLX = XV_660f38(f7),
+ XV_SHRX = XV_f20f38(f7),
+
+ /* Variable-length opcodes. XO_* prefix. */
+ XO_OR = XO_(0b),
+ XO_MOV = XO_(8b),
+ XO_MOVto = XO_(89),
+ XO_MOVtow = XO_66(89),
+ XO_MOVtob = XO_(88),
+ XO_MOVmi = XO_(c7),
+ XO_MOVmib = XO_(c6),
+ XO_LEA = XO_(8d),
+ XO_ARITHib = XO_(80),
+ XO_ARITHi = XO_(81),
+ XO_ARITHi8 = XO_(83),
+ XO_ARITHiw8 = XO_66(83),
+ XO_SHIFTi = XO_(c1),
+ XO_SHIFT1 = XO_(d1),
+ XO_SHIFTcl = XO_(d3),
+ XO_IMUL = XO_0f(af),
+ XO_IMULi = XO_(69),
+ XO_IMULi8 = XO_(6b),
+ XO_CMP = XO_(3b),
+ XO_TESTb = XO_(84),
+ XO_TEST = XO_(85),
+ XO_GROUP3b = XO_(f6),
+ XO_GROUP3 = XO_(f7),
+ XO_GROUP5b = XO_(fe),
+ XO_GROUP5 = XO_(ff),
+ XO_MOVZXb = XO_0f(b6),
+ XO_MOVZXw = XO_0f(b7),
+ XO_MOVSXb = XO_0f(be),
+ XO_MOVSXw = XO_0f(bf),
+ XO_MOVSXd = XO_(63),
+ XO_BSWAP = XO_0f(c8),
+ XO_CMOV = XO_0f(40),
+
+ XO_MOVSD = XO_f20f(10),
+ XO_MOVSDto = XO_f20f(11),
+ XO_MOVSS = XO_f30f(10),
+ XO_MOVSSto = XO_f30f(11),
+ XO_MOVLPD = XO_660f(12),
+ XO_MOVAPS = XO_0f(28),
+ XO_XORPS = XO_0f(57),
+ XO_ANDPS = XO_0f(54),
+ XO_ADDSD = XO_f20f(58),
+ XO_SUBSD = XO_f20f(5c),
+ XO_MULSD = XO_f20f(59),
+ XO_DIVSD = XO_f20f(5e),
+ XO_SQRTSD = XO_f20f(51),
+ XO_MINSD = XO_f20f(5d),
+ XO_MAXSD = XO_f20f(5f),
+ XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
+ XO_UCOMISD = XO_660f(2e),
+ XO_CVTSI2SD = XO_f20f(2a),
+ XO_CVTTSD2SI= XO_f20f(2c),
+ XO_CVTSI2SS = XO_f30f(2a),
+ XO_CVTTSS2SI= XO_f30f(2c),
+ XO_CVTSS2SD = XO_f30f(5a),
+ XO_CVTSD2SS = XO_f20f(5a),
+ XO_ADDSS = XO_f30f(58),
+ XO_MOVD = XO_660f(6e),
+ XO_MOVDto = XO_660f(7e),
+
+ XO_FLDd = XO_(d9), XOg_FLDd = 0,
+ XO_FLDq = XO_(dd), XOg_FLDq = 0,
+ XO_FILDd = XO_(db), XOg_FILDd = 0,
+ XO_FILDq = XO_(df), XOg_FILDq = 5,
+ XO_FSTPd = XO_(d9), XOg_FSTPd = 3,
+ XO_FSTPq = XO_(dd), XOg_FSTPq = 3,
+ XO_FISTPq = XO_(df), XOg_FISTPq = 7,
+ XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1,
+ XO_FADDq = XO_(dc), XOg_FADDq = 0,
+ XO_FLDCW = XO_(d9), XOg_FLDCW = 5,
+ XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7
+} x86Op;
+
+/* x86 opcode groups. */
+typedef uint32_t x86Group;
+
+#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g)))
+#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g)
+#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000)))
+#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000)))
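+
+/* Packing sketch: an x86Group holds the imm8-form opcode (bits 16-23), the
+** imm32-form opcode (bits 8-15) and the ModRM /digit (bits 0-7). E.g.
+** XG_ARITHi(XOg_ADD) packs 83/81 with digit 0; XG_TOXOi8() then rebuilds
+** XO_(83) from the group when the immediate fits in 8 bits.
+*/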
+
+#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27)))
+#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27)))
+
+typedef enum {
+ XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP,
+ XOg_X_IMUL
+} x86Arith;
+
+typedef enum {
+ XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR
+} x86Shift;
+
+typedef enum {
+ XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV
+} x86Group3;
+
+typedef enum {
+ XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH
+} x86Group5;
+
+/* x86 condition codes. */
+typedef enum {
+ CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE,
+ CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE,
+ CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB,
+ CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE,
+ CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL,
+ CC_NG = CC_LE, CC_G = CC_NLE
+} x86CC;
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_trace.c b/libs/luajit-cmake/luajit/src/lj_trace.c
new file mode 100644
index 0000000..c232939
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_trace.c
@@ -0,0 +1,987 @@
+/*
+** Trace management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_trace_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_debug.h"
+#include "lj_str.h"
+#include "lj_frame.h"
+#include "lj_state.h"
+#include "lj_bc.h"
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_mcode.h"
+#include "lj_trace.h"
+#include "lj_snap.h"
+#include "lj_gdbjit.h"
+#include "lj_record.h"
+#include "lj_asm.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+#include "lj_target.h"
+#include "lj_prng.h"
+
+/* -- Error handling ------------------------------------------------------ */
+
+/* Synchronous abort with error message. */
+void lj_trace_err(jit_State *J, TraceError e)
+{
+ setnilV(&J->errinfo); /* No error info. */
+ setintV(J->L->top++, (int32_t)e);
+ lj_err_throw(J->L, LUA_ERRRUN);
+}
+
+/* Synchronous abort with error message and caller-provided error info. */
+void lj_trace_err_info(jit_State *J, TraceError e)
+{
+ setintV(J->L->top++, (int32_t)e);
+ lj_err_throw(J->L, LUA_ERRRUN);
+}
+
+/* -- Trace management ---------------------------------------------------- */
+
+/* The current trace is first assembled in J->cur. The variable length
+** arrays point to shared, growable buffers (J->irbuf etc.). When trace
+** recording ends successfully, the current trace and its data structures
+** are copied to a new (compact) GCtrace object.
+*/
+
+/* Find a free trace number. */
+static TraceNo trace_findfree(jit_State *J)
+{
+ MSize osz, lim;
+ if (J->freetrace == 0)
+ J->freetrace = 1;
+ for (; J->freetrace < J->sizetrace; J->freetrace++)
+ if (traceref(J, J->freetrace) == NULL)
+ return J->freetrace++;
+ /* Need to grow trace array. */
+ lim = (MSize)J->param[JIT_P_maxtrace] + 1;
+ if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
+ osz = J->sizetrace;
+ if (osz >= lim)
+ return 0; /* Too many traces. */
+ lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
+ for (; osz < J->sizetrace; osz++)
+ setgcrefnull(J->trace[osz]);
+ return J->freetrace;
+}
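+
+/* Note: J->freetrace caches the lowest trace number that may be free, so
+** the scan above usually stops immediately. The trace array grows on
+** demand up to the JIT_P_maxtrace parameter, clamped to [2, 65535].
+*/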
+
+#define TRACE_APPENDVEC(field, szfield, tp) \
+ T->field = (tp *)p; \
+ memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
+ p += J->cur.szfield*sizeof(tp);
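+
+/* The macro appends one variable-sized vector of the finished trace to the
+** single block allocated for the compacted GCtrace, retargets the field at
+** the copy and advances the write pointer p.
+*/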
+
+#ifdef LUAJIT_USE_PERFTOOLS
+/*
+** Create symbol table of JIT-compiled code. For use with Linux perf tools.
+** Example usage:
+** perf record -f -e cycles luajit test.lua
+** perf report -s symbol
+** rm perf.data /tmp/perf-*.map
+*/
+#include <stdio.h>
+#include <unistd.h>
+
+static void perftools_addtrace(GCtrace *T)
+{
+ static FILE *fp;
+ GCproto *pt = &gcref(T->startpt)->pt;
+ const BCIns *startpc = mref(T->startpc, const BCIns);
+ const char *name = proto_chunknamestr(pt);
+ BCLine lineno;
+ if (name[0] == '@' || name[0] == '=')
+ name++;
+ else
+ name = "(string)";
+ lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
+ "trace PC out of range");
+ lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
+ if (!fp) {
+ char fname[40];
+ sprintf(fname, "/tmp/perf-%d.map", getpid());
+ if (!(fp = fopen(fname, "w"))) return;
+ setlinebuf(fp);
+ }
+ fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
+ (long)T->mcode, T->szmcode, T->traceno, name, lineno);
+}
+#endif
+
+/* Allocate space for copy of T. */
+GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
+{
+ size_t sztr = ((sizeof(GCtrace)+7)&~7);
+ size_t szins = (T->nins-T->nk)*sizeof(IRIns);
+ size_t sz = sztr + szins +
+ T->nsnap*sizeof(SnapShot) +
+ T->nsnapmap*sizeof(SnapEntry);
+ GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
+ char *p = (char *)T2 + sztr;
+ T2->gct = ~LJ_TTRACE;
+ T2->marked = 0;
+ T2->traceno = 0;
+ T2->ir = (IRIns *)p - T->nk;
+ T2->nins = T->nins;
+ T2->nk = T->nk;
+ T2->nsnap = T->nsnap;
+ T2->nsnapmap = T->nsnapmap;
+ memcpy(p, T->ir + T->nk, szins);
+ return T2;
+}
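+
+/* Resulting layout of the compacted trace (one allocation):
+**   [ GCtrace header | IR instructions | snapshots | snapshot map ]
+** T2->ir is biased by -nk so that IR references, which start at T->nk,
+** index directly into the copied instruction array.
+*/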
+
+/* Save current trace by copying and compacting it. */
+static void trace_save(jit_State *J, GCtrace *T)
+{
+ size_t sztr = ((sizeof(GCtrace)+7)&~7);
+ size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
+ char *p = (char *)T + sztr;
+ memcpy(T, &J->cur, sizeof(GCtrace));
+ setgcrefr(T->nextgc, J2G(J)->gc.root);
+ setgcrefp(J2G(J)->gc.root, T);
+ newwhite(J2G(J), T);
+ T->gct = ~LJ_TTRACE;
+ T->ir = (IRIns *)p - J->cur.nk; /* The IR has already been copied above. */
+ p += szins;
+ TRACE_APPENDVEC(snap, nsnap, SnapShot)
+ TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
+ J->cur.traceno = 0;
+ J->curfinal = NULL;
+ setgcrefp(J->trace[T->traceno], T);
+ lj_gc_barriertrace(J2G(J), T->traceno);
+ lj_gdbjit_addtrace(J, T);
+#ifdef LUAJIT_USE_PERFTOOLS
+ perftools_addtrace(T);
+#endif
+}
+
+void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
+{
+ jit_State *J = G2J(g);
+ if (T->traceno) {
+ lj_gdbjit_deltrace(J, T);
+ if (T->traceno < J->freetrace)
+ J->freetrace = T->traceno;
+ setgcrefnull(J->trace[T->traceno]);
+ }
+ lj_mem_free(g, T,
+ ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
+ T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
+}
+
+/* Re-enable compiling a prototype by unpatching any modified bytecode. */
+void lj_trace_reenableproto(GCproto *pt)
+{
+ if ((pt->flags & PROTO_ILOOP)) {
+ BCIns *bc = proto_bc(pt);
+ BCPos i, sizebc = pt->sizebc;
+ pt->flags &= ~PROTO_ILOOP;
+ if (bc_op(bc[0]) == BC_IFUNCF)
+ setbc_op(&bc[0], BC_FUNCF);
+ for (i = 1; i < sizebc; i++) {
+ BCOp op = bc_op(bc[i]);
+ if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
+ setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
+ }
+ }
+}
+
+/* Unpatch the bytecode modified by a root trace. */
+static void trace_unpatch(jit_State *J, GCtrace *T)
+{
+ BCOp op = bc_op(T->startins);
+ BCIns *pc = mref(T->startpc, BCIns);
+ UNUSED(J);
+ if (op == BC_JMP)
+ return; /* No need to unpatch branches in parent traces (yet). */
+ switch (bc_op(*pc)) {
+ case BC_JFORL:
+ lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
+ *pc = T->startins;
+ pc += bc_j(T->startins);
+ lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
+ setbc_op(pc, BC_FORI);
+ break;
+ case BC_JITERL:
+ case BC_JLOOP:
+ lj_assertJ(op == BC_ITERL || op == BC_ITERN || op == BC_LOOP ||
+ bc_isret(op), "bad original bytecode %d", op);
+ *pc = T->startins;
+ break;
+ case BC_JMP:
+ lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
+ pc += bc_j(*pc)+2;
+ if (bc_op(*pc) == BC_JITERL) {
+ lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
+ *pc = T->startins;
+ }
+ break;
+ case BC_JFUNCF:
+ lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
+ *pc = T->startins;
+ break;
+ default: /* Already unpatched. */
+ break;
+ }
+}
+
+/* Flush a root trace. */
+static void trace_flushroot(jit_State *J, GCtrace *T)
+{
+ GCproto *pt = &gcref(T->startpt)->pt;
+ lj_assertJ(T->root == 0, "not a root trace");
+ lj_assertJ(pt != NULL, "trace has no prototype");
+ /* First unpatch any modified bytecode. */
+ trace_unpatch(J, T);
+ /* Unlink root trace from chain anchored in prototype. */
+ if (pt->trace == T->traceno) { /* Trace is first in chain. Easy. */
+ pt->trace = T->nextroot;
+ } else if (pt->trace) { /* Otherwise search in chain of root traces. */
+ GCtrace *T2 = traceref(J, pt->trace);
+ if (T2) {
+ for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
+ if (T2->nextroot == T->traceno) {
+ T2->nextroot = T->nextroot; /* Unlink from chain. */
+ break;
+ }
+ }
+ }
+}
+
+/* Flush a trace. Only root traces are considered. */
+void lj_trace_flush(jit_State *J, TraceNo traceno)
+{
+ if (traceno > 0 && traceno < J->sizetrace) {
+ GCtrace *T = traceref(J, traceno);
+ if (T && T->root == 0)
+ trace_flushroot(J, T);
+ }
+}
+
+/* Flush all traces associated with a prototype. */
+void lj_trace_flushproto(global_State *g, GCproto *pt)
+{
+ while (pt->trace != 0)
+ trace_flushroot(G2J(g), traceref(G2J(g), pt->trace));
+}
+
+/* Flush all traces. */
+int lj_trace_flushall(lua_State *L)
+{
+ jit_State *J = L2J(L);
+ ptrdiff_t i;
+ if ((J2G(J)->hookmask & HOOK_GC))
+ return 1;
+ for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
+ GCtrace *T = traceref(J, i);
+ if (T) {
+ if (T->root == 0)
+ trace_flushroot(J, T);
+ lj_gdbjit_deltrace(J, T);
+ T->traceno = T->link = 0; /* Blacklist the link for cont_stitch. */
+ setgcrefnull(J->trace[i]);
+ }
+ }
+ J->cur.traceno = 0;
+ J->freetrace = 0;
+ /* Clear penalty cache. */
+ memset(J->penalty, 0, sizeof(J->penalty));
+ /* Free the whole machine code and invalidate all exit stub groups. */
+ lj_mcode_free(J);
+ memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "flush"));
+ );
+ return 0;
+}
+
+/* Initialize JIT compiler state. */
+void lj_trace_initstate(global_State *g)
+{
+ jit_State *J = G2J(g);
+ TValue *tv;
+
+ /* Initialize aligned SIMD constants. */
+ tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
+ tv[0].u64 = U64x(7fffffff,ffffffff);
+ tv[1].u64 = U64x(7fffffff,ffffffff);
+ tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
+ tv[0].u64 = U64x(80000000,00000000);
+ tv[1].u64 = U64x(80000000,00000000);
+
+ /* Initialize 32/64 bit constants. */
+#if LJ_TARGET_X86ORX64
+ J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);
+#if LJ_32
+ J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);
+#endif
+ J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);
+ J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
+#endif
+#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
+ J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);
+#endif
+#if LJ_TARGET_PPC
+ J->k32[LJ_K32_2P52_2P31] = 0x59800004;
+ J->k32[LJ_K32_2P52] = 0x59800000;
+#endif
+#if LJ_TARGET_PPC || LJ_TARGET_MIPS
+ J->k32[LJ_K32_2P31] = 0x4f000000;
+#endif
+#if LJ_TARGET_MIPS
+ J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);
+#if LJ_64
+ J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);
+ J->k32[LJ_K32_2P63] = 0x5f000000;
+ J->k32[LJ_K32_M2P64] = 0xdf800000;
+#endif
+#endif
+}
+
+/* Free everything associated with the JIT compiler state. */
+void lj_trace_freestate(global_State *g)
+{
+ jit_State *J = G2J(g);
+#ifdef LUA_USE_ASSERT
+ { /* This assumes all traces have already been freed. */
+ ptrdiff_t i;
+ for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
+ lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
+ "trace still allocated");
+ }
+#endif
+ lj_mcode_free(J);
+ lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
+ lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
+ lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
+ lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
+}
+
+/* -- Penalties and blacklisting ------------------------------------------ */
+
+/* Blacklist a bytecode instruction. */
+static void blacklist_pc(GCproto *pt, BCIns *pc)
+{
+ if (bc_op(*pc) == BC_ITERN) {
+ setbc_op(pc, BC_ITERC);
+ setbc_op(pc+1+bc_j(pc[1]), BC_JMP);
+ } else {
+ setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
+ pt->flags |= PROTO_ILOOP;
+ }
+}
+
+/* Penalize a bytecode instruction. */
+static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
+{
+ uint32_t i, val = PENALTY_MIN;
+ for (i = 0; i < PENALTY_SLOTS; i++)
+ if (mref(J->penalty[i].pc, const BCIns) == pc) { /* Cache slot found? */
+ /* First try to bump its hotcount several times. */
+ val = ((uint32_t)J->penalty[i].val << 1) +
+ (lj_prng_u64(&J2G(J)->prng) & ((1u<<PENALTY_RNDBITS)-1));
+ if (val > PENALTY_MAX) {
+ blacklist_pc(pt, pc); /* Blacklist it, if that didn't help. */
+ return;
+ }
+ goto setpenalty;
+ }
+ /* Assign a new penalty cache slot. */
+ i = J->penaltyslot;
+ J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
+ setmref(J->penalty[i].pc, pc);
+setpenalty:
+ J->penalty[i].val = (uint16_t)val;
+ J->penalty[i].reason = e;
+ hotcount_set(J2GG(J), pc+1, val);
+}
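+
+/* Backoff sketch: every abort at the same PC roughly doubles the penalty
+** and adds a few random bits to de-synchronize repeated retries. The value
+** is written back as the hotcount, so the next recording attempt is
+** delayed accordingly; beyond PENALTY_MAX the bytecode is blacklisted.
+*/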
+
+/* -- Trace compiler state machine ---------------------------------------- */
+
+/* Start tracing. */
+static void trace_start(jit_State *J)
+{
+ lua_State *L;
+ TraceNo traceno;
+
+ if ((J->pt->flags & PROTO_NOJIT)) { /* JIT disabled for this proto? */
+ if (J->parent == 0 && J->exitno == 0 && bc_op(*J->pc) != BC_ITERN) {
+ /* Lazy bytecode patching to disable hotcount events. */
+ lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
+ bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
+ "bad hot bytecode %d", bc_op(*J->pc));
+ setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
+ J->pt->flags |= PROTO_ILOOP;
+ }
+ J->state = LJ_TRACE_IDLE; /* Silently ignored. */
+ return;
+ }
+
+ /* Get a new trace number. */
+ traceno = trace_findfree(J);
+ if (LJ_UNLIKELY(traceno == 0)) { /* No free trace? */
+ lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
+ "recorder called from GC hook");
+ lj_trace_flushall(J->L);
+ J->state = LJ_TRACE_IDLE; /* Silently ignored. */
+ return;
+ }
+ setgcrefp(J->trace[traceno], &J->cur);
+
+ /* Setup enough of the current trace to be able to send the vmevent. */
+ memset(&J->cur, 0, sizeof(GCtrace));
+ J->cur.traceno = traceno;
+ J->cur.nins = J->cur.nk = REF_BASE;
+ J->cur.ir = J->irbuf;
+ J->cur.snap = J->snapbuf;
+ J->cur.snapmap = J->snapmapbuf;
+ J->mergesnap = 0;
+ J->needsnap = 0;
+ J->bcskip = 0;
+ J->guardemit.irt = 0;
+ J->postproc = LJ_POST_NONE;
+ lj_resetsplit(J);
+ J->retryrec = 0;
+ J->ktrace = 0;
+ setgcref(J->cur.startpt, obj2gco(J->pt));
+
+ L = J->L;
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "start"));
+ setintV(L->top++, traceno);
+ setfuncV(L, L->top++, J->fn);
+ setintV(L->top++, proto_bcpos(J->pt, J->pc));
+ if (J->parent) {
+ setintV(L->top++, J->parent);
+ setintV(L->top++, J->exitno);
+ } else {
+ BCOp op = bc_op(*J->pc);
+ if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
+ setintV(L->top++, J->exitno); /* Parent of stitched trace. */
+ setintV(L->top++, -1);
+ }
+ }
+ );
+ lj_record_setup(J);
+}
+
+/* Stop tracing. */
+static void trace_stop(jit_State *J)
+{
+ BCIns *pc = mref(J->cur.startpc, BCIns);
+ BCOp op = bc_op(J->cur.startins);
+ GCproto *pt = &gcref(J->cur.startpt)->pt;
+ TraceNo traceno = J->cur.traceno;
+ GCtrace *T = J->curfinal;
+ lua_State *L;
+
+ switch (op) {
+ case BC_FORL:
+ setbc_op(pc+bc_j(J->cur.startins), BC_JFORI); /* Patch FORI, too. */
+ /* fallthrough */
+ case BC_LOOP:
+ case BC_ITERL:
+ case BC_FUNCF:
+ /* Patch bytecode of starting instruction in root trace. */
+ setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
+ setbc_d(pc, traceno);
+ addroot:
+ /* Add to root trace chain in prototype. */
+ J->cur.nextroot = pt->trace;
+ pt->trace = (TraceNo1)traceno;
+ break;
+ case BC_ITERN:
+ case BC_RET:
+ case BC_RET0:
+ case BC_RET1:
+ *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
+ goto addroot;
+ case BC_JMP:
+ /* Patch exit branch in parent to side trace entry. */
+ lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
+ lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
+ /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
+ {
+ SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
+ snap->count = SNAPCOUNT_DONE;
+ if (J->cur.topslot > snap->topslot) snap->topslot = J->cur.topslot;
+ }
+ /* Add to side trace chain in root trace. */
+ {
+ GCtrace *root = traceref(J, J->cur.root);
+ root->nchild++;
+ J->cur.nextside = root->nextside;
+ root->nextside = (TraceNo1)traceno;
+ }
+ break;
+ case BC_CALLM:
+ case BC_CALL:
+ case BC_ITERC:
+ /* Trace stitching: patch link of previous trace. */
+ traceref(J, J->exitno)->link = traceno;
+ break;
+ default:
+ lj_assertJ(0, "bad stop bytecode %d", op);
+ break;
+ }
+
+ /* Commit new mcode only after all patching is done. */
+ lj_mcode_commit(J, J->cur.mcode);
+ J->postproc = LJ_POST_NONE;
+ trace_save(J, T);
+
+ L = J->L;
+ lj_vmevent_send(L, TRACE,
+ setstrV(L, L->top++, lj_str_newlit(L, "stop"));
+ setintV(L->top++, traceno);
+ setfuncV(L, L->top++, J->fn);
+ );
+}
+
+/* Start a new root trace for down-recursion. */
+static int trace_downrec(jit_State *J)
+{
+ /* Restart recording at the return instruction. */
+ lj_assertJ(J->pt != NULL, "no active prototype");
+ lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
+ if (bc_op(*J->pc) == BC_RETM)
+ return 0; /* NYI: down-recursion with RETM. */
+ J->parent = 0;
+ J->exitno = 0;
+ J->state = LJ_TRACE_RECORD;
+ trace_start(J);
+ return 1;
+}
+
+/* Abort tracing. */
+static int trace_abort(jit_State *J)
+{
+ lua_State *L = J->L;
+ TraceError e = LJ_TRERR_RECERR;
+ TraceNo traceno;
+
+ J->postproc = LJ_POST_NONE;
+ lj_mcode_abort(J);
+ if (J->curfinal) {
+ lj_trace_free(J2G(J), J->curfinal);
+ J->curfinal = NULL;
+ }
+ if (tvisnumber(L->top-1))
+ e = (TraceError)numberVint(L->top-1);
+ if (e == LJ_TRERR_MCODELM) {
+ L->top--; /* Remove error object */
+ J->state = LJ_TRACE_ASM;
+ return 1; /* Retry ASM with new MCode area. */
+ }
+ /* Penalize or blacklist starting bytecode instruction. */
+ if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
+ if (J->exitno == 0) {
+ BCIns *startpc = mref(J->cur.startpc, BCIns);
+ if (e == LJ_TRERR_RETRY)
+ hotcount_set(J2GG(J), startpc+1, 1); /* Immediate retry. */
+ else
+ penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
+ } else {
+ traceref(J, J->exitno)->link = J->exitno; /* Self-link is blacklisted. */
+ }
+ }
+
+ /* Is there anything to abort? */
+ traceno = J->cur.traceno;
+ if (traceno) {
+ ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */
+ J->cur.link = 0;
+ J->cur.linktype = LJ_TRLINK_NONE;
+ lj_vmevent_send(L, TRACE,
+ TValue *frame;
+ const BCIns *pc;
+ GCfunc *fn;
+ setstrV(L, L->top++, lj_str_newlit(L, "abort"));
+ setintV(L->top++, traceno);
+ /* Find original Lua function call to generate a better error message. */
+ frame = J->L->base-1;
+ pc = J->pc;
+ while (!isluafunc(frame_func(frame))) {
+ pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
+ frame = frame_prev(frame);
+ }
+ fn = frame_func(frame);
+ setfuncV(L, L->top++, fn);
+ setintV(L->top++, proto_bcpos(funcproto(fn), pc));
+ copyTV(L, L->top++, restorestack(L, errobj));
+ copyTV(L, L->top++, &J->errinfo);
+ );
+ /* Drop aborted trace after the vmevent (which may still access it). */
+ setgcrefnull(J->trace[traceno]);
+ if (traceno < J->freetrace)
+ J->freetrace = traceno;
+ J->cur.traceno = 0;
+ }
+ L->top--; /* Remove error object */
+ if (e == LJ_TRERR_DOWNREC)
+ return trace_downrec(J);
+ else if (e == LJ_TRERR_MCODEAL)
+ lj_trace_flushall(L);
+ return 0;
+}
+
+/* Perform pending re-patch of a bytecode instruction. */
+static LJ_AINLINE void trace_pendpatch(jit_State *J, int force)
+{
+ if (LJ_UNLIKELY(J->patchpc)) {
+ if (force || J->bcskip == 0) {
+ *J->patchpc = J->patchins;
+ J->patchpc = NULL;
+ } else {
+ J->bcskip = 0;
+ }
+ }
+}
+
+/* State machine for the trace compiler. Protected callback. */
+static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ jit_State *J = (jit_State *)ud;
+ UNUSED(dummy);
+ do {
+ retry:
+ switch (J->state) {
+ case LJ_TRACE_START:
+ J->state = LJ_TRACE_RECORD; /* trace_start() may change state. */
+ trace_start(J);
+ lj_dispatch_update(J2G(J));
+ if (J->state != LJ_TRACE_RECORD_1ST)
+ break;
+ /* fallthrough */
+
+ case LJ_TRACE_RECORD_1ST:
+ J->state = LJ_TRACE_RECORD;
+ /* fallthrough */
+ case LJ_TRACE_RECORD:
+ trace_pendpatch(J, 0);
+ setvmstate(J2G(J), RECORD);
+ lj_vmevent_send_(L, RECORD,
+ /* Save/restore state for trace recorder. */
+ TValue savetv = J2G(J)->tmptv;
+ TValue savetv2 = J2G(J)->tmptv2;
+ TraceNo parent = J->parent;
+ ExitNo exitno = J->exitno;
+ setintV(L->top++, J->cur.traceno);
+ setfuncV(L, L->top++, J->fn);
+ setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
+ setintV(L->top++, J->framedepth);
+ ,
+ J2G(J)->tmptv = savetv;
+ J2G(J)->tmptv2 = savetv2;
+ J->parent = parent;
+ J->exitno = exitno;
+ );
+ lj_record_ins(J);
+ break;
+
+ case LJ_TRACE_END:
+ trace_pendpatch(J, 1);
+ J->loopref = 0;
+ if ((J->flags & JIT_F_OPT_LOOP) &&
+ J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
+ setvmstate(J2G(J), OPT);
+ lj_opt_dce(J);
+ if (lj_opt_loop(J)) { /* Loop optimization failed? */
+ J->cur.link = 0;
+ J->cur.linktype = LJ_TRLINK_NONE;
+ J->loopref = J->cur.nins;
+ J->state = LJ_TRACE_RECORD; /* Try to continue recording. */
+ break;
+ }
+ J->loopref = J->chain[IR_LOOP]; /* Needed by assembler. */
+ }
+ lj_opt_split(J);
+ lj_opt_sink(J);
+ if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
+ J->state = LJ_TRACE_ASM;
+ break;
+
+ case LJ_TRACE_ASM:
+ setvmstate(J2G(J), ASM);
+ lj_asm_trace(J, &J->cur);
+ trace_stop(J);
+ setvmstate(J2G(J), INTERP);
+ J->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(J2G(J));
+ return NULL;
+
+ default: /* Trace aborted asynchronously. */
+ setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
+ /* fallthrough */
+ case LJ_TRACE_ERR:
+ trace_pendpatch(J, 1);
+ if (trace_abort(J))
+ goto retry;
+ setvmstate(J2G(J), INTERP);
+ J->state = LJ_TRACE_IDLE;
+ lj_dispatch_update(J2G(J));
+ return NULL;
+ }
+ } while (J->state > LJ_TRACE_RECORD);
+ return NULL;
+}
+
+/* -- Event handling ------------------------------------------------------ */
+
+/* A bytecode instruction is about to be executed. Record it. */
+void lj_trace_ins(jit_State *J, const BCIns *pc)
+{
+ /* Note: J->L must already be set. pc is the true bytecode PC here. */
+ J->pc = pc;
+ J->fn = curr_func(J->L);
+ J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
+ while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
+ J->state = LJ_TRACE_ERR;
+}
+
+/* A hotcount triggered. Start recording a root trace. */
+void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
+{
+ /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
+ ERRNO_SAVE
+ /* Reset hotcount. */
+ hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
+ /* Only start a new trace if not recording or inside __gc call or vmevent. */
+ if (J->state == LJ_TRACE_IDLE &&
+ !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+ J->parent = 0; /* Root trace. */
+ J->exitno = 0;
+ J->state = LJ_TRACE_START;
+ lj_trace_ins(J, pc-1);
+ }
+ ERRNO_RESTORE
+}
+
+/* Check for a hot side exit. If yes, start recording a side trace. */
+static void trace_hotside(jit_State *J, const BCIns *pc)
+{
+ SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
+ if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
+ isluafunc(curr_func(J->L)) &&
+ snap->count != SNAPCOUNT_DONE &&
+ ++snap->count >= J->param[JIT_P_hotexit]) {
+ lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
+ /* J->parent is non-zero for a side trace. */
+ J->state = LJ_TRACE_START;
+ lj_trace_ins(J, pc);
+ }
+}
+
+/* Stitch a new trace to the previous trace. */
+void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc)
+{
+ /* Only start a new trace if not recording or inside __gc call or vmevent. */
+ if (J->state == LJ_TRACE_IDLE &&
+ !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
+ J->parent = 0; /* Have to treat it like a root trace. */
+ /* J->exitno is set to the invoking trace. */
+ J->state = LJ_TRACE_START;
+ lj_trace_ins(J, pc);
+ }
+}
+
+/* Tiny struct to pass data to protected call. */
+typedef struct ExitDataCP {
+ jit_State *J;
+ void *exptr; /* Pointer to exit state. */
+ const BCIns *pc; /* Restart interpreter at this PC. */
+} ExitDataCP;
+
+/* Need to protect lj_snap_restore because it may throw. */
+static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
+{
+ ExitDataCP *exd = (ExitDataCP *)ud;
+ /* Always catch error here and don't call error function. */
+ cframe_errfunc(L->cframe) = 0;
+ cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
+ exd->pc = lj_snap_restore(exd->J, exd->exptr);
+ UNUSED(dummy);
+ return NULL;
+}
+
+#ifndef LUAJIT_DISABLE_VMEVENT
+/* Push all registers from exit state. */
+static void trace_exit_regs(lua_State *L, ExitState *ex)
+{
+ int32_t i;
+ setintV(L->top++, RID_NUM_GPR);
+ setintV(L->top++, RID_NUM_FPR);
+ for (i = 0; i < RID_NUM_GPR; i++) {
+ if (sizeof(ex->gpr[i]) == sizeof(int32_t))
+ setintV(L->top++, (int32_t)ex->gpr[i]);
+ else
+ setnumV(L->top++, (lua_Number)ex->gpr[i]);
+ }
+#if !LJ_SOFTFP
+ for (i = 0; i < RID_NUM_FPR; i++) {
+ setnumV(L->top, ex->fpr[i]);
+ if (LJ_UNLIKELY(tvisnan(L->top)))
+ setnanV(L->top);
+ L->top++;
+ }
+#endif
+}
+#endif
+
+#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
+/* Determine trace number from pc of exit instruction. */
+static TraceNo trace_exit_find(jit_State *J, MCode *pc)
+{
+ TraceNo traceno;
+ for (traceno = 1; traceno < J->sizetrace; traceno++) {
+ GCtrace *T = traceref(J, traceno);
+ if (T && pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
+ return traceno;
+ }
+ lj_assertJ(0, "bad exit pc");
+ return 0;
+}
+#endif
+
+/* A trace exited. Restore interpreter state. */
+int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
+{
+ ERRNO_SAVE
+ lua_State *L = J->L;
+ ExitState *ex = (ExitState *)exptr;
+ ExitDataCP exd;
+ int errcode, exitcode = J->exitcode;
+ TValue exiterr;
+ const BCIns *pc;
+ void *cf;
+ GCtrace *T;
+
+ setnilV(&exiterr);
+ if (exitcode) { /* Trace unwound with error code. */
+ J->exitcode = 0;
+ copyTV(L, &exiterr, L->top-1);
+ }
+
+#ifdef EXITSTATE_PCREG
+ J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
+#endif
+ T = traceref(J, J->parent); UNUSED(T);
+#ifdef EXITSTATE_CHECKEXIT
+ if (J->exitno == T->nsnap) { /* Treat stack check like a parent exit. */
+ lj_assertJ(T->root != 0, "stack check in root trace");
+ J->exitno = T->ir[REF_BASE].op2;
+ J->parent = T->ir[REF_BASE].op1;
+ T = traceref(J, J->parent);
+ }
+#endif
+ lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
+ exd.J = J;
+ exd.exptr = exptr;
+ errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
+ if (errcode)
+ return -errcode; /* Return negated error code. */
+
+ if (exitcode) copyTV(L, L->top++, &exiterr); /* Anchor the error object. */
+
+ if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
+ lj_vmevent_send(L, TEXIT,
+ lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
+ setintV(L->top++, J->parent);
+ setintV(L->top++, J->exitno);
+ trace_exit_regs(L, ex);
+ );
+
+ pc = exd.pc;
+ cf = cframe_raw(L->cframe);
+ setcframe_pc(cf, pc);
+ if (exitcode) {
+ return -exitcode;
+ } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
+ /* Just exit to interpreter. */
+ } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
+ if (!(G(L)->hookmask & HOOK_GC))
+ lj_gc_step(L); /* Exited because of GC: drive GC forward. */
+ } else {
+ trace_hotside(J, pc);
+ }
+ if (bc_op(*pc) == BC_JLOOP) {
+ BCIns *retpc = &traceref(J, bc_d(*pc))->startins;
+ int isret = bc_isret(bc_op(*retpc));
+ if (isret || bc_op(*retpc) == BC_ITERN) {
+ if (J->state == LJ_TRACE_RECORD) {
+ J->patchins = *pc;
+ J->patchpc = (BCIns *)pc;
+ *J->patchpc = *retpc;
+ J->bcskip = 1;
+ } else if (isret) {
+ pc = retpc;
+ setcframe_pc(cf, pc);
+ }
+ }
+ }
+ /* Return MULTRES or 0. */
+ ERRNO_RESTORE
+ switch (bc_op(*pc)) {
+ case BC_CALLM: case BC_CALLMT:
+ return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
+ case BC_RETM:
+ return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
+ case BC_TSETM:
+ return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
+ default:
+ if (bc_op(*pc) >= BC_FUNCF)
+ return (int)((BCReg)(L->top - L->base) + 1);
+ return 0;
+ }
+}
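+
+/* The value returned above is MULTRES (the number of results or arguments
+** of a multi-value bytecode, recomputed from the restored Lua stack) or 0;
+** the interpreter reads it when resuming at the exit PC.
+*/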
+
+#if LJ_UNWIND_JIT
+/* Given an mcode address determine trace exit address for unwinding. */
+uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
+{
+#if EXITTRACE_VMSTATE
+ TraceNo traceno = J2G(J)->vmstate;
+#else
+ TraceNo traceno = trace_exit_find(J, (MCode *)addr);
+#endif
+ GCtrace *T = traceref(J, traceno);
+ if (T
+#if EXITTRACE_VMSTATE
+ && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
+#endif
+ ) {
+ SnapShot *snap = T->snap;
+ SnapNo lo = 0, exitno = T->nsnap;
+ uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode); /* MCode units! */
+ /* Rightmost binary search for mcode offset to determine exit number. */
+ do {
+ SnapNo mid = (lo+exitno) >> 1;
+ if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
+ } while (lo < exitno);
+ exitno--;
+ *ep = exitno;
+#ifdef EXITSTUBS_PER_GROUP
+ return (uintptr_t)exitstub_addr(J, exitno);
+#else
+ return (uintptr_t)exitstub_trace_addr(T, exitno);
+#endif
+ }
+ /* Cannot correlate addr with trace/exit. This will be fatal. */
+ lj_assertJ(0, "bad exit pc");
+ return 0;
+}
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_trace.h b/libs/luajit-cmake/luajit/src/lj_trace.h
new file mode 100644
index 0000000..3d7f76f
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_trace.h
@@ -0,0 +1,58 @@
+/*
+** Trace management.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_TRACE_H
+#define _LJ_TRACE_H
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+#include "lj_jit.h"
+#include "lj_dispatch.h"
+
+/* Trace errors. */
+typedef enum {
+#define TREDEF(name, msg) LJ_TRERR_##name,
+#include "lj_traceerr.h"
+ LJ_TRERR__MAX
+} TraceError;
+
+LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
+LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
+
+/* Trace management. */
+LJ_FUNC GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T);
+LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
+LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
+LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
+LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno);
+LJ_FUNC int lj_trace_flushall(lua_State *L);
+LJ_FUNC void lj_trace_initstate(global_State *g);
+LJ_FUNC void lj_trace_freestate(global_State *g);
+
+/* Event handling. */
+LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
+LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
+LJ_FUNCA void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc);
+LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
+#if LJ_UNWIND_EXT
+LJ_FUNC uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep);
+#endif
+
+/* Signal asynchronous abort of trace or end of trace. */
+#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE)
+#define lj_trace_end(J) (J->state = LJ_TRACE_END)
+
+#else
+
+#define lj_trace_flushall(L) (UNUSED(L), 0)
+#define lj_trace_initstate(g) UNUSED(g)
+#define lj_trace_freestate(g) UNUSED(g)
+#define lj_trace_abort(g) UNUSED(g)
+#define lj_trace_end(J) UNUSED(J)
+
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_traceerr.h b/libs/luajit-cmake/luajit/src/lj_traceerr.h
new file mode 100644
index 0000000..8ed8ac8
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_traceerr.h
@@ -0,0 +1,61 @@
+/*
+** Trace compiler error messages.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+/* This file may be included multiple times with different TREDEF macros. */
+
+/* Recording. */
+TREDEF(RECERR, "error thrown or hook called during recording")
+TREDEF(TRACEUV, "trace too short")
+TREDEF(TRACEOV, "trace too long")
+TREDEF(STACKOV, "trace too deep")
+TREDEF(SNAPOV, "too many snapshots")
+TREDEF(BLACKL, "blacklisted")
+TREDEF(RETRY, "retry recording")
+TREDEF(NYIBC, "NYI: bytecode %d")
+
+/* Recording loop ops. */
+TREDEF(LLEAVE, "leaving loop in root trace")
+TREDEF(LINNER, "inner loop in root trace")
+TREDEF(LUNROLL, "loop unroll limit reached")
+
+/* Recording calls/returns. */
+TREDEF(BADTYPE, "bad argument type")
+TREDEF(CJITOFF, "JIT compilation disabled for function")
+TREDEF(CUNROLL, "call unroll limit reached")
+TREDEF(DOWNREC, "down-recursion, restarting")
+TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s")
+TREDEF(NYIRETL, "NYI: return to lower frame")
+
+/* Recording indexed load/store. */
+TREDEF(STORENN, "store with nil or NaN key")
+TREDEF(NOMM, "missing metamethod")
+TREDEF(IDXLOOP, "looping index lookup")
+TREDEF(NYITMIX, "NYI: mixed sparse/dense table")
+
+/* Recording C data operations. */
+TREDEF(NOCACHE, "symbol not in cache")
+TREDEF(NYICONV, "NYI: unsupported C type conversion")
+TREDEF(NYICALL, "NYI: unsupported C function type")
+
+/* Optimizations. */
+TREDEF(GFAIL, "guard would always fail")
+TREDEF(PHIOV, "too many PHIs")
+TREDEF(TYPEINS, "persistent type instability")
+
+/* Assembler. */
+TREDEF(MCODEAL, "failed to allocate mcode memory")
+TREDEF(MCODEOV, "machine code too long")
+TREDEF(MCODELM, "hit mcode limit (retrying)")
+TREDEF(SPILLOV, "too many spill slots")
+TREDEF(BADRA, "inconsistent register allocation")
+TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d")
+TREDEF(NYIPHI, "NYI: PHI shuffling too complex")
+TREDEF(NYICOAL, "NYI: register coalescing too complex")
+
+#undef TREDEF
+
+/* Detecting unused error messages:
+ awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh
+*/
diff --git a/libs/luajit-cmake/luajit/src/lj_udata.c b/libs/luajit-cmake/luajit/src/lj_udata.c
new file mode 100644
index 0000000..ee4a145
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_udata.c
@@ -0,0 +1,62 @@
+/*
+** Userdata handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_udata_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_gc.h"
+#include "lj_err.h"
+#include "lj_udata.h"
+
+GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env)
+{
+ GCudata *ud = lj_mem_newt(L, sizeof(GCudata) + sz, GCudata);
+ global_State *g = G(L);
+ newwhite(g, ud); /* Not finalized. */
+ ud->gct = ~LJ_TUDATA;
+ ud->udtype = UDTYPE_USERDATA;
+ ud->len = sz;
+ /* NOBARRIER: The GCudata is new (marked white). */
+ setgcrefnull(ud->metatable);
+ setgcref(ud->env, obj2gco(env));
+ /* Chain to userdata list (after main thread). */
+ setgcrefr(ud->nextgc, mainthread(g)->nextgc);
+ setgcref(mainthread(g)->nextgc, obj2gco(ud));
+ return ud;
+}
+
+void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud)
+{
+ lj_mem_free(g, ud, sizeudata(ud));
+}
+
+#if LJ_64
+void *lj_lightud_intern(lua_State *L, void *p)
+{
+ global_State *g = G(L);
+ uint64_t u = (uint64_t)p;
+ uint32_t up = lightudup(u);
+ uint32_t *segmap = mref(g->gc.lightudseg, uint32_t);
+ MSize segnum = g->gc.lightudnum;
+ if (segmap) {
+ MSize seg;
+ for (seg = 0; seg <= segnum; seg++)
+ if (segmap[seg] == up) /* Fast path. */
+ return (void *)(((uint64_t)seg << LJ_LIGHTUD_BITS_LO) | lightudlo(u));
+ segnum++;
+ /* Leave last segment unused to avoid clash with ITERN key. */
+ if (segnum >= (1 << LJ_LIGHTUD_BITS_SEG)-1) lj_err_msg(L, LJ_ERR_BADLU);
+ }
+ if (!((segnum-1) & segnum) && segnum != 1) {
+ lj_mem_reallocvec(L, segmap, segnum, segnum ? 2*segnum : 2u, uint32_t);
+ setmref(g->gc.lightudseg, segmap);
+ }
+ g->gc.lightudnum = segnum;
+ segmap[segnum] = up;
+ return (void *)(((uint64_t)segnum << LJ_LIGHTUD_BITS_LO) | lightudlo(u));
+}
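+
+/* Sketch: a 64 bit lightuserdata pointer is split into a high segment key
+** (lightudup) and low offset bits (lightudlo). Each distinct key is
+** interned into the small segmap array and replaced by its segment index,
+** so the value fits the tagged TValue encoding and the original pointer
+** can be reconstructed from segmap on access.
+*/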
+#endif
+
diff --git a/libs/luajit-cmake/luajit/src/lj_udata.h b/libs/luajit-cmake/luajit/src/lj_udata.h
new file mode 100644
index 0000000..503c9e3
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_udata.h
@@ -0,0 +1,17 @@
+/*
+** Userdata handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_UDATA_H
+#define _LJ_UDATA_H
+
+#include "lj_obj.h"
+
+LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env);
+LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud);
+#if LJ_64
+LJ_FUNC void * LJ_FASTCALL lj_lightud_intern(lua_State *L, void *p);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_vm.h b/libs/luajit-cmake/luajit/src/lj_vm.h
new file mode 100644
index 0000000..c66db00
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_vm.h
@@ -0,0 +1,116 @@
+/*
+** Assembler VM interface definitions.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VM_H
+#define _LJ_VM_H
+
+#include "lj_obj.h"
+
+/* Entry points for ASM parts of VM. */
+LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1);
+LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud);
+LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
+ lua_CPFunction cp);
+LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
+#if LJ_ABI_WIN && LJ_TARGET_X86
+LJ_ASMF_NORET void LJ_FASTCALL lj_vm_rtlunwind(void *cframe, void *excptrec,
+ void *unwinder, int errcode);
+#endif
+LJ_ASMF void lj_vm_unwind_c_eh(void);
+LJ_ASMF void lj_vm_unwind_ff_eh(void);
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_unwind_rethrow(void);
+#endif
+#if LJ_TARGET_MIPS
+LJ_ASMF void lj_vm_unwind_stub(void);
+#endif
+
+/* Miscellaneous functions. */
+#if LJ_TARGET_X86ORX64
+LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]);
+#endif
+#if LJ_TARGET_PPC
+void lj_vm_cachesync(void *start, void *end);
+#endif
+LJ_ASMF double lj_vm_foldarith(double x, double y, int op);
+#if LJ_HASJIT
+LJ_ASMF double lj_vm_foldfpm(double x, int op);
+#endif
+#if !LJ_ARCH_HASFPU
+/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */
+#endif
+
+/* Dispatch targets for recording and hooks. */
+LJ_ASMF void lj_vm_record(void);
+LJ_ASMF void lj_vm_inshook(void);
+LJ_ASMF void lj_vm_rethook(void);
+LJ_ASMF void lj_vm_callhook(void);
+LJ_ASMF void lj_vm_profhook(void);
+LJ_ASMF void lj_vm_IITERN(void);
+
+/* Trace exit handling. */
+LJ_ASMF void lj_vm_exit_handler(void);
+LJ_ASMF void lj_vm_exit_interp(void);
+
+/* Internal math helper functions. */
+#if LJ_TARGET_PPC || LJ_TARGET_ARM64 || (LJ_TARGET_MIPS && LJ_ABI_SOFTFP)
+#define lj_vm_floor floor
+#define lj_vm_ceil ceil
+#else
+LJ_ASMF double lj_vm_floor(double);
+LJ_ASMF double lj_vm_ceil(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_floor_sf(double);
+LJ_ASMF double lj_vm_ceil_sf(double);
+#endif
+#endif
+#ifdef LUAJIT_NO_LOG2
+LJ_ASMF double lj_vm_log2(double);
+#else
+#define lj_vm_log2 log2
+#endif
+#if !(defined(_LJ_DISPATCH_H) && LJ_TARGET_MIPS)
+LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
+#endif
+
+#if LJ_HASJIT
+#if LJ_TARGET_X86ORX64
+LJ_ASMF void lj_vm_floor_sse(void);
+LJ_ASMF void lj_vm_ceil_sse(void);
+LJ_ASMF void lj_vm_trunc_sse(void);
+#endif
+#if LJ_TARGET_PPC || LJ_TARGET_ARM64
+#define lj_vm_trunc trunc
+#else
+LJ_ASMF double lj_vm_trunc(double);
+#if LJ_TARGET_ARM
+LJ_ASMF double lj_vm_trunc_sf(double);
+#endif
+#endif
+#if LJ_HASFFI
+LJ_ASMF int lj_vm_errno(void);
+#endif
+LJ_ASMF TValue *lj_vm_next(GCtab *t, uint32_t idx);
+#endif
+
+/* Continuations for metamethods. */
+LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
+LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
+LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
+LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
+LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
+LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
+LJ_ASMF void lj_cont_stitch(void); /* Trace stitching. */
+
+/* Start of the ASM code. */
+LJ_ASMF char lj_vm_asm_begin[];
+
+/* Bytecode offsets are relative to lj_vm_asm_begin. */
+#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs)))
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_vmevent.c b/libs/luajit-cmake/luajit/src/lj_vmevent.c
new file mode 100644
index 0000000..c8491d8
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_vmevent.c
@@ -0,0 +1,58 @@
+/*
+** VM event handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#include <stdio.h>
+
+#define lj_vmevent_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+#include "lj_str.h"
+#include "lj_tab.h"
+#include "lj_state.h"
+#include "lj_dispatch.h"
+#include "lj_vm.h"
+#include "lj_vmevent.h"
+
+ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev)
+{
+ global_State *g = G(L);
+ GCstr *s = lj_str_newlit(L, LJ_VMEVENTS_REGKEY);
+ cTValue *tv = lj_tab_getstr(tabV(registry(L)), s);
+ if (tvistab(tv)) {
+ int hash = VMEVENT_HASH(ev);
+ tv = lj_tab_getint(tabV(tv), hash);
+ if (tv && tvisfunc(tv)) {
+ lj_state_checkstack(L, LUA_MINSTACK);
+ setfuncV(L, L->top++, funcV(tv));
+ if (LJ_FR2) setnilV(L->top++);
+ return savestack(L, L->top);
+ }
+ }
+ g->vmevmask &= ~VMEVENT_MASK(ev); /* No handler: cache this fact. */
+ return 0;
+}
+
+void lj_vmevent_call(lua_State *L, ptrdiff_t argbase)
+{
+ global_State *g = G(L);
+ uint8_t oldmask = g->vmevmask;
+ uint8_t oldh = hook_save(g);
+ int status;
+ g->vmevmask = 0; /* Disable all events. */
+ hook_vmevent(g);
+ status = lj_vm_pcall(L, restorestack(L, argbase), 0+1, 0);
+ if (LJ_UNLIKELY(status)) {
+ /* Really shouldn't use stderr here, but where else to complain? */
+ L->top--;
+ fputs("VM handler failed: ", stderr);
+ fputs(tvisstr(L->top) ? strVdata(L->top) : "?", stderr);
+ fputc('\n', stderr);
+ }
+ hook_restore(g, oldh);
+ if (g->vmevmask != VMEVENT_NOCACHE)
+ g->vmevmask = oldmask; /* Restore event mask, unless a handler set VMEVENT_NOCACHE. */
+}
+
diff --git a/libs/luajit-cmake/luajit/src/lj_vmevent.h b/libs/luajit-cmake/luajit/src/lj_vmevent.h
new file mode 100644
index 0000000..40f9c63
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_vmevent.h
@@ -0,0 +1,59 @@
+/*
+** VM event handling.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LJ_VMEVENT_H
+#define _LJ_VMEVENT_H
+
+#include "lj_obj.h"
+
+/* Registry key for VM event handler table. */
+#define LJ_VMEVENTS_REGKEY "_VMEVENTS"
+#define LJ_VMEVENTS_HSIZE 4
+
+#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7))
+#define VMEVENT_HASH(ev) ((int)(ev) & ~7)
+#define VMEVENT_HASHIDX(h) ((int)(h) << 3)
+#define VMEVENT_NOCACHE 255
+
+#define VMEVENT_DEF(name, hash) \
+ LJ_VMEVENT_##name##_, \
+ LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3)
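+
+/* Expansion sketch: VMEVENT_DEF(TRACE, 0xb2d91467) defines the internal
+** sequence ID LJ_VMEVENT_TRACE_ plus the public ID with the sequence in
+** the low 3 bits and the string hash shifted above it, so VMEVENT_MASK()
+** yields the event bit and VMEVENT_HASH() the key for the handler table.
+*/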
+
+/* VM event IDs. */
+typedef enum {
+ VMEVENT_DEF(BC, 0x00003883),
+ VMEVENT_DEF(TRACE, 0xb2d91467),
+ VMEVENT_DEF(RECORD, 0x9284bf4f),
+ VMEVENT_DEF(TEXIT, 0xb29df2b0),
+ LJ_VMEVENT__MAX
+} VMEvent;
+
+#ifdef LUAJIT_DISABLE_VMEVENT
+#define lj_vmevent_send(L, ev, args) UNUSED(L)
+#define lj_vmevent_send_(L, ev, args, post) UNUSED(L)
+#else
+#define lj_vmevent_send(L, ev, args) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ } \
+ }
+#define lj_vmevent_send_(L, ev, args, post) \
+ if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
+ ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
+ if (argbase) { \
+ args \
+ lj_vmevent_call(L, argbase); \
+ post \
+ } \
+ }
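+
+/* Usage sketch (cf. lj_trace.c): `args` is a statement list that pushes
+** the handler arguments, e.g.
+**   lj_vmevent_send(L, TRACE,
+**     setstrV(L, L->top++, lj_str_newlit(L, "flush"));
+**   );
+** `post` in lj_vmevent_send_() runs after the call, e.g. to restore state
+** the trace recorder needs preserved across the handler.
+*/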
+
+LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev);
+LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase);
+#endif
+
+#endif
diff --git a/libs/luajit-cmake/luajit/src/lj_vmmath.c b/libs/luajit-cmake/luajit/src/lj_vmmath.c
new file mode 100644
index 0000000..b6cc60b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_vmmath.c
@@ -0,0 +1,107 @@
+/*
+** Math helper functions for assembler VM.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_vmmath_c
+#define LUA_CORE
+
+#include <errno.h>
+#include <math.h>
+
+#include "lj_obj.h"
+#include "lj_ir.h"
+#include "lj_vm.h"
+
+/* -- Wrapper functions --------------------------------------------------- */
+
+#if LJ_TARGET_X86 && __ELF__ && __PIC__
+/* Wrapper functions to deal with the ELF/x86 PIC disaster. */
+LJ_FUNCA double lj_wrap_log(double x) { return log(x); }
+LJ_FUNCA double lj_wrap_log10(double x) { return log10(x); }
+LJ_FUNCA double lj_wrap_exp(double x) { return exp(x); }
+LJ_FUNCA double lj_wrap_sin(double x) { return sin(x); }
+LJ_FUNCA double lj_wrap_cos(double x) { return cos(x); }
+LJ_FUNCA double lj_wrap_tan(double x) { return tan(x); }
+LJ_FUNCA double lj_wrap_asin(double x) { return asin(x); }
+LJ_FUNCA double lj_wrap_acos(double x) { return acos(x); }
+LJ_FUNCA double lj_wrap_atan(double x) { return atan(x); }
+LJ_FUNCA double lj_wrap_sinh(double x) { return sinh(x); }
+LJ_FUNCA double lj_wrap_cosh(double x) { return cosh(x); }
+LJ_FUNCA double lj_wrap_tanh(double x) { return tanh(x); }
+LJ_FUNCA double lj_wrap_atan2(double x, double y) { return atan2(x, y); }
+LJ_FUNCA double lj_wrap_pow(double x, double y) { return pow(x, y); }
+LJ_FUNCA double lj_wrap_fmod(double x, double y) { return fmod(x, y); }
+#endif
+
+/* -- Helper functions ---------------------------------------------------- */
+
+double lj_vm_foldarith(double x, double y, int op)
+{
+ switch (op) {
+  case IR_ADD - IR_ADD: return x+y;
+  case IR_SUB - IR_ADD: return x-y;
+  case IR_MUL - IR_ADD: return x*y;
+  case IR_DIV - IR_ADD: return x/y;
+  case IR_MOD - IR_ADD: return x-lj_vm_floor(x/y)*y;
+  case IR_POW - IR_ADD: return pow(x, y);
+  case IR_NEG - IR_ADD: return -x;
+  case IR_ABS - IR_ADD: return fabs(x);
+#if LJ_HASJIT
+  case IR_LDEXP - IR_ADD: return ldexp(x, (int)y);
+  case IR_MIN - IR_ADD: return x < y ? x : y;
+  case IR_MAX - IR_ADD: return x > y ? x : y;
+#endif
+ default: return x;
+ }
+}
+
+/* -- Helper functions for generated machine code ------------------------- */
+
+#if (LJ_HASJIT && !(LJ_TARGET_ARM || LJ_TARGET_ARM64 || LJ_TARGET_PPC)) || LJ_TARGET_MIPS
+int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b)
+{
+ uint32_t y, ua, ub;
+ /* This must be checked before using this function. */
+ lj_assertX(b != 0, "modulo with zero divisor");
+ ua = a < 0 ? (uint32_t)-a : (uint32_t)a;
+ ub = b < 0 ? (uint32_t)-b : (uint32_t)b;
+ y = ua % ub;
+ if (y != 0 && (a^b) < 0) y = y - ub;
+ if (((int32_t)y^b) < 0) y = (uint32_t)-(int32_t)y;
+ return (int32_t)y;
+}
+#endif
+
+#if LJ_HASJIT
+
+#ifdef LUAJIT_NO_LOG2
+double lj_vm_log2(double a)
+{
+ return log(a) * 1.4426950408889634074;
+}
+#endif
+
+/* Computes fpm(x) for extended math functions. */
+double lj_vm_foldfpm(double x, int fpm)
+{
+ switch (fpm) {
+ case IRFPM_FLOOR: return lj_vm_floor(x);
+ case IRFPM_CEIL: return lj_vm_ceil(x);
+ case IRFPM_TRUNC: return lj_vm_trunc(x);
+ case IRFPM_SQRT: return sqrt(x);
+ case IRFPM_LOG: return log(x);
+ case IRFPM_LOG2: return lj_vm_log2(x);
+ default: lj_assertX(0, "bad fpm %d", fpm);
+ }
+ return 0;
+}
+
+#if LJ_HASFFI
+int lj_vm_errno(void)
+{
+ return errno;
+}
+#endif
+
+#endif
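lj_vm_modi above implements floored modulo, as Lua defines it: the result takes the sign of the divisor, whereas C's % operator truncates toward zero and keeps the sign of the dividend. A standalone sketch of the difference (illustrative only, not part of the build):

    #include <stdio.h>

    /* Floored modulo: the result has the sign of the divisor b. */
    static int floormod(int a, int b)
    {
      int r = a % b;  /* C: truncated division, sign of the dividend. */
      if (r != 0 && (r < 0) != (b < 0)) r += b;
      return r;
    }

    int main(void)
    {
      printf("%d %d\n", -5 % 3, floormod(-5, 3));  /* Prints: -2 1 */
      return 0;
    }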
diff --git a/libs/luajit-cmake/luajit/src/ljamalg.c b/libs/luajit-cmake/luajit/src/ljamalg.c
new file mode 100644
index 0000000..cae8356
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/ljamalg.c
@@ -0,0 +1,91 @@
+/*
+** LuaJIT core and libraries amalgamation.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define ljamalg_c
+#define LUA_CORE
+
+/* To get the mremap prototype. Must be defined before any system includes. */
+#if defined(__linux__) && !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+
+#ifndef WINVER
+#define WINVER 0x0501
+#endif
+
+#include "lua.h"
+#include "lauxlib.h"
+
+#include "lj_assert.c"
+#include "lj_gc.c"
+#include "lj_err.c"
+#include "lj_char.c"
+#include "lj_bc.c"
+#include "lj_obj.c"
+#include "lj_buf.c"
+#include "lj_str.c"
+#include "lj_tab.c"
+#include "lj_func.c"
+#include "lj_udata.c"
+#include "lj_meta.c"
+#include "lj_debug.c"
+#include "lj_prng.c"
+#include "lj_state.c"
+#include "lj_dispatch.c"
+#include "lj_vmevent.c"
+#include "lj_vmmath.c"
+#include "lj_strscan.c"
+#include "lj_strfmt.c"
+#include "lj_strfmt_num.c"
+#include "lj_serialize.c"
+#include "lj_api.c"
+#include "lj_profile.c"
+#include "lj_lex.c"
+#include "lj_parse.c"
+#include "lj_bcread.c"
+#include "lj_bcwrite.c"
+#include "lj_load.c"
+#include "lj_ctype.c"
+#include "lj_cdata.c"
+#include "lj_cconv.c"
+#include "lj_ccall.c"
+#include "lj_ccallback.c"
+#include "lj_carith.c"
+#include "lj_clib.c"
+#include "lj_cparse.c"
+#include "lj_lib.c"
+#include "lj_ir.c"
+#include "lj_opt_mem.c"
+#include "lj_opt_fold.c"
+#include "lj_opt_narrow.c"
+#include "lj_opt_dce.c"
+#include "lj_opt_loop.c"
+#include "lj_opt_split.c"
+#include "lj_opt_sink.c"
+#include "lj_mcode.c"
+#include "lj_snap.c"
+#include "lj_record.c"
+#include "lj_crecord.c"
+#include "lj_ffrecord.c"
+#include "lj_asm.c"
+#include "lj_trace.c"
+#include "lj_gdbjit.c"
+#include "lj_alloc.c"
+
+#include "lib_aux.c"
+#include "lib_base.c"
+#include "lib_math.c"
+#include "lib_string.c"
+#include "lib_table.c"
+#include "lib_io.c"
+#include "lib_os.c"
+#include "lib_package.c"
+#include "lib_debug.c"
+#include "lib_bit.c"
+#include "lib_jit.c"
+#include "lib_ffi.c"
+#include "lib_buffer.c"
+#include "lib_init.c"
+
diff --git a/libs/luajit-cmake/luajit/src/lua.h b/libs/luajit-cmake/luajit/src/lua.h
new file mode 100644
index 0000000..6d1634d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lua.h
@@ -0,0 +1,402 @@
+/*
+** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $
+** Lua - An Extensible Extension Language
+** Lua.org, PUC-Rio, Brazil (https://www.lua.org)
+** See Copyright Notice at the end of this file
+*/
+
+
+#ifndef lua_h
+#define lua_h
+
+#include <stdarg.h>
+#include <stddef.h>
+
+
+#include "luaconf.h"
+
+
+#define LUA_VERSION "Lua 5.1"
+#define LUA_RELEASE "Lua 5.1.4"
+#define LUA_VERSION_NUM 501
+#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio"
+#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
+
+
+/* mark for precompiled code (`<esc>Lua') */
+#define LUA_SIGNATURE "\033Lua"
+
+/* option for multiple returns in `lua_pcall' and `lua_call' */
+#define LUA_MULTRET (-1)
+
+
+/*
+** pseudo-indices
+*/
+#define LUA_REGISTRYINDEX (-10000)
+#define LUA_ENVIRONINDEX (-10001)
+#define LUA_GLOBALSINDEX (-10002)
+#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
+
+
+/* thread status */
+#define LUA_OK 0
+#define LUA_YIELD 1
+#define LUA_ERRRUN 2
+#define LUA_ERRSYNTAX 3
+#define LUA_ERRMEM 4
+#define LUA_ERRERR 5
+
+
+typedef struct lua_State lua_State;
+
+typedef int (*lua_CFunction) (lua_State *L);
+
+
+/*
+** functions that read/write blocks when loading/dumping Lua chunks
+*/
+typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
+
+typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
+
+
+/*
+** prototype for memory-allocation functions
+*/
+typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
+
+
+/*
+** basic types
+*/
+#define LUA_TNONE (-1)
+
+#define LUA_TNIL 0
+#define LUA_TBOOLEAN 1
+#define LUA_TLIGHTUSERDATA 2
+#define LUA_TNUMBER 3
+#define LUA_TSTRING 4
+#define LUA_TTABLE 5
+#define LUA_TFUNCTION 6
+#define LUA_TUSERDATA 7
+#define LUA_TTHREAD 8
+
+
+
+/* minimum Lua stack available to a C function */
+#define LUA_MINSTACK 20
+
+
+/*
+** generic extra include file
+*/
+#if defined(LUA_USER_H)
+#include LUA_USER_H
+#endif
+
+
+/* type of numbers in Lua */
+typedef LUA_NUMBER lua_Number;
+
+
+/* type for integer functions */
+typedef LUA_INTEGER lua_Integer;
+
+
+
+/*
+** state manipulation
+*/
+LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
+LUA_API void (lua_close) (lua_State *L);
+LUA_API lua_State *(lua_newthread) (lua_State *L);
+
+LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
+
+
+/*
+** basic stack manipulation
+*/
+LUA_API int (lua_gettop) (lua_State *L);
+LUA_API void (lua_settop) (lua_State *L, int idx);
+LUA_API void (lua_pushvalue) (lua_State *L, int idx);
+LUA_API void (lua_remove) (lua_State *L, int idx);
+LUA_API void (lua_insert) (lua_State *L, int idx);
+LUA_API void (lua_replace) (lua_State *L, int idx);
+LUA_API int (lua_checkstack) (lua_State *L, int sz);
+
+LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
+
+
+/*
+** access functions (stack -> C)
+*/
+
+LUA_API int (lua_isnumber) (lua_State *L, int idx);
+LUA_API int (lua_isstring) (lua_State *L, int idx);
+LUA_API int (lua_iscfunction) (lua_State *L, int idx);
+LUA_API int (lua_isuserdata) (lua_State *L, int idx);
+LUA_API int (lua_type) (lua_State *L, int idx);
+LUA_API const char *(lua_typename) (lua_State *L, int tp);
+
+LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
+LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
+
+LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
+LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
+LUA_API int (lua_toboolean) (lua_State *L, int idx);
+LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
+LUA_API size_t (lua_objlen) (lua_State *L, int idx);
+LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
+LUA_API void *(lua_touserdata) (lua_State *L, int idx);
+LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
+LUA_API const void *(lua_topointer) (lua_State *L, int idx);
+
+
+/*
+** push functions (C -> stack)
+*/
+LUA_API void (lua_pushnil) (lua_State *L);
+LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
+LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
+LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
+LUA_API void (lua_pushstring) (lua_State *L, const char *s);
+LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
+ va_list argp);
+LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
+LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
+LUA_API void (lua_pushboolean) (lua_State *L, int b);
+LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
+LUA_API int (lua_pushthread) (lua_State *L);
+
+
+/*
+** get functions (Lua -> stack)
+*/
+LUA_API void (lua_gettable) (lua_State *L, int idx);
+LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawget) (lua_State *L, int idx);
+LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
+LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
+LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
+LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
+LUA_API void (lua_getfenv) (lua_State *L, int idx);
+
+
+/*
+** set functions (stack -> Lua)
+*/
+LUA_API void (lua_settable) (lua_State *L, int idx);
+LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
+LUA_API void (lua_rawset) (lua_State *L, int idx);
+LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
+LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
+LUA_API int (lua_setfenv) (lua_State *L, int idx);
+
+
+/*
+** `load' and `call' functions (load and run Lua code)
+*/
+LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
+LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
+LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
+LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname);
+
+LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
+
+
+/*
+** coroutine functions
+*/
+LUA_API int (lua_yield) (lua_State *L, int nresults);
+LUA_API int (lua_resume) (lua_State *L, int narg);
+LUA_API int (lua_status) (lua_State *L);
+
+/*
+** garbage-collection function and options
+*/
+
+#define LUA_GCSTOP 0
+#define LUA_GCRESTART 1
+#define LUA_GCCOLLECT 2
+#define LUA_GCCOUNT 3
+#define LUA_GCCOUNTB 4
+#define LUA_GCSTEP 5
+#define LUA_GCSETPAUSE 6
+#define LUA_GCSETSTEPMUL 7
+#define LUA_GCISRUNNING 9
+
+LUA_API int (lua_gc) (lua_State *L, int what, int data);
+
+
+/*
+** miscellaneous functions
+*/
+
+LUA_API int (lua_error) (lua_State *L);
+
+LUA_API int (lua_next) (lua_State *L, int idx);
+
+LUA_API void (lua_concat) (lua_State *L, int n);
+
+LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
+LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+
+
+
+/*
+** ===============================================================
+** some useful macros
+** ===============================================================
+*/
+
+#define lua_pop(L,n) lua_settop(L, -(n)-1)
+
+#define lua_newtable(L) lua_createtable(L, 0, 0)
+
+#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
+
+#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
+
+#define lua_strlen(L,i) lua_objlen(L, (i))
+
+#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
+#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
+#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
+#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
+#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
+#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
+#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
+#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
+
+#define lua_pushliteral(L, s) \
+ lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
+
+#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
+#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
+
+#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
+
+
+
+/*
+** compatibility macros and functions
+*/
+
+#define lua_open() luaL_newstate()
+
+#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
+
+#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
+
+#define lua_Chunkreader lua_Reader
+#define lua_Chunkwriter lua_Writer
+
+
+/* hack */
+LUA_API void lua_setlevel (lua_State *from, lua_State *to);
+
+
+/*
+** {======================================================================
+** Debug API
+** =======================================================================
+*/
+
+
+/*
+** Event codes
+*/
+#define LUA_HOOKCALL 0
+#define LUA_HOOKRET 1
+#define LUA_HOOKLINE 2
+#define LUA_HOOKCOUNT 3
+#define LUA_HOOKTAILRET 4
+
+
+/*
+** Event masks
+*/
+#define LUA_MASKCALL (1 << LUA_HOOKCALL)
+#define LUA_MASKRET (1 << LUA_HOOKRET)
+#define LUA_MASKLINE (1 << LUA_HOOKLINE)
+#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
+
+typedef struct lua_Debug lua_Debug; /* activation record */
+
+
+/* Functions to be called by the debugger in specific events */
+typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+
+
+LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
+LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
+LUA_API lua_Hook lua_gethook (lua_State *L);
+LUA_API int lua_gethookmask (lua_State *L);
+LUA_API int lua_gethookcount (lua_State *L);
+
+/* From Lua 5.2. */
+LUA_API void *lua_upvalueid (lua_State *L, int idx, int n);
+LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2);
+LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt,
+ const char *chunkname, const char *mode);
+LUA_API const lua_Number *lua_version (lua_State *L);
+LUA_API void lua_copy (lua_State *L, int fromidx, int toidx);
+LUA_API lua_Number lua_tonumberx (lua_State *L, int idx, int *isnum);
+LUA_API lua_Integer lua_tointegerx (lua_State *L, int idx, int *isnum);
+
+/* From Lua 5.3. */
+LUA_API int lua_isyieldable (lua_State *L);
+
+
+struct lua_Debug {
+ int event;
+ const char *name; /* (n) */
+ const char *namewhat; /* (n) `global', `local', `field', `method' */
+ const char *what; /* (S) `Lua', `C', `main', `tail' */
+ const char *source; /* (S) */
+ int currentline; /* (l) */
+ int nups; /* (u) number of upvalues */
+ int linedefined; /* (S) */
+ int lastlinedefined; /* (S) */
+ char short_src[LUA_IDSIZE]; /* (S) */
+ /* private part */
+ int i_ci; /* active function */
+};
+
+/* }====================================================================== */
+
+
+/******************************************************************************
+* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files (the
+* "Software"), to deal in the Software without restriction, including
+* without limitation the rights to use, copy, modify, merge, publish,
+* distribute, sublicense, and/or sell copies of the Software, and to
+* permit persons to whom the Software is furnished to do so, subject to
+* the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+******************************************************************************/
+
+
+#endif
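Taken together, the declarations above form the whole embedding surface of the Lua 5.1 C API. A minimal host program using them might look like this (a sketch; luaL_newstate and luaL_openlibs come from lauxlib.h and lualib.h elsewhere in this tree):

    #include <stdio.h>
    #include "lua.h"
    #include "lauxlib.h"
    #include "lualib.h"

    int main(void)
    {
      lua_State *L = luaL_newstate();  /* Create a fresh VM state. */
      if (L == NULL) return 1;
      luaL_openlibs(L);                /* Open the standard libraries. */
      lua_getglobal(L, "print");       /* Push the function to call... */
      lua_pushliteral(L, "hello");     /* ...and its argument. */
      if (lua_pcall(L, 1, 0, 0) != 0)  /* print("hello"), protected. */
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
      lua_close(L);
      return 0;
    }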
diff --git a/libs/luajit-cmake/luajit/src/lua.hpp b/libs/luajit-cmake/luajit/src/lua.hpp
new file mode 100644
index 0000000..07e9002
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lua.hpp
@@ -0,0 +1,9 @@
+// C++ wrapper for LuaJIT header files.
+
+extern "C" {
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+}
+
diff --git a/libs/luajit-cmake/luajit/src/luaconf.h b/libs/luajit-cmake/luajit/src/luaconf.h
new file mode 100644
index 0000000..e8790c1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/luaconf.h
@@ -0,0 +1,152 @@
+/*
+** Configuration header.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef luaconf_h
+#define luaconf_h
+
+#ifndef WINVER
+#define WINVER 0x0501
+#endif
+#include <limits.h>
+#include <stddef.h>
+
+/* Default path for loading Lua and C modules with require(). */
+#if defined(_WIN32)
+/*
+** In Windows, any exclamation mark ('!') in the path is replaced by the
+** path of the directory of the executable file of the current process.
+*/
+#define LUA_LDIR "!\\lua\\"
+#define LUA_CDIR "!\\"
+#define LUA_PATH_DEFAULT \
+ ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;"
+#define LUA_CPATH_DEFAULT \
+ ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
+#else
+/*
+** Note to distribution maintainers: do NOT patch the following lines!
+** Please read ../doc/install.html#distro and pass PREFIX=/usr instead.
+*/
+#ifndef LUA_MULTILIB
+#define LUA_MULTILIB "lib"
+#endif
+#ifndef LUA_LMULTILIB
+#define LUA_LMULTILIB "lib"
+#endif
+#define LUA_LROOT "/usr/local"
+#define LUA_LUADIR "/lua/5.1/"
+#define LUA_LJDIR "/luajit-2.1.0-beta3/"
+
+#ifdef LUA_ROOT
+#define LUA_JROOT LUA_ROOT
+#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR
+#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR
+#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua"
+#define LUA_RCPATH ";" LUA_RCDIR "?.so"
+#else
+#define LUA_JROOT LUA_LROOT
+#define LUA_RLPATH
+#define LUA_RCPATH
+#endif
+
+#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua"
+#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR
+#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR
+#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua"
+#define LUA_LCPATH1 ";" LUA_LCDIR "?.so"
+#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so"
+
+#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH
+#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2
+#endif
+
+/* Environment variable names for path overrides and initialization code. */
+#define LUA_PATH "LUA_PATH"
+#define LUA_CPATH "LUA_CPATH"
+#define LUA_INIT "LUA_INIT"
+
+/* Special file system characters. */
+#if defined(_WIN32)
+#define LUA_DIRSEP "\\"
+#else
+#define LUA_DIRSEP "/"
+#endif
+#define LUA_PATHSEP ";"
+#define LUA_PATH_MARK "?"
+#define LUA_EXECDIR "!"
+#define LUA_IGMARK "-"
+#define LUA_PATH_CONFIG \
+ LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
+ LUA_EXECDIR "\n" LUA_IGMARK "\n"
+
+/* Quoting in error messages. */
+#define LUA_QL(x) "'" x "'"
+#define LUA_QS LUA_QL("%s")
+
+/* Various tunables. */
+#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */
+#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */
+#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */
+#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
+#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
+
+/* Configuration for the frontend (the luajit executable). */
+#if defined(luajit_c)
+#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
+#define LUA_PROMPT "> " /* Interactive prompt. */
+#define LUA_PROMPT2 ">> " /* Continuation prompt. */
+#define LUA_MAXINPUT 512 /* Max. input line length. */
+#endif
+
+/* Note: changing the following defines breaks the Lua 5.1 ABI. */
+#define LUA_INTEGER ptrdiff_t
+#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */
+/*
+** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using
+** unreasonable amounts of stack space, but still retain ABI compatibility.
+** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it.
+*/
+#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ)
+
+/* The following defines are here only for compatibility with luaconf.h
+** from the standard Lua distribution. They must not be changed for LuaJIT.
+*/
+#define LUA_NUMBER_DOUBLE
+#define LUA_NUMBER double
+#define LUAI_UACNUMBER double
+#define LUA_NUMBER_SCAN "%lf"
+#define LUA_NUMBER_FMT "%.14g"
+#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n))
+#define LUAI_MAXNUMBER2STR 32
+#define LUA_INTFRMLEN "l"
+#define LUA_INTFRM_T long
+
+/* Linkage of public API functions. */
+#if defined(LUA_BUILD_AS_DLL)
+#if defined(LUA_CORE) || defined(LUA_LIB)
+#define LUA_API __declspec(dllexport)
+#else
+#define LUA_API __declspec(dllimport)
+#endif
+#else
+#define LUA_API extern
+#endif
+
+#define LUALIB_API LUA_API
+
+/* Compatibility support for assertions. */
+#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
+#include <assert.h>
+#endif
+#ifdef LUA_USE_ASSERT
+#define lua_assert(x) assert(x)
+#endif
+#ifdef LUA_USE_APICHECK
+#define luai_apicheck(L, o) { (void)L; assert(o); }
+#else
+#define luai_apicheck(L, o) { (void)L; }
+#endif
+
+#endif
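The LUA_QL/LUA_QS macros above exist only to compose quoted names into error-message format strings at compile time. A typical (illustrative) use with luaL_error from lauxlib.h, where tok is a hypothetical token string:

    /* The format string expands to: "'for' expected near '%s'" */
    luaL_error(L, LUA_QL("for") " expected near " LUA_QS, tok);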
diff --git a/libs/luajit-cmake/luajit/src/luajit.c b/libs/luajit-cmake/luajit/src/luajit.c
new file mode 100644
index 0000000..6dd6402
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/luajit.c
@@ -0,0 +1,586 @@
+/*
+** LuaJIT frontend. Runs commands, scripts, read-eval-print (REPL) etc.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+**
+** Major portions taken verbatim or adapted from the Lua interpreter.
+** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define luajit_c
+
+#include "lua.h"
+#include "lauxlib.h"
+#include "lualib.h"
+#include "luajit.h"
+
+#include "lj_arch.h"
+
+#if LJ_TARGET_POSIX
+#include <unistd.h>
+#define lua_stdin_is_tty() isatty(0)
+#elif LJ_TARGET_WINDOWS
+#include <io.h>
+#ifdef __BORLANDC__
+#define lua_stdin_is_tty() isatty(_fileno(stdin))
+#else
+#define lua_stdin_is_tty() _isatty(_fileno(stdin))
+#endif
+#else
+#define lua_stdin_is_tty() 1
+#endif
+
+#if !LJ_TARGET_CONSOLE
+#include <signal.h>
+#endif
+
+static lua_State *globalL = NULL;
+static const char *progname = LUA_PROGNAME;
+static char *empty_argv[2] = { NULL, NULL };
+
+#if !LJ_TARGET_CONSOLE
+static void lstop(lua_State *L, lua_Debug *ar)
+{
+ (void)ar; /* unused arg. */
+ lua_sethook(L, NULL, 0, 0);
+ /* Avoid luaL_error -- a C hook doesn't add an extra frame. */
+ luaL_where(L, 0);
+ lua_pushfstring(L, "%sinterrupted!", lua_tostring(L, -1));
+ lua_error(L);
+}
+
+static void laction(int i)
+{
+ signal(i, SIG_DFL); /* if another SIGINT happens before lstop,
+ terminate process (default action) */
+ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1);
+}
+#endif
+
+static void print_usage(void)
+{
+ fputs("usage: ", stderr);
+ fputs(progname, stderr);
+ fputs(" [options]... [script [args]...].\n"
+ "Available options are:\n"
+ " -e chunk Execute string " LUA_QL("chunk") ".\n"
+ " -l name Require library " LUA_QL("name") ".\n"
+ " -b ... Save or list bytecode.\n"
+ " -j cmd Perform LuaJIT control command.\n"
+ " -O[opt] Control LuaJIT optimizations.\n"
+ " -i Enter interactive mode after executing " LUA_QL("script") ".\n"
+ " -v Show version information.\n"
+ " -E Ignore environment variables.\n"
+ " -- Stop handling options.\n"
+ " - Execute stdin and stop handling options.\n", stderr);
+ fflush(stderr);
+}
+
+static void l_message(const char *msg)
+{
+ if (progname) { fputs(progname, stderr); fputc(':', stderr); fputc(' ', stderr); }
+ fputs(msg, stderr); fputc('\n', stderr);
+ fflush(stderr);
+}
+
+static int report(lua_State *L, int status)
+{
+ if (status && !lua_isnil(L, -1)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg == NULL) msg = "(error object is not a string)";
+ l_message(msg);
+ lua_pop(L, 1);
+ }
+ return status;
+}
+
+static int traceback(lua_State *L)
+{
+ if (!lua_isstring(L, 1)) { /* Non-string error object? Try metamethod. */
+ if (lua_isnoneornil(L, 1) ||
+ !luaL_callmeta(L, 1, "__tostring") ||
+ !lua_isstring(L, -1))
+ return 1; /* Return non-string error object. */
+ lua_remove(L, 1); /* Replace object by result of __tostring metamethod. */
+ }
+ luaL_traceback(L, L, lua_tostring(L, 1), 1);
+ return 1;
+}
+
+static int docall(lua_State *L, int narg, int clear)
+{
+ int status;
+ int base = lua_gettop(L) - narg; /* function index */
+ lua_pushcfunction(L, traceback); /* push traceback function */
+ lua_insert(L, base); /* put it under chunk and args */
+#if !LJ_TARGET_CONSOLE
+ signal(SIGINT, laction);
+#endif
+ status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base);
+#if !LJ_TARGET_CONSOLE
+ signal(SIGINT, SIG_DFL);
+#endif
+ lua_remove(L, base); /* remove traceback function */
+ /* force a complete garbage collection in case of errors */
+ if (status != LUA_OK) lua_gc(L, LUA_GCCOLLECT, 0);
+ return status;
+}
+
+static void print_version(void)
+{
+ fputs(LUAJIT_VERSION " -- " LUAJIT_COPYRIGHT ". " LUAJIT_URL "\n", stdout);
+}
+
+static void print_jit_status(lua_State *L)
+{
+ int n;
+ const char *s;
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "status");
+ lua_remove(L, -2);
+ n = lua_gettop(L);
+ lua_call(L, 0, LUA_MULTRET);
+ fputs(lua_toboolean(L, n) ? "JIT: ON" : "JIT: OFF", stdout);
+ for (n++; (s = lua_tostring(L, n)); n++) {
+ putc(' ', stdout);
+ fputs(s, stdout);
+ }
+ putc('\n', stdout);
+ lua_settop(L, 0); /* clear stack */
+}
+
+static void createargtable(lua_State *L, char **argv, int argc, int argf)
+{
+ int i;
+ lua_createtable(L, argc - argf, argf);
+ for (i = 0; i < argc; i++) {
+ lua_pushstring(L, argv[i]);
+ lua_rawseti(L, -2, i - argf);
+ }
+ lua_setglobal(L, "arg");
+}
+
+static int dofile(lua_State *L, const char *name)
+{
+ int status = luaL_loadfile(L, name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dostring(lua_State *L, const char *s, const char *name)
+{
+ int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1);
+ return report(L, status);
+}
+
+static int dolibrary(lua_State *L, const char *name)
+{
+ lua_getglobal(L, "require");
+ lua_pushstring(L, name);
+ return report(L, docall(L, 1, 1));
+}
+
+static void write_prompt(lua_State *L, int firstline)
+{
+ const char *p;
+ lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2");
+ p = lua_tostring(L, -1);
+ if (p == NULL) p = firstline ? LUA_PROMPT : LUA_PROMPT2;
+ fputs(p, stdout);
+ fflush(stdout);
+ lua_pop(L, 1); /* remove global */
+}
+
+static int incomplete(lua_State *L, int status)
+{
+ if (status == LUA_ERRSYNTAX) {
+ size_t lmsg;
+ const char *msg = lua_tolstring(L, -1, &lmsg);
+ const char *tp = msg + lmsg - (sizeof(LUA_QL("<eof>")) - 1);
+ if (strstr(msg, LUA_QL("<eof>")) == tp) {
+ lua_pop(L, 1);
+ return 1;
+ }
+ }
+ return 0; /* else... */
+}
+
+static int pushline(lua_State *L, int firstline)
+{
+ char buf[LUA_MAXINPUT];
+ write_prompt(L, firstline);
+ if (fgets(buf, LUA_MAXINPUT, stdin)) {
+ size_t len = strlen(buf);
+ if (len > 0 && buf[len-1] == '\n')
+ buf[len-1] = '\0';
+ if (firstline && buf[0] == '=')
+ lua_pushfstring(L, "return %s", buf+1);
+ else
+ lua_pushstring(L, buf);
+ return 1;
+ }
+ return 0;
+}
+
+static int loadline(lua_State *L)
+{
+ int status;
+ lua_settop(L, 0);
+ if (!pushline(L, 1))
+ return -1; /* no input */
+  for (;;) { /* repeat until it gets a complete line */
+ status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin");
+ if (!incomplete(L, status)) break; /* cannot try to add lines? */
+ if (!pushline(L, 0)) /* no more input? */
+ return -1;
+ lua_pushliteral(L, "\n"); /* add a new line... */
+ lua_insert(L, -2); /* ...between the two lines */
+ lua_concat(L, 3); /* join them */
+ }
+ lua_remove(L, 1); /* remove line */
+ return status;
+}
+
+static void dotty(lua_State *L)
+{
+ int status;
+ const char *oldprogname = progname;
+ progname = NULL;
+ while ((status = loadline(L)) != -1) {
+ if (status == LUA_OK) status = docall(L, 0, 0);
+ report(L, status);
+ if (status == LUA_OK && lua_gettop(L) > 0) { /* any result to print? */
+ lua_getglobal(L, "print");
+ lua_insert(L, 1);
+ if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0)
+ l_message(lua_pushfstring(L, "error calling " LUA_QL("print") " (%s)",
+ lua_tostring(L, -1)));
+ }
+ }
+ lua_settop(L, 0); /* clear stack */
+ fputs("\n", stdout);
+ fflush(stdout);
+ progname = oldprogname;
+}
+
+static int handle_script(lua_State *L, char **argx)
+{
+ int status;
+ const char *fname = argx[0];
+ if (strcmp(fname, "-") == 0 && strcmp(argx[-1], "--") != 0)
+ fname = NULL; /* stdin */
+ status = luaL_loadfile(L, fname);
+ if (status == LUA_OK) {
+ /* Fetch args from arg table. LUA_INIT or -e might have changed them. */
+ int narg = 0;
+ lua_getglobal(L, "arg");
+ if (lua_istable(L, -1)) {
+ do {
+ narg++;
+ lua_rawgeti(L, -narg, narg);
+ } while (!lua_isnil(L, -1));
+ lua_pop(L, 1);
+ lua_remove(L, -narg);
+ narg--;
+ } else {
+ lua_pop(L, 1);
+ }
+ status = docall(L, narg, 0);
+ }
+ return report(L, status);
+}
+
+/* Load add-on module. */
+static int loadjitmodule(lua_State *L)
+{
+ lua_getglobal(L, "require");
+ lua_pushliteral(L, "jit.");
+ lua_pushvalue(L, -3);
+ lua_concat(L, 2);
+ if (lua_pcall(L, 1, 1, 0)) {
+ const char *msg = lua_tostring(L, -1);
+ if (msg && !strncmp(msg, "module ", 7))
+ goto nomodule;
+ return report(L, 1);
+ }
+ lua_getfield(L, -1, "start");
+ if (lua_isnil(L, -1)) {
+ nomodule:
+ l_message("unknown luaJIT command or jit.* modules not installed");
+ return 1;
+ }
+ lua_remove(L, -2); /* Drop module table. */
+ return 0;
+}
+
+/* Run command with options. */
+static int runcmdopt(lua_State *L, const char *opt)
+{
+ int narg = 0;
+ if (opt && *opt) {
+ for (;;) { /* Split arguments. */
+ const char *p = strchr(opt, ',');
+ narg++;
+ if (!p) break;
+ if (p == opt)
+ lua_pushnil(L);
+ else
+ lua_pushlstring(L, opt, (size_t)(p - opt));
+ opt = p + 1;
+ }
+ if (*opt)
+ lua_pushstring(L, opt);
+ else
+ lua_pushnil(L);
+ }
+ return report(L, lua_pcall(L, narg, 0, 0));
+}
+
+/* JIT engine control command: try jit library first or load add-on module. */
+static int dojitcmd(lua_State *L, const char *cmd)
+{
+ const char *opt = strchr(cmd, '=');
+ lua_pushlstring(L, cmd, opt ? (size_t)(opt - cmd) : strlen(cmd));
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit"); /* Get jit.* module table. */
+ lua_remove(L, -2);
+ lua_pushvalue(L, -2);
+ lua_gettable(L, -2); /* Lookup library function. */
+ if (!lua_isfunction(L, -1)) {
+ lua_pop(L, 2); /* Drop non-function and jit.* table, keep module name. */
+ if (loadjitmodule(L))
+ return 1;
+ } else {
+ lua_remove(L, -2); /* Drop jit.* table. */
+ }
+ lua_remove(L, -2); /* Drop module name. */
+ return runcmdopt(L, opt ? opt+1 : opt);
+}
+
+/* Optimization flags. */
+static int dojitopt(lua_State *L, const char *opt)
+{
+ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED");
+ lua_getfield(L, -1, "jit.opt"); /* Get jit.opt.* module table. */
+ lua_remove(L, -2);
+ lua_getfield(L, -1, "start");
+ lua_remove(L, -2);
+ return runcmdopt(L, opt);
+}
+
+/* Save or list bytecode. */
+static int dobytecode(lua_State *L, char **argv)
+{
+ int narg = 0;
+ lua_pushliteral(L, "bcsave");
+ if (loadjitmodule(L))
+ return 1;
+ if (argv[0][2]) {
+ narg++;
+ argv[0][1] = '-';
+ lua_pushstring(L, argv[0]+1);
+ }
+ for (argv++; *argv != NULL; narg++, argv++)
+ lua_pushstring(L, *argv);
+ report(L, lua_pcall(L, narg, 0, 0));
+ return -1;
+}
+
+/* check that argument has no extra characters at the end */
+#define notail(x) {if ((x)[2] != '\0') return -1;}
+
+#define FLAGS_INTERACTIVE 1
+#define FLAGS_VERSION 2
+#define FLAGS_EXEC 4
+#define FLAGS_OPTION 8
+#define FLAGS_NOENV 16
+
+static int collectargs(char **argv, int *flags)
+{
+ int i;
+ for (i = 1; argv[i] != NULL; i++) {
+ if (argv[i][0] != '-') /* Not an option? */
+ return i;
+ switch (argv[i][1]) { /* Check option. */
+ case '-':
+ notail(argv[i]);
+ return i+1;
+ case '\0':
+ return i;
+ case 'i':
+ notail(argv[i]);
+ *flags |= FLAGS_INTERACTIVE;
+ /* fallthrough */
+ case 'v':
+ notail(argv[i]);
+ *flags |= FLAGS_VERSION;
+ break;
+ case 'e':
+ *flags |= FLAGS_EXEC;
+ /* fallthrough */
+ case 'j': /* LuaJIT extension */
+ case 'l':
+ *flags |= FLAGS_OPTION;
+ if (argv[i][2] == '\0') {
+ i++;
+ if (argv[i] == NULL) return -1;
+ }
+ break;
+ case 'O': break; /* LuaJIT extension */
+ case 'b': /* LuaJIT extension */
+ if (*flags) return -1;
+ *flags |= FLAGS_EXEC;
+ return i+1;
+ case 'E':
+ *flags |= FLAGS_NOENV;
+ break;
+ default: return -1; /* invalid option */
+ }
+ }
+ return i;
+}
+
+static int runargs(lua_State *L, char **argv, int argn)
+{
+ int i;
+ for (i = 1; i < argn; i++) {
+ if (argv[i] == NULL) continue;
+ lua_assert(argv[i][0] == '-');
+ switch (argv[i][1]) {
+ case 'e': {
+ const char *chunk = argv[i] + 2;
+ if (*chunk == '\0') chunk = argv[++i];
+ lua_assert(chunk != NULL);
+ if (dostring(L, chunk, "=(command line)") != 0)
+ return 1;
+ break;
+ }
+ case 'l': {
+ const char *filename = argv[i] + 2;
+ if (*filename == '\0') filename = argv[++i];
+ lua_assert(filename != NULL);
+ if (dolibrary(L, filename))
+ return 1;
+ break;
+ }
+ case 'j': { /* LuaJIT extension. */
+ const char *cmd = argv[i] + 2;
+ if (*cmd == '\0') cmd = argv[++i];
+ lua_assert(cmd != NULL);
+ if (dojitcmd(L, cmd))
+ return 1;
+ break;
+ }
+ case 'O': /* LuaJIT extension. */
+ if (dojitopt(L, argv[i] + 2))
+ return 1;
+ break;
+ case 'b': /* LuaJIT extension. */
+ return dobytecode(L, argv+i);
+ default: break;
+ }
+ }
+ return LUA_OK;
+}
+
+static int handle_luainit(lua_State *L)
+{
+#if LJ_TARGET_CONSOLE
+ const char *init = NULL;
+#else
+ const char *init = getenv(LUA_INIT);
+#endif
+ if (init == NULL)
+ return LUA_OK;
+ else if (init[0] == '@')
+ return dofile(L, init+1);
+ else
+ return dostring(L, init, "=" LUA_INIT);
+}
+
+static struct Smain {
+ char **argv;
+ int argc;
+ int status;
+} smain;
+
+static int pmain(lua_State *L)
+{
+ struct Smain *s = &smain;
+ char **argv = s->argv;
+ int argn;
+ int flags = 0;
+ globalL = L;
+ LUAJIT_VERSION_SYM(); /* Linker-enforced version check. */
+
+ argn = collectargs(argv, &flags);
+ if (argn < 0) { /* Invalid args? */
+ print_usage();
+ s->status = 1;
+ return 0;
+ }
+
+ if ((flags & FLAGS_NOENV)) {
+ lua_pushboolean(L, 1);
+ lua_setfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
+ }
+
+ /* Stop collector during library initialization. */
+ lua_gc(L, LUA_GCSTOP, 0);
+ luaL_openlibs(L);
+ lua_gc(L, LUA_GCRESTART, -1);
+
+ createargtable(L, argv, s->argc, argn);
+
+ if (!(flags & FLAGS_NOENV)) {
+ s->status = handle_luainit(L);
+ if (s->status != LUA_OK) return 0;
+ }
+
+ if ((flags & FLAGS_VERSION)) print_version();
+
+ s->status = runargs(L, argv, argn);
+ if (s->status != LUA_OK) return 0;
+
+ if (s->argc > argn) {
+ s->status = handle_script(L, argv + argn);
+ if (s->status != LUA_OK) return 0;
+ }
+
+ if ((flags & FLAGS_INTERACTIVE)) {
+ print_jit_status(L);
+ dotty(L);
+ } else if (s->argc == argn && !(flags & (FLAGS_EXEC|FLAGS_VERSION))) {
+ if (lua_stdin_is_tty()) {
+ print_version();
+ print_jit_status(L);
+ dotty(L);
+ } else {
+ dofile(L, NULL); /* Executes stdin as a file. */
+ }
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int status;
+ lua_State *L;
+ if (!argv[0]) argv = empty_argv; else if (argv[0][0]) progname = argv[0];
+ L = lua_open();
+ if (L == NULL) {
+ l_message("cannot create state: not enough memory");
+ return EXIT_FAILURE;
+ }
+ smain.argc = argc;
+ smain.argv = argv;
+ status = lua_cpcall(L, pmain, NULL);
+ report(L, status);
+ lua_close(L);
+ return (status || smain.status > 0) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
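The docall/traceback pairing above is the canonical way to get a stack trace out of lua_pcall: the message handler must already sit below the called function when the error is raised. Reduced to a reusable form for embedders (a sketch; msghandler is any C function in the mold of traceback above):

    static int call_with_traceback(lua_State *L, int narg, lua_CFunction msghandler)
    {
      int status, base = lua_gettop(L) - narg;  /* Index of the function to call. */
      lua_pushcfunction(L, msghandler);         /* Push the message handler... */
      lua_insert(L, base);                      /* ...below the function and args. */
      status = lua_pcall(L, narg, LUA_MULTRET, base);
      lua_remove(L, base);                      /* Drop the handler again. */
      return status;  /* 0 on success; else the error+traceback is on the stack. */
    }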
diff --git a/libs/luajit-cmake/luajit/src/luajit.h b/libs/luajit-cmake/luajit/src/luajit.h
new file mode 100644
index 0000000..31f1eb1
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/luajit.h
@@ -0,0 +1,79 @@
+/*
+** LuaJIT -- a Just-In-Time Compiler for Lua. https://luajit.org/
+**
+** Copyright (C) 2005-2022 Mike Pall. All rights reserved.
+**
+** Permission is hereby granted, free of charge, to any person obtaining
+** a copy of this software and associated documentation files (the
+** "Software"), to deal in the Software without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Software, and to
+** permit persons to whom the Software is furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be
+** included in all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+**
+** [ MIT license: https://www.opensource.org/licenses/mit-license.php ]
+*/
+
+#ifndef _LUAJIT_H
+#define _LUAJIT_H
+
+#include "lua.h"
+
+#define LUAJIT_VERSION "LuaJIT 2.1.0-beta3"
+#define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */
+#define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta3
+#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2022 Mike Pall"
+#define LUAJIT_URL "https://luajit.org/"
+
+/* Modes for luaJIT_setmode. */
+#define LUAJIT_MODE_MASK 0x00ff
+
+enum {
+ LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */
+ LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */
+
+ LUAJIT_MODE_FUNC, /* Change mode for a function. */
+ LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */
+ LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */
+
+ LUAJIT_MODE_TRACE, /* Flush a compiled trace. */
+
+ LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */
+
+ LUAJIT_MODE_MAX
+};
+
+/* Flags or'ed in to the mode. */
+#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */
+#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */
+#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */
+
+/* LuaJIT public C API. */
+
+/* Control the JIT engine. */
+LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
+
+/* Low-overhead profiling API. */
+typedef void (*luaJIT_profile_callback)(void *data, lua_State *L,
+ int samples, int vmstate);
+LUA_API void luaJIT_profile_start(lua_State *L, const char *mode,
+ luaJIT_profile_callback cb, void *data);
+LUA_API void luaJIT_profile_stop(lua_State *L);
+LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt,
+ int depth, size_t *len);
+
+/* Enforce (dynamic) linker error for version mismatches. Call from main. */
+LUA_API void LUAJIT_VERSION_SYM(void);
+
+#endif
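The mode constants above combine an engine/function selector with an on/off/flush flag or'ed into the same argument. Toggling the whole JIT engine from C looks like this (return-value checks elided for brevity):

    luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_OFF);   /* Disable compilation. */
    /* ...run interpreter-only for a while... */
    luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_FLUSH); /* Drop compiled traces. */
    luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|LUAJIT_MODE_ON);    /* Re-enable the JIT. */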
diff --git a/libs/luajit-cmake/luajit/src/lualib.h b/libs/luajit-cmake/luajit/src/lualib.h
new file mode 100644
index 0000000..8774845
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lualib.h
@@ -0,0 +1,44 @@
+/*
+** Standard library header.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#ifndef _LUALIB_H
+#define _LUALIB_H
+
+#include "lua.h"
+
+#define LUA_FILEHANDLE "FILE*"
+
+#define LUA_COLIBNAME "coroutine"
+#define LUA_MATHLIBNAME "math"
+#define LUA_STRLIBNAME "string"
+#define LUA_TABLIBNAME "table"
+#define LUA_IOLIBNAME "io"
+#define LUA_OSLIBNAME "os"
+#define LUA_LOADLIBNAME "package"
+#define LUA_DBLIBNAME "debug"
+#define LUA_BITLIBNAME "bit"
+#define LUA_JITLIBNAME "jit"
+#define LUA_FFILIBNAME "ffi"
+
+LUALIB_API int luaopen_base(lua_State *L);
+LUALIB_API int luaopen_math(lua_State *L);
+LUALIB_API int luaopen_string(lua_State *L);
+LUALIB_API int luaopen_table(lua_State *L);
+LUALIB_API int luaopen_io(lua_State *L);
+LUALIB_API int luaopen_os(lua_State *L);
+LUALIB_API int luaopen_package(lua_State *L);
+LUALIB_API int luaopen_debug(lua_State *L);
+LUALIB_API int luaopen_bit(lua_State *L);
+LUALIB_API int luaopen_jit(lua_State *L);
+LUALIB_API int luaopen_ffi(lua_State *L);
+LUALIB_API int luaopen_string_buffer(lua_State *L);
+
+LUALIB_API void luaL_openlibs(lua_State *L);
+
+#ifndef lua_assert
+#define lua_assert(x) ((void)0)
+#endif
+
+#endif
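luaL_openlibs opens all of the libraries declared above at once. An embedder that wants only a subset can invoke an individual loader through the stack, following the Lua 5.1 convention of passing the library name as its argument (a sketch):

    lua_pushcfunction(L, luaopen_string);
    lua_pushliteral(L, LUA_STRLIBNAME);
    lua_call(L, 1, 0);  /* Registers the string library globally. */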
diff --git a/libs/luajit-cmake/luajit/src/msvcbuild.bat b/libs/luajit-cmake/luajit/src/msvcbuild.bat
new file mode 100644
index 0000000..d323d8d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/msvcbuild.bat
@@ -0,0 +1,127 @@
+@rem Script to build LuaJIT with MSVC.
+@rem Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+@rem
+@rem Open a "Visual Studio Command Prompt" (either x86 or x64).
+@rem Then cd to this directory and run this script. Use the following
+@rem options (in order), if needed. The default is a dynamic release build.
+@rem
+@rem nogc64 disable LJ_GC64 mode for x64
+@rem debug emit debug symbols
+@rem amalg amalgamated build
+@rem static static linkage
+
+@if not defined INCLUDE goto :FAIL
+
+@setlocal
+@rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK
+@set DEBUGCFLAGS=
+@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set LJLIB=lib /nologo /nodefaultlib
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set DASC=vm_x64.dasc
+@set LJDLLNAME=lua51.dll
+@set LJLIBNAME=lua51.lib
+@set BUILDTYPE=release
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@set DASMFLAGS=-D WIN -D JIT -D FFI -D P64
+@set LJARCH=x64
+@minilua
+@if errorlevel 8 goto :X64
+@set DASC=vm_x86.dasc
+@set DASMFLAGS=-D WIN -D JIT -D FFI
+@set LJARCH=x86
+@set LJCOMPILE=%LJCOMPILE% /arch:SSE2
+:X64
+@if "%1" neq "nogc64" goto :GC64
+@shift
+@set DASC=vm_x86.dasc
+@set LJCOMPILE=%LJCOMPILE% /DLUAJIT_DISABLE_GC64
+:GC64
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m peobj -o lj_vm.obj
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set BUILDTYPE=debug
+@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS%
+@set LJLINK=%LJLINK% /opt:ref /opt:icf /incremental:no
+:NODEBUG
+@set LJLINK=%LJLINK% /%BUILDTYPE%
+@if "%1"=="amalg" goto :AMALGDLL
+@if "%1"=="static" goto :STATIC
+%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :MTDLL
+:STATIC
+%LJCOMPILE% lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :MTDLL
+:AMALGDLL
+%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj
+@if errorlevel 1 goto :BAD
+:MTDLL
+if exist %LJDLLNAME%.manifest^
+ %LJMT% -manifest %LJDLLNAME%.manifest -outputresource:%LJDLLNAME%;2
+
+%LJCOMPILE% luajit.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:luajit.exe luajit.obj %LJLIBNAME%
+@if errorlevel 1 goto :BAD
+if exist luajit.exe.manifest^
+ %LJMT% -manifest luajit.exe.manifest -outputresource:luajit.exe
+
+@del *.obj *.manifest minilua.exe buildvm.exe
+@del host\buildvm_arch.h
+@del lj_bcdef.h lj_ffdef.h lj_libdef.h lj_recdef.h lj_folddef.h
+@echo.
+@echo === Successfully built LuaJIT for Windows/%LJARCH% ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo You must open a "Visual Studio Command Prompt" to run this script
+:END
diff --git a/libs/luajit-cmake/luajit/src/nxbuild.bat b/libs/luajit-cmake/luajit/src/nxbuild.bat
new file mode 100644
index 0000000..c4a21f0
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/nxbuild.bat
@@ -0,0 +1,159 @@
+@rem Script to build LuaJIT with NintendoSDK + NX Addon.
+@rem Donated to the public domain by Swyter.
+@rem
+@rem To run this script you must open a "Native Tools Command Prompt for VS".
+@rem
+@rem Either the x86 version for NX32, or x64 for the NX64 target.
+@rem This is because the pointer size of the LuaJIT host tools (buildvm.exe)
+@rem must match the cross-compiled target (32 or 64 bits).
+@rem
+@rem Then cd to this directory and run this script.
+@rem
+@rem Recommended invocation:
+@rem
+@rem nxbuild # release build, amalgamated
+@rem nxbuild debug # debug build, amalgamated
+@rem
+@rem Additional command-line options (not generally recommended):
+@rem
+@rem noamalg # (after debug) non-amalgamated build
+
+@if not defined INCLUDE goto :FAIL
+@if not defined NINTENDO_SDK_ROOT goto :FAIL
+@if not defined PLATFORM goto :FAIL
+
+@if "%platform%" == "x86" goto :DO_NX32
+@if "%platform%" == "x64" goto :DO_NX64
+
+@echo Error: Current host platform is %platform%!
+@echo.
+@goto :FAIL
+
+@setlocal
+
+:DO_NX32
+@set DASC=vm_arm.dasc
+@set DASMFLAGS= -D HFABI -D FPU
+@set DASMTARGET= -D LUAJIT_TARGET=LUAJIT_ARCH_ARM
+@set HOST_PTR_SIZE=4
+goto :BEGIN
+
+:DO_NX64
+@set DASC=vm_arm64.dasc
+@set DASMFLAGS= -D ENDIAN_LE
+@set DASMTARGET= -D LUAJIT_TARGET=LUAJIT_ARCH_ARM64
+@set HOST_PTR_SIZE=8
+
+:BEGIN
+@rem ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /wo4146 /wo4244 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Check that we have the right 32/64 bit host compiler to generate the right virtual machine files.
+@minilua
+@if "%ERRORLEVEL%" == "%HOST_PTR_SIZE%" goto :PASSED_PTR_CHECK
+
+@echo The pointer size of the host tools (%errorlevel% bytes) does not match the expected value (%HOST_PTR_SIZE%).
+@echo Check that the script is being run under the correct x86/x64 VS prompt.
+@goto :BAD
+
+:PASSED_PTR_CHECK
+@set DASMFLAGS=%DASMFLAGS% %DASMTARGET% -D LJ_TARGET_NX -D LUAJIT_OS=LUAJIT_OS_OTHER -D LUAJIT_DISABLE_JIT -D LUAJIT_DISABLE_FFI
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
+@if errorlevel 1 goto :BAD
+%LJCOMPILE% /I "." /I %DASMDIR% %DASMTARGET% -D LJ_TARGET_NX -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m elfasm -o lj_vm.s
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@rem ---- Cross compiler ----
+@if "%platform%" neq "x64" goto :NX32_CROSSBUILD
+@set LJCOMPILE="%NINTENDO_SDK_ROOT%\Compilers\NX\nx\aarch64\bin\clang" -Wall -I%NINTENDO_SDK_ROOT%\Include %DASMTARGET% -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_USE_SYSMALLOC -c
+@set LJLIB="%NINTENDO_SDK_ROOT%\Compilers\NX\nx\aarch64\bin\aarch64-nintendo-nx-elf-ar" rc
+@set TARGETLIB_SUFFIX=nx64
+
+%NINTENDO_SDK_ROOT%\Compilers\NX\nx\aarch64\bin\aarch64-nintendo-nx-elf-as -o lj_vm.o lj_vm.s
+goto :DEBUGCHECK
+
+:NX32_CROSSBUILD
+@set LJCOMPILE="%NINTENDO_SDK_ROOT%\Compilers\NX\nx\armv7l\bin\clang" -Wall -I%NINTENDO_SDK_ROOT%\Include %DASMTARGET% -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_USE_SYSMALLOC -c
+@set LJLIB="%NINTENDO_SDK_ROOT%\Compilers\NX\nx\armv7l\bin\armv7l-nintendo-nx-eabihf-ar" rc
+@set TARGETLIB_SUFFIX=nx32
+
+%NINTENDO_SDK_ROOT%\Compilers\NX\nx\armv7l\bin\armv7l-nintendo-nx-eabihf-as -o lj_vm.o lj_vm.s
+:DEBUGCHECK
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set LJCOMPILE=%LJCOMPILE% -DNN_SDK_BUILD_DEBUG -g -O0
+@set TARGETLIB=libluajitD_%TARGETLIB_SUFFIX%.a
+goto :BUILD
+:NODEBUG
+@set LJCOMPILE=%LJCOMPILE% -DNN_SDK_BUILD_RELEASE -O3
+@set TARGETLIB=libluajit_%TARGETLIB_SUFFIX%.a
+:BUILD
+del %TARGETLIB%
+@if "%1" neq "noamalg" goto :AMALG
+for %%f in (lj_*.c lib_*.c) do (
+ %LJCOMPILE% %%f
+ @if errorlevel 1 goto :BAD
+)
+
+%LJLIB% %TARGETLIB% lj_*.o lib_*.o
+@if errorlevel 1 goto :BAD
+@goto :NOAMALG
+:AMALG
+%LJCOMPILE% ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB% %TARGETLIB% ljamalg.o lj_vm.o
+@if errorlevel 1 goto :BAD
+:NOAMALG
+
+@del *.o *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for Nintendo Switch (%TARGETLIB_SUFFIX%) ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Native Tools Command Prompt for VS".
+@echo.
+@echo Either the x86 version for NX32, or x64 for the NX64 target.
+@echo This is because the pointer size of the LuaJIT host tools (buildvm.exe)
+@echo must match the cross-compiled target (32 or 64 bits).
+@echo.
+@echo Keep in mind that NintendoSDK + NX Addon must be installed, too.
+:END
diff --git a/libs/luajit-cmake/luajit/src/ps4build.bat b/libs/luajit-cmake/luajit/src/ps4build.bat
new file mode 100644
index 0000000..fdd09d8
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/ps4build.bat
@@ -0,0 +1,123 @@
+@rem Script to build LuaJIT with the PS4 SDK.
+@rem Donated to the public domain.
+@rem
+@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler)
+@rem or "VS2015 x64 Native Tools Command Prompt".
+@rem
+@rem Then cd to this directory and run this script.
+@rem
+@rem Recommended invocation:
+@rem
+@rem ps4build release build, amalgamated, 64-bit GC
+@rem ps4build debug debug build, amalgamated, 64-bit GC
+@rem
+@rem Additional command-line options (not generally recommended):
+@rem
+@rem gc32 (before debug) 32-bit GC
+@rem noamalg (after debug) non-amalgamated build
+
+@if not defined INCLUDE goto :FAIL
+@if not defined SCE_ORBIS_SDK_DIR goto :FAIL
+
+@setlocal
+@rem ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+@set GC64=
+@set DASC=vm_x64.dasc
+
+@if "%1" neq "gc32" goto :NOGC32
+@shift
+@set GC64=-DLUAJIT_DISABLE_GC64
+@set DASC=vm_x86.dasc
+:NOGC32
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Check for 64 bit host compiler.
+@minilua
+@if not errorlevel 8 goto :FAIL
+
+@set DASMFLAGS=-D P64 -D NO_UNWIND
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% %GC64% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m elfasm -o lj_vm.s
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@rem ---- Cross compiler ----
+@set LJCOMPILE="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-clang" -c -Wall -DLUAJIT_DISABLE_FFI %GC64%
+@set LJLIB="%SCE_ORBIS_SDK_DIR%\host_tools\bin\orbis-ar" rcus
+@set INCLUDE=""
+
+orbis-as -o lj_vm.o lj_vm.s
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set LJCOMPILE=%LJCOMPILE% -g -O0
+@set TARGETLIB=libluajitD_ps4.a
+goto :BUILD
+:NODEBUG
+@set LJCOMPILE=%LJCOMPILE% -O2
+@set TARGETLIB=libluajit_ps4.a
+:BUILD
+del %TARGETLIB%
+@if "%1" neq "noamalg" goto :AMALG
+for %%f in (lj_*.c lib_*.c) do (
+ %LJCOMPILE% %%f
+ @if errorlevel 1 goto :BAD
+)
+
+%LJLIB% %TARGETLIB% lj_*.o lib_*.o
+@if errorlevel 1 goto :BAD
+@goto :NOAMALG
+:AMALG
+%LJCOMPILE% ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB% %TARGETLIB% ljamalg.o lj_vm.o
+@if errorlevel 1 goto :BAD
+:NOAMALG
+
+@del *.o *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for PS4 ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Visual Studio .NET Command Prompt"
+@echo (64-bit host compiler). The PS4 Orbis SDK must be installed, too.
+:END
diff --git a/libs/luajit-cmake/luajit/src/ps5build.bat b/libs/luajit-cmake/luajit/src/ps5build.bat
new file mode 100644
index 0000000..0b1ebd5
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/ps5build.bat
@@ -0,0 +1,123 @@
+@rem Script to build LuaJIT with the PS5 SDK.
+@rem Donated to the public domain.
+@rem
+@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler)
+@rem or "VS20xx x64 Native Tools Command Prompt".
+@rem
+@rem Then cd to this directory and run this script.
+@rem
+@rem Recommended invocation:
+@rem
+@rem ps5build release build, amalgamated, 64-bit GC
+@rem ps5build debug debug build, amalgamated, 64-bit GC
+@rem
+@rem Additional command-line options (not generally recommended):
+@rem
+@rem gc32 (before debug) 32-bit GC
+@rem noamalg (after debug) non-amalgamated build
+
+@if not defined INCLUDE goto :FAIL
+@if not defined SCE_PROSPERO_SDK_DIR goto :FAIL
+
+@setlocal
+@rem ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+@set GC64=
+@set DASC=vm_x64.dasc
+
+@if "%1" neq "gc32" goto :NOGC32
+@shift
+@set GC64=-DLUAJIT_DISABLE_GC64
+@set DASC=vm_x86.dasc
+:NOGC32
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Check for a 64-bit host compiler.
+@minilua
+@if not errorlevel 8 goto :FAIL
+
+@set DASMFLAGS=-D P64 -D NO_UNWIND
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC%
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% %GC64% -DLUAJIT_TARGET=LUAJIT_ARCH_X64 -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLUAJIT_NO_UNWIND host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m elfasm -o lj_vm.s
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@rem ---- Cross compiler ----
+@set LJCOMPILE="%SCE_PROSPERO_SDK_DIR%\host_tools\bin\prospero-clang" -c -Wall -DLUAJIT_DISABLE_FFI %GC64%
+@set LJLIB="%SCE_PROSPERO_SDK_DIR%\host_tools\bin\prospero-llvm-ar" rcus
+@set INCLUDE=""
+
+%SCE_PROSPERO_SDK_DIR%\host_tools\bin\prospero-llvm-as -o lj_vm.o lj_vm.s
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set LJCOMPILE=%LJCOMPILE% -g -O0
+@set TARGETLIB=libluajitD_ps5.a
+goto :BUILD
+:NODEBUG
+@set LJCOMPILE=%LJCOMPILE% -O2
+@set TARGETLIB=libluajit_ps5.a
+:BUILD
+del %TARGETLIB%
+@if "%1" neq "noamalg" goto :AMALG
+for %%f in (lj_*.c lib_*.c) do (
+ %LJCOMPILE% %%f
+ @if errorlevel 1 goto :BAD
+)
+
+%LJLIB% %TARGETLIB% lj_*.o lib_*.o
+@if errorlevel 1 goto :BAD
+@goto :NOAMALG
+:AMALG
+%LJCOMPILE% ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB% %TARGETLIB% ljamalg.o lj_vm.o
+@if errorlevel 1 goto :BAD
+:NOAMALG
+
+@del *.o *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for PS5 ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Visual Studio .NET Command Prompt"
+@echo (64-bit host compiler). The PS5 Prospero SDK must be installed, too.
+:END
diff --git a/libs/luajit-cmake/luajit/src/psvitabuild.bat b/libs/luajit-cmake/luajit/src/psvitabuild.bat
new file mode 100644
index 0000000..2980e15
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/psvitabuild.bat
@@ -0,0 +1,93 @@
+@rem Script to build LuaJIT with the PS Vita SDK.
+@rem Donated to the public domain.
+@rem
+@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler)
+@rem Then cd to this directory and run this script.
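+@rem
+@rem Pass "debug" as the first argument for a debug build (libluajitD.a).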
+
+@if not defined INCLUDE goto :FAIL
+@if not defined SCE_PSP2_SDK_DIR goto :FAIL
+
+@setlocal
+@rem ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Check for a 32-bit host compiler.
+@minilua
+@if errorlevel 8 goto :FAIL
+
+@set DASMFLAGS=-D FPU -D HFABI
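+@rem FPU/HFABI select the VFP hard-float variant of vm_arm.dasc (see its
+@rem .if FPU / .if HFABI sections).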
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_arm.dasc
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% -DLUAJIT_TARGET=LUAJIT_ARCH_ARM -DLUAJIT_OS=LUAJIT_OS_OTHER -DLUAJIT_DISABLE_JIT -DLUAJIT_DISABLE_FFI -DLJ_TARGET_PSVITA=1 host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m elfasm -o lj_vm.s
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@rem ---- Cross compiler ----
+@set LJCOMPILE="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2snc" -c -w -DLUAJIT_DISABLE_FFI -DLUAJIT_USE_SYSMALLOC
+@set LJLIB="%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2ld32" -r --output=
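+@rem (LJLIB ends in --output=, so %LJLIB%%TARGETLIB% below concatenates
+@rem into a single --output=<lib> option.)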
+@set INCLUDE=""
+
+"%SCE_PSP2_SDK_DIR%\host_tools\build\bin\psp2as" -o lj_vm.o lj_vm.s
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set LJCOMPILE=%LJCOMPILE% -g -O0
+@set TARGETLIB=libluajitD.a
+goto :BUILD
+:NODEBUG
+@set LJCOMPILE=%LJCOMPILE% -O2
+@set TARGETLIB=libluajit.a
+:BUILD
+del %TARGETLIB%
+
+%LJCOMPILE% ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB%%TARGETLIB% ljamalg.o lj_vm.o
+@if errorlevel 1 goto :BAD
+
+@del *.o *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for PS Vita ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Visual Studio .NET Command Prompt"
+@echo (32-bit host compiler). The PS Vita SDK must be installed, too.
+:END
diff --git a/libs/luajit-cmake/luajit/src/vm_arm.dasc b/libs/luajit-cmake/luajit/src/vm_arm.dasc
new file mode 100644
index 0000000..770c160
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_arm.dasc
@@ -0,0 +1,4663 @@
+|// Low-level VM code for ARM CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch arm
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|
+|// The following must be C callee-save.
+|.define MASKR8, r4 // 255*8 constant for fast bytecode decoding.
+|.define KBASE, r5 // Constants of current Lua function.
+|.define PC, r6 // Next PC.
+|.define DISPATCH, r7 // Opcode dispatch table.
+|.define LREG, r8 // Register holding lua_State (also in SAVE_L).
+|
+|// C callee-save in EABI, but often refetched. Temporary in iOS 3.0+.
+|.define BASE, r9 // Base of current Lua stack frame.
+|
+|// The following temporaries are not saved across C calls, except for RA/RC.
+|.define RA, r10 // Callee-save.
+|.define RC, r11 // Callee-save.
+|.define RB, r12
+|.define OP, r12 // Overlaps RB, must not be lr.
+|.define INS, lr
+|
+|// Calling conventions. Also used as temporaries.
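+|// Soft-fp doubles travel in even/odd register pairs per the ARM EABI,
+|// hence CARG12 = r0/r1 and CARG34 = r2/r3 below.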
+|.define CARG1, r0
+|.define CARG2, r1
+|.define CARG3, r2
+|.define CARG4, r3
+|.define CARG12, r0 // For 1st soft-fp double.
+|.define CARG34, r2 // For 2nd soft-fp double.
+|
+|.define CRET1, r0
+|.define CRET2, r1
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.define SAVE_R4, [sp, #28]
+|.define CFRAME_SPACE, #28
+|.define SAVE_ERRF, [sp, #24]
+|.define SAVE_NRES, [sp, #20]
+|.define SAVE_CFRAME, [sp, #16]
+|.define SAVE_L, [sp, #12]
+|.define SAVE_PC, [sp, #8]
+|.define SAVE_MULTRES, [sp, #4]
+|.define ARG5, [sp]
+|
+|.define TMPDhi, [sp, #4]
+|.define TMPDlo, [sp]
+|.define TMPD, [sp]
+|.define TMPDp, sp
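+|// Note: TMPDlo/TMPDhi reuse the ARG5 and SAVE_MULTRES slots as short-lived
+|// scratch space for building temporary TValues.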
+|
+|.if FPU
+|.macro saveregs
+| push {r5, r6, r7, r8, r9, r10, r11, lr}
+| vpush {d8-d15}
+| sub sp, sp, CFRAME_SPACE+4
+| str r4, SAVE_R4
+|.endmacro
+|.macro restoreregs_ret
+| ldr r4, SAVE_R4
+| add sp, sp, CFRAME_SPACE+4
+| vpop {d8-d15}
+| pop {r5, r6, r7, r8, r9, r10, r11, pc}
+|.endmacro
+|.else
+|.macro saveregs
+| push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+| sub sp, sp, CFRAME_SPACE
+|.endmacro
+|.macro restoreregs_ret
+| add sp, sp, CFRAME_SPACE
+| pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+|.endmacro
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; ud; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_FUNC, #-8
+|.define FRAME_PC, #-4
+|
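+|// Operand decode. An instruction packs OP in bits 0-7, A in 8-15, C in
+|// 16-23, B in 24-31 and D in 16-31. MASKR8 = 255*8, so e.g. decode_RA8
+|// computes ((ins >> 8) & 0xff) * 8: operands come out pre-scaled for
+|// indexing 8-byte stack slots.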
+|.macro decode_RA8, dst, ins; and dst, MASKR8, ins, lsr #5; .endmacro
+|.macro decode_RB8, dst, ins; and dst, MASKR8, ins, lsr #21; .endmacro
+|.macro decode_RC8, dst, ins; and dst, MASKR8, ins, lsr #13; .endmacro
+|.macro decode_RD, dst, ins; lsr dst, ins, #16; .endmacro
+|.macro decode_OP, dst, ins; and dst, ins, #255; .endmacro
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| ldrb OP, [PC]
+|.endmacro
+|.macro ins_NEXT2
+| ldr INS, [PC], #4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT3
+| ldr OP, [DISPATCH, OP, lsl #2]
+| decode_RA8 RA, INS
+| decode_RD RC, INS
+| bx OP
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+| ins_NEXT3
+|.endmacro
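+|// I.e. load the opcode byte, fetch the instruction while post-incrementing
+|// PC, pre-decode RA/RD and branch through the 4-byte-per-entry dispatch table.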
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Fewer unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+| .define ins_next3, ins_NEXT3
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| .endmacro
+| .macro ins_next3
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Avoid register name substitution for field name.
+#define field_pc pc
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ldr PC, LFUNC:CARG3->field_pc
+| ldrb OP, [PC] // STALL: load PC. early PC.
+| ldr INS, [PC], #4
+| ldr OP, [DISPATCH, OP, lsl #2] // STALL: load OP. early OP.
+| decode_RA8 RA, INS
+| add RA, RA, BASE
+| bx OP
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| str PC, [BASE, FRAME_PC]
+| ins_callt // STALL: locked PC.
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
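+|// Type tags here are small negative numbers, so cmn reg, #-tp sets EQ
+|// exactly on a tag match; for LJ_TISNUM, LO means a double, since FP high
+|// words sort unsigned-below all tags.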
+|.macro checktp, reg, tp; cmn reg, #-tp; .endmacro
+|.macro checktpeq, reg, tp; cmneq reg, #-tp; .endmacro
+|.macro checktpne, reg, tp; cmnne reg, #-tp; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; bne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; bne target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC; bne target; .endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
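+|// Hot counter check: the PC is hashed into one of 64 halfword counters at
+|// GG_DISP2HOT from DISPATCH and decremented by delta; hotloop/hotcall trap
+|// to the JIT compiler when a counter underflows (blo).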
+|.macro hotcheck, delta
+| lsr CARG1, PC, #1
+| and CARG1, CARG1, #126
+| sub CARG1, CARG1, #-GG_DISP2HOT
+| ldrh CARG2, [DISPATCH, CARG1]
+| subs CARG2, CARG2, #delta
+| strh CARG2, [DISPATCH, CARG1]
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP
+| blo ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL
+| blo ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
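+|// States are stored negated, so a single mvn of the LJ_VMST_* constant
+|// materializes the value to store.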
+|.macro mv_vmstate, reg, st; mvn reg, #LJ_VMST_..st; .endmacro
+|.macro st_vmstate, reg; str reg, [DISPATCH, #DISPATCH_GL(vmstate)]; .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
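+|// The table is turned gray again and pushed onto the gc.grayagain list,
+|// so the GC will revisit it.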
+|.macro barrierback, tab, mark, tmp
+| ldr tmp, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| bic mark, mark, #LJ_GC_BLACK // black2gray(tab)
+| str tab, [DISPATCH, #DISPATCH_GL(gc.grayagain)]
+| strb mark, tab->marked
+| str tmp, tab->gclist
+|.endmacro
+|
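+|// .IOS emits its instruction only for iOS builds, where BASE (r9) is
+|// volatile across C calls and must be saved and refetched around them.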
+|.macro .IOS, a, b
+|.if IOS
+| a, b
+|.endif
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: RB = previous base.
+ | tst PC, #FRAME_P
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
+ | mvn CARG2, #~LJ_TTRUE
+ | mov BASE, RB
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | str CARG2, [RA, FRAME_PC] // Prepend true to results.
+ | sub RA, RA, #8
+ |
+ |->vm_returnc:
+ | adds RC, RC, #8 // RC = (nresults+1)*8.
+ | mov CRET1, #LUA_YIELD
+ | beq ->vm_unwind_c_eh
+ | str RC, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
+ | // CARG1 = PC & FRAME_TYPE
+ | bic RB, PC, #FRAME_TYPEP
+ | cmp CARG1, #FRAME_C
+ | sub RB, BASE, RB // RB = previous base.
+ | bne ->vm_returnp
+ |
+ | str RB, L->base
+ | ldr KBASE, SAVE_NRES
+ | mv_vmstate CARG4, C
+ | sub BASE, BASE, #8
+ | subs CARG3, RC, #8
+ | lsl KBASE, KBASE, #3 // KBASE = (nresults_wanted+1)*8
+ | st_vmstate CARG4
+ | beq >2
+ |1:
+ | subs CARG3, CARG3, #8
+ | ldrd CARG12, [RA], #8
+ | strd CARG12, [BASE], #8
+ | bne <1
+ |2:
+ | cmp KBASE, RC // More/less results wanted?
+ | bne >6
+ |3:
+ | str BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ldr RC, SAVE_CFRAME // Restore previous C frame.
+ | mov CRET1, #0 // Ok return status for vm_pcall.
+ | str RC, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | blt >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | ldr CARG3, L->maxstack
+ | mvn CARG2, #~LJ_TNIL
+ | cmp BASE, CARG3
+ | bhs >8
+ | str CARG2, [BASE, #4]
+ | add RC, RC, #8
+ | add BASE, BASE, #8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | sub CARG1, RC, KBASE
+ | cmp KBASE, #0 // LUA_MULTRET+1 case?
+ | subne BASE, BASE, CARG1 // Either keep top or shrink it.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with a (high) nresults adjustment.
+ | str BASE, L->top // Save current top held in BASE (yes).
+ | lsr CARG2, KBASE, #3
+ | mov CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov sp, CARG1
+ | mov CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mv_vmstate CARG4, C
+ | ldr GL:CARG3, L->glref
+ | str CARG4, GL:CARG3->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | bic CARG1, CARG1, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG1
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mov MASKR8, #255
+ | mov RC, #16 // 2 results: false + error message.
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | ldr BASE, L->base
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mvn CARG1, #~LJ_TFALSE
+ | sub RA, BASE, #8 // Results start at BASE-8.
+ | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | mv_vmstate CARG2, INTERP
+ | str CARG1, [BASE, #-4] // Prepend false to error message.
+ | st_vmstate CARG2
+ | b ->vm_returnc
+ |
+ |->vm_unwind_ext: // Complete external unwind.
+#if !LJ_NO_UNWIND
+ | push {r0, r1, r2, lr}
+ | bl extern _Unwind_Complete
+ | ldr r0, [sp]
+ | bl extern _Unwind_DeleteException
+ | pop {r0, r1, r2, lr}
+ | mov r0, r1
+ | bx r2
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | // CARG1 = L
+ | mov CARG2, #LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | mov CARG1, L
+ | str BASE, L->base
+ | add PC, PC, #4 // Must point after first instruction.
+ | str RC, L->top
+ | lsr CARG2, RA, #3
+ |2:
+ | // L->base = new base, L->top = top
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | ldr RC, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L, CARG1
+ | ldr DISPATCH, L:CARG1->glref // Setup pointer to dispatch table.
+ | mov BASE, CARG2
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | str L, SAVE_L
+ | mov PC, #FRAME_CP
+ | str CARG3, SAVE_NRES
+ | add CARG2, sp, #CFRAME_RESUME
+ | ldrb CARG1, L->status
+ | str CARG3, SAVE_ERRF
+ | str L, SAVE_PC // Any value outside of bytecode is ok.
+ | str CARG3, SAVE_CFRAME
+ | cmp CARG1, #0
+ | str CARG2, L->cframe
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | mov RA, BASE
+ | ldr BASE, L->base
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | strb CARG3, L->status
+ | sub RC, CARG1, BASE
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | mv_vmstate CARG2, INTERP
+ | add RC, RC, #8
+ | ands CARG1, PC, #FRAME_TYPE
+ | st_vmstate CARG2
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, #FRAME_CP
+ | str CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, #FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ldr RC, L:CARG1->cframe
+ | str CARG3, SAVE_NRES
+ | mov L, CARG1
+ | str CARG1, SAVE_L
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mov BASE, CARG2
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | str RC, SAVE_CFRAME
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | ldr RB, L->base // RB = old base (for vmeta_call).
+ | ldr CARG1, L->top
+ | mov MASKR8, #255
+ | add PC, PC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | sub PC, PC, RB // PC = frame delta + frame type
+ | mv_vmstate CARG2, INTERP
+ | sub NARGS8:RC, CARG1, BASE
+ | st_vmstate CARG2
+ |
+ |->vm_call_dispatch:
+ | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ldrd CARG34, [BASE, FRAME_FUNC]
+ | checkfunc CARG4, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L, CARG1
+ | ldr RA, L:CARG1->stack
+ | str CARG1, SAVE_L
+ | ldr DISPATCH, L->glref // Setup pointer to dispatch table.
+ | ldr RB, L->top
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | ldr RC, L->cframe
+ | add DISPATCH, DISPATCH, #GG_G2DISP
+ | sub RA, RA, RB // Compute -savestack(L, L->top).
+ | mov RB, #0
+ | str RA, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | str RB, SAVE_ERRF // No error function.
+ | str RC, SAVE_CFRAME
+ | str sp, L->cframe // Add our C frame to cframe chain.
+ | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | blx CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | movs BASE, CRET1
+ | mov PC, #FRAME_CP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
+ | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
+ | ldr CARG1, [BASE, #-16] // Get continuation.
+ | mov CARG4, BASE
+ | mov BASE, RB // Restore caller BASE.
+ |.if FFI
+ | cmp CARG1, #1
+ |.endif
+ | ldr PC, [CARG4, #-12] // Restore PC from [cont|PC].
+ | mvn INS, #~LJ_TNIL
+ | add CARG2, RA, RC
+ | str INS, [CARG2, #-4] // Ensure one valid arg.
+ |.if FFI
+ | bls >1
+ |.endif
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | // BASE = base, RA = resultptr, CARG4 = meta base
+ | bx CARG1
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | sub CARG4, CARG4, #16
+ | sub RC, CARG4, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, CARG4 = meta base
+ | ldr INS, [PC, #-4]
+ | sub CARG2, CARG4, #16
+ | ldrd CARG34, [RA]
+ | str BASE, L->base
+ | decode_RB8 RC, INS
+ | decode_RA8 RA, INS
+ | add CARG1, BASE, RC
+ | subs CARG1, CARG2, CARG1
+ | strdne CARG34, [CARG2]
+ | movne CARG3, CARG1
+ | bne ->BC_CAT_Z
+ | strd CARG34, [BASE, RA]
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tgets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq >3
+ | ldrd CARG34, [CRET1]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #16 // 2 args for func(t, k).
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | .IOS mov RC, BASE
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | .IOS mov BASE, RC
+ | cmp CRET1, #0
+ | ldrdne CARG12, [CRET1]
+ | mvneq CARG2, #~LJ_TNIL
+ | b ->BC_TGETR_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | add CARG2, BASE, RB
+ | b >2
+ |
+ |->vmeta_tsets:
+ | sub CARG2, DISPATCH, #-DISPATCH_GL(tmptv)
+ | mvn CARG4, #~LJ_TTAB
+ | str TAB:RB, [CARG2]
+ | str CARG4, [CARG2, #4]
+ |2:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetb: // RC = index
+ | decode_RB8 RB, INS
+ | str RC, TMPDlo
+ | mvn CARG4, #~LJ_TISNUM
+ | add CARG2, BASE, RB
+ | str CARG4, TMPDhi
+ | mov CARG3, TMPDp
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | ldrd CARG34, [BASE, RA]
+ | beq >3
+ | ins_next1
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | strd CARG34, [CRET1]
+ | ins_next2
+ | ins_next3
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | rsb CARG1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
+ | strd CARG34, [BASE, #16] // Copy value to third argument.
+ | str PC, [BASE, #-12] // [cont|PC]
+ | add PC, CARG1, BASE
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ | str BASE, L->base
+ | .IOS mov RC, BASE
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // Returns TValue *.
+ | .IOS mov BASE, RC
+ | b ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mov CARG1, L
+ | sub PC, PC, #4
+ | mov CARG2, RA
+ | str BASE, L->base
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | decode_OP CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #1
+ | bhi ->vmeta_binop
+ |4:
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | subhs PC, RB, #0x20000
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | ldr INS, [PC, #-4]
+ | ldrd CARG12, [RA]
+ | decode_RA8 CARG3, INS
+ | strd CARG12, [BASE, CARG3]
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | mvn CARG1, #~LJ_TTRUE
+ | cmp CARG1, CARG2 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | ldr CARG2, [RA, #4]
+ | checktp CARG2, LJ_TFALSE // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, INS
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | lsr CARG2, RA, #3
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | .IOS ldr BASE, L->base
+ | b ->cont_nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG4, BASE, RB
+ | add CARG3, KBASE, RC
+ | b >1
+ |
+ |->vmeta_unm:
+ | ldr INS, [PC, #-8]
+ | sub PC, PC, #4
+ | add CARG3, BASE, RC
+ | add CARG4, BASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |1:
+ | decode_OP OP, INS
+ | add CARG2, BASE, RA
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | str OP, ARG5
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub CARG2, CRET1, BASE
+ | str PC, [CRET1, #-12] // [cont|PC]
+ | add PC, CARG2, #FRAME_CONT
+ | mov BASE, CRET1
+ | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | add CARG2, BASE, RC
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+ | .IOS ldr BASE, L->base
+#if LJ_52
+ | cmp CRET1, #0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | ldr TAB:CARG1, [BASE, RC]
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // RB = old base, BASE = new base, RC = nargs*8
+ | mov CARG1, L
+ | str RB, L->base // This is the caller's base!
+ | sub CARG2, BASE, #8
+ | str PC, SAVE_PC
+ | add CARG3, BASE, NARGS8:RC
+ | .IOS mov RA, BASE
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | .IOS mov BASE, RA
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub CARG2, RA, #8
+ | str PC, SAVE_PC
+ | add CARG3, RA, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | .IOS ldr BASE, L->base
+ | ldr LFUNC:CARG3, [RA, FRAME_FUNC] // Guaranteed to be a function here.
+ | ldr PC, [BASE, FRAME_PC]
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | b ->BC_CALLT2_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, RA
+ | str PC, SAVE_PC
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ | .IOS ldr BASE, L->base
+ |.if JIT
+ | ldrb OP, [PC, #-4]
+ |.endif
+ | ldr INS, [PC, #-4]
+ |.if JIT
+ | cmp OP, #BC_JFORI
+ |.endif
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+ |.if JIT
+ | beq =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | checktp CARG2, LJ_TISNUM
+ | cmnlo CARG4, #-LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_d, name
+ | .ffunc name
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | vldr d0, [BASE]
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_dd, name
+ | .ffunc name
+ | ldr CARG2, [BASE, #4]
+ | ldr CARG4, [BASE, #12]
+ | cmp NARGS8:RC, #16
+ | vldr d0, [BASE]
+ | vldr d1, [BASE, #8]
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | cmnlo CARG4, #-LJ_TISNUM
+ | bhs ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
+ |.macro ffgccheck
+ | ldr CARG1, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | cmp CARG1, CARG2
+ | blge ->fff_gcstep
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | checktp CARG2, LJ_TTRUE
+ | bhi ->fff_fallback
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RB, BASE
+ | subs RA, NARGS8:RC, #8
+ | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
+ | beq ->fff_res // Done if exactly 1 argument.
+ |1:
+ | ldrd CARG12, [RB, #8]
+ | subs RA, RA, #8
+ | strd CARG12, [RB], #8
+ | bne <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | mvnlo CARG2, #~LJ_TISNUM
+ | rsb CARG4, CARG2, #(int)(offsetof(GCfuncC, upvalue)>>3)-1
+ | lsl CARG4, CARG4, #3
+ | ldrd CARG12, [CFUNC:CARG3, CARG4]
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktp CARG2, LJ_TTAB
+ | cmnne CARG2, #-LJ_TUDATA
+ | bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RB, TAB:CARG1->metatable
+ |2:
+ | mvn CARG2, #~LJ_TNIL
+ | ldr STR:RC, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])]
+ | cmp TAB:RB, #0
+ | beq ->fff_restv
+ | ldr CARG3, TAB:RB->hmask
+ | ldr CARG4, STR:RC->sid
+ | ldr NODE:INS, TAB:RB->node
+ | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ldrd CARG34, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG12, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG4, LJ_TSTR
+ | cmpeq CARG3, STR:RC
+ | beq >5
+ | cmp NODE:INS, #0
+ | bne <3
+ |4:
+ | mov CARG1, RB // Use metatable as default result.
+ | mvn CARG2, #~LJ_TTAB
+ | b ->fff_restv
+ |5:
+ | checktp CARG2, LJ_TNIL
+ | bne ->fff_restv
+ | b <4
+ |
+ |6:
+ | checktp CARG2, LJ_TISNUM
+ | mvnhs CARG2, CARG2
+ | movlo CARG2, #~LJ_TISNUM
+ | add CARG4, DISPATCH, CARG2, lsl #2
+ | ldr TAB:RB, [CARG4, #DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp CARG2, LJ_TTAB
+ | ldreq TAB:RB, TAB:CARG1->metatable
+ | checktpeq CARG4, LJ_TTAB
+ | ldrbeq CARG4, TAB:CARG1->marked
+ | cmpeq TAB:RB, #0
+ | bne ->fff_fallback
+ | tst CARG4, #LJ_GC_BLACK // isblack(table)
+ | str TAB:CARG3, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, CARG4, CARG3
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | ldrd CARG34, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | mov CARG2, CARG3
+ | checktab CARG4, ->fff_fallback
+ | mov CARG1, L
+ | add CARG3, BASE, #8
+ | .IOS mov RA, BASE
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | .IOS mov BASE, RA
+ | ldrd CARG12, [CRET1]
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8
+ | bne ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bls ->fff_restv
+ | b ->fff_fallback
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checktp CARG2, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv
+ | // Handle numbers inline, unless a number base metatable is present.
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])]
+ | str BASE, L->base
+ | checktp CARG2, LJ_TISNUM
+ | cmpls CARG4, #0
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | bhi ->fff_fallback
+ | ffgccheck
+ | mov CARG1, L
+ | mov CARG2, BASE
+ | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | mvn CARG4, #~LJ_TNIL
+ | checktab CARG2, ->fff_fallback
+ | strd CARG34, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
+ | ldr PC, [BASE, FRAME_PC]
+ | add CARG2, BASE, #8
+ | sub CARG3, BASE, #8
+ | bl extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // Returns 1=found, 0=end, -1=error.
+ | .IOS ldr BASE, L->base
+ | cmp CRET1, #0
+ | mov RC, #(2+1)*8
+ | bgt ->fff_res // Found key/value.
+ | bmi ->fff_fallback // Invalid key.
+ | // End of traversal: return nil.
+ | mvn CRET2, #~LJ_TNIL
+ | b ->fff_restv
+ |
+ |.ffunc_1 pairs
+ | checktab CARG2, ->fff_fallback
+#if LJ_52
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mvn CARG2, #~LJ_TNIL
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | str CARG2, [BASE, #12]
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktp CARG2, LJ_TTAB
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | ldr RB, TAB:CARG1->asize
+ | ldr RC, TAB:CARG1->array
+ | add CARG3, CARG3, #1
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp CARG3, RB
+ | add RC, RC, CARG3, lsl #3
+ | strd CARG34, [BASE, #-8]
+ | ldrdlo CARG12, [RC]
+ | mov RC, #(0+1)*8
+ | bhs >2 // Not in array part?
+ |1:
+ | checktp CARG2, LJ_TNIL
+ | movne RC, #(2+1)*8
+ | strdne CARG12, [BASE]
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | ldr RB, TAB:CARG1->hmask
+ | mov CARG2, CARG3
+ | cmp RB, #0
+ | beq ->fff_res
+ | .IOS mov RA, BASE
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | .IOS mov BASE, RA
+ | cmp CRET1, #0
+ | beq ->fff_res
+ | ldrd CARG12, [CRET1]
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG2, ->fff_fallback
+#if LJ_52
+ | ldr TAB:RB, TAB:CARG1->metatable
+#endif
+ | ldrd CFUNC:CARG34, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cmp TAB:RB, #0
+ | bne ->fff_fallback
+#endif
+ | mov CARG1, #0
+ | mvn CARG2, #~LJ_TISNUM
+ | mov RC, #(3+1)*8
+ | strd CFUNC:CARG34, [BASE, #-8]
+ | strd CARG12, [BASE, #8]
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | mov RB, BASE
+ | add BASE, BASE, #8
+ | moveq PC, #8+FRAME_PCALL
+ | movne PC, #8+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | b ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | ldrb RA, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | checkfunc CARG4, ->fff_fallback // Traceback must be a function.
+ | mov RB, BASE
+ | strd CARG12, [BASE, #8] // Swap function and traceback.
+ | strd CARG34, [BASE]
+ | tst RA, #HOOK_ACTIVE // Remember active hook before pcall.
+ | add BASE, BASE, #16
+ | moveq PC, #16+FRAME_PCALL
+ | movne PC, #16+FRAME_PCALLH
+ | sub NARGS8:RC, NARGS8:RC, #16
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG2, LJ_TTHREAD
+ | bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
+ |.endif
+ | ldr PC, [BASE, FRAME_PC]
+ | str BASE, L->base
+ | ldr CARG2, L:CARG1->top
+ | ldrb RA, L:CARG1->status
+ | ldr RB, L:CARG1->base
+ | add CARG3, CARG2, NARGS8:RC
+ | add CARG4, CARG2, RA
+ | str PC, SAVE_PC
+ | cmp CARG4, RB
+ | beq ->fff_fallback
+ | ldr CARG4, L:CARG1->maxstack
+ | ldr RB, L:CARG1->cframe
+ | cmp RA, #LUA_YIELD
+ | cmpls CARG3, CARG4
+ | cmpls RB, #0
+ | bhi ->fff_fallback
+ |1:
+ |.if resume
+ | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
+ | add BASE, BASE, #8
+ | sub NARGS8:RC, NARGS8:RC, #8
+ |.endif
+ | str CARG3, L:CARG1->top
+ | str BASE, L->top
+ |2: // Move args to coroutine.
+ | ldrd CARG34, [BASE, RB]
+ | cmp RB, NARGS8:RC
+ | strdne CARG34, [CARG2, RB]
+ | add RB, RB, #8
+ | bne <2
+ |
+ | mov CARG3, #0
+ | mov L:RA, L:CARG1
+ | mov CARG4, #0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | ldr CARG3, L:RA->base
+ | mv_vmstate CARG2, INTERP
+ | ldr CARG4, L:RA->top
+ | cmp CRET1, #LUA_YIELD
+ | ldr BASE, L->base
+ | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | st_vmstate CARG2
+ | bhi >8
+ | subs RC, CARG4, CARG3
+ | ldr CARG1, L->maxstack
+ | add CARG2, BASE, RC
+ | beq >6 // No results?
+ | cmp CARG2, CARG1
+ | mov RB, #0
+ | bhi >9 // Need to grow stack?
+ |
+ | sub CARG4, RC, #8
+ | str CARG3, L:RA->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | ldrd CARG12, [CARG3, RB]
+ | cmp RB, CARG4
+ | strd CARG12, [BASE, RB]
+ | add RB, RB, #8
+ | bne <5
+ |6:
+ |.if resume
+ | mvn CARG3, #~LJ_TTRUE
+ | add RC, RC, #16
+ |7:
+ | str CARG3, [BASE, #-4] // Prepend true/false to results.
+ | sub RA, BASE, #8
+ |.else
+ | mov RA, BASE
+ | add RC, RC, #8
+ |.endif
+ | ands CARG1, PC, #FRAME_TYPE
+ | str PC, SAVE_PC
+ | str RC, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | ldrd CARG12, [CARG4, #-8]!
+ | mvn CARG3, #~LJ_TFALSE
+ | mov RC, #(2+1)*8
+ | str CARG4, L:RA->top // Remove error from coroutine stack.
+ | strd CARG12, [BASE] // Copy error message.
+ | b <7
+ |.else
+ | mov CARG1, L
+ | mov CARG2, L:RA
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Never returns.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov CARG1, L
+ | lsr CARG2, RC, #3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | mov CRET1, #0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
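+ |// The macro expands twice: resume prepends a true/false status result,
+ |// while wrap re-throws the error via lj_ffh_coroutine_wrap_err.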
+ |
+ |.ffunc coroutine_yield
+ | ldr CARG1, L->cframe
+ | add CARG2, BASE, NARGS8:RC
+ | str BASE, L->base
+ | tst CARG1, #CFRAME_RESUME
+ | str CARG2, L->top
+ | mov CRET1, #LUA_YIELD
+ | mov CARG3, #0
+ | beq ->fff_fallback
+ | str CARG3, L->cframe
+ | strb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checktp CARG2, LJ_TISNUM
+ | beq ->fff_restv
+ | bhi ->fff_fallback
+ | // Round FP value and normalize result.
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | lsl CARG4, CARG2, #11
+ | lsl CARG3, CARG1, #11
+ | orr CARG4, CARG4, #0x80000000
+ | rsb INS, RB, #32
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | bls >3 // |x| >= 2^31?
+ | orr CARG3, CARG3, CARG4, lsl INS
+ | lsr CARG1, CARG4, RB
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31
+ | addne CARG1, CARG1, #1
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31
+ | addsne CARG1, CARG1, #1
+ | ldrdvs CARG12, >9
+ | bvs ->fff_restv
+ |.endif
+ | cmp CARG2, #0
+ | rsblt CARG1, CARG1, #0
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |2: // |x| < 1
+ | bcs ->fff_restv // |x| is not finite.
+ | orr CARG3, CARG3, CARG1 // ztest = abs(hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // return (ztest & sign) == 0 ? 0 : -1
+ | moveq CARG1, #0
+ | mvnne CARG1, #0
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // return (ztest & ~sign) == 0 ? 0 : 1
+ | moveq CARG1, #0
+ | movne CARG1, #1
+ |.endif
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |3: // |x| >= 2^31. Check for x == -(2^31).
+ | cmpeq CARG4, #0x80000000
+ |.if "func" == "floor"
+ | cmpeq CARG3, #0
+ |.endif
+ | bne >4
+ | cmp CARG2, #0
+ | movmi CARG1, #0x80000000
+ | bmi <1
+ |4:
+ | bl ->vm_..func.._sf
+ | b ->fff_restv
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
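+ |// Little-endian lo/hi words of the double 2^31; loaded via ldrdvs when
+ |// integer negation or increment overflows (V flag set).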
+ |
+ |.ffunc_1 math_abs
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->fff_fallback
+ | bicne CARG2, CARG2, #0x80000000
+ | bne ->fff_restv
+ | cmp CARG1, #0
+ | rsbslt CARG1, CARG1, #0
+ | ldrdvs CARG12, <9
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG12 = TValue result.
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ |->fff_res1:
+ | // PC = return.
+ | mov RC, #(1+1)*8
+ |->fff_res:
+ | // RC = (nresults+1)*8, PC = return.
+ | ands CARG1, PC, #FRAME_TYPE
+ | ldreq INS, [PC, #-4]
+ | str RC, SAVE_MULTRES
+ | sub RA, BASE, #8
+ | bne ->vm_return
+ | decode_RB8 RB, INS
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | decode_RA8 CARG1, INS
+ | ins_next1
+ | ins_next2
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, CARG1
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | add CARG2, RA, RC
+ | mvn CARG1, #~LJ_TNIL
+ | add RC, RC, #8
+ | str CARG1, [CARG2, #-4]
+ | b <5
+ |
+ |.macro math_extern, func
+ |.if HFABI
+ | .ffunc_d math_ .. func
+ |.else
+ | .ffunc_n math_ .. func
+ |.endif
+ | .IOS mov RA, BASE
+ | bl extern func
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ |.if HFABI
+ | .ffunc_dd math_ .. func
+ |.else
+ | .ffunc_nn math_ .. func
+ |.endif
+ | .IOS mov RA, BASE
+ | bl extern func
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |.endmacro
+ |
+ |.if FPU
+ | .ffunc_d math_sqrt
+ | vsqrt.f64 d0, d0
+ |->fff_resd:
+ | ldr PC, [BASE, FRAME_PC]
+ | vstr d0, [BASE, #-8]
+ | b ->fff_res1
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |.ffunc math_log
+ |.if HFABI
+ | ldr CARG2, [BASE, #4]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | vldr d0, [BASE]
+ | bne ->fff_fallback
+ |.else
+ | ldrd CARG12, [BASE]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | bne ->fff_fallback
+ |.endif
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern log
+ | .IOS mov BASE, RA
+ |.if HFABI
+ | b ->fff_resd
+ |.else
+ | b ->fff_restv
+ |.endif
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.if HFABI
+ | .ffunc math_ldexp
+ | ldr CARG4, [BASE, #4]
+ | ldrd CARG12, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | vldr d0, [BASE]
+ | checktp CARG4, LJ_TISNUM
+ | bhs ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | bne ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern ldexp // (double x, int exp)
+ | .IOS mov BASE, RA
+ | b ->fff_resd
+ |.else
+ |.ffunc_2 math_ldexp
+ | checktp CARG2, LJ_TISNUM
+ | bhs ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | .IOS mov RA, BASE
+ | bl extern ldexp // (double x, int exp)
+ | .IOS mov BASE, RA
+ | b ->fff_restv
+ |.endif
+ |
+ |.if HFABI
+ |.ffunc_d math_frexp
+ | mov CARG1, sp
+ | .IOS mov RA, BASE
+ | bl extern frexp
+ | .IOS mov BASE, RA
+ | ldr CARG3, [sp]
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | vstr d0, [BASE, #-8]
+ | mov RC, #(2+1)*8
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |.else
+ |.ffunc_n math_frexp
+ | mov CARG3, sp
+ | .IOS mov RA, BASE
+ | bl extern frexp
+ | .IOS mov BASE, RA
+ | ldr CARG3, [sp]
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG12, [BASE, #-8]
+ | mov RC, #(2+1)*8
+ | strd CARG34, [BASE]
+ | b ->fff_res
+ |.endif
+ |
+ |.if HFABI
+ |.ffunc_d math_modf
+ | sub CARG1, BASE, #8
+ | ldr PC, [BASE, FRAME_PC]
+ | .IOS mov RA, BASE
+ | bl extern modf
+ | .IOS mov BASE, RA
+ | mov RC, #(2+1)*8
+ | vstr d0, [BASE]
+ | b ->fff_res
+ |.else
+ |.ffunc_n math_modf
+ | sub CARG3, BASE, #8
+ | ldr PC, [BASE, FRAME_PC]
+ | .IOS mov RA, BASE
+ | bl extern modf
+ | .IOS mov BASE, RA
+ | mov RC, #(2+1)*8
+ | strd CARG12, [BASE]
+ | b ->fff_res
+ |.endif
+ |
+ |.macro math_minmax, name, cond, fcond
+ |.if FPU
+ | .ffunc_1 name
+ | add RB, BASE, RC
+ | checktp CARG2, LJ_TISNUM
+ | add RA, BASE, #8
+ | bne >4
+ |1: // Handle integers.
+ | ldrd CARG34, [RA]
+ | cmp RA, RB
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bne >3
+ | cmp CARG1, CARG3
+ | add RA, RA, #8
+ | mov..cond CARG1, CARG3
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | vmov s4, CARG1
+ | bhi ->fff_fallback
+ | vldr d1, [RA]
+ | vcvt.f64.s32 d0, s4
+ | b >6
+ |
+ |4:
+ | vldr d0, [BASE]
+ | bhi ->fff_fallback
+ |5: // Handle numbers.
+ | ldrd CARG34, [RA]
+ | vldr d1, [RA]
+ | cmp RA, RB
+ | bhs ->fff_resd
+ | checktp CARG4, LJ_TISNUM
+ | bhs >7
+ |6:
+ | vcmp.f64 d0, d1
+ | vmrs
+ | add RA, RA, #8
+ | vmov..fcond.f64 d0, d1
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | vmov s4, CARG3
+ | bhi ->fff_fallback
+ | vcvt.f64.s32 d1, s4
+ | b <6
+ |
+ |.else
+ |
+ | .ffunc_1 name
+ | checktp CARG2, LJ_TISNUM
+ | mov RA, #8
+ | bne >4
+ |1: // Handle integers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bne >3
+ | cmp CARG1, CARG3
+ | add RA, RA, #8
+ | mov..cond CARG1, CARG3
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | bhi ->fff_fallback
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [BASE, RA]
+ | b >6
+ |
+ |4:
+ | bhi ->fff_fallback
+ |5: // Handle numbers.
+ | ldrd CARG34, [BASE, RA]
+ | cmp RA, RC
+ | bhs ->fff_restv
+ | checktp CARG4, LJ_TISNUM
+ | bhs >7
+ |6:
+ | bl extern __aeabi_cdcmple
+ | add RA, RA, #8
+ | mov..fcond CARG1, CARG3
+ | mov..fcond CARG2, CARG4
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | bhi ->fff_fallback
+ | strd CARG12, TMPD
+ | mov CARG1, CARG3
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, TMPD
+ | b <6
+ |.endif
+ |.endmacro
+ |
+ | math_minmax math_min, gt, pl
+ | math_minmax math_max, lt, le
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8
+ | checktpeq CARG2, LJ_TSTR // Need exactly 1 argument.
+ | bne ->fff_fallback
+ | ldr CARG3, STR:CARG1->len
+ | ldrb CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | mvn CARG2, #~LJ_TISNUM
+ | cmp CARG3, #0
+ | moveq RC, #(0+1)*8
+ | movne RC, #(1+1)*8
+ | strd CARG12, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldr PC, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8 // Need exactly 1 argument.
+ | checktpeq CARG2, LJ_TISNUM
+ | bicseq CARG4, CARG1, #255
+ | mov CARG3, #1
+ | bne ->fff_fallback
+ | str CARG1, TMPD
+ | mov CARG2, TMPDp // Points to stack. Little-endian.
+ |->fff_newstr:
+ | // CARG2 = str, CARG3 = len.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | ldrd CARG12, [BASE]
+ | ldrd CARG34, [BASE, #16]
+ | cmp NARGS8:RC, #16
+ | mvn RB, #0
+ | beq >1
+ | blo ->fff_fallback
+ | checktp CARG4, LJ_TISNUM
+ | mov RB, CARG3
+ | bne ->fff_fallback
+ |1:
+ | ldrd CARG34, [BASE, #8]
+ | checktp CARG2, LJ_TSTR
+ | ldreq CARG2, STR:CARG1->len
+ | checktpeq CARG4, LJ_TISNUM
+ | bne ->fff_fallback
+ | // CARG1 = str, CARG2 = str->len, CARG3 = start, RB = end
+ | add CARG4, CARG2, #1
+ | cmp CARG3, #0 // if (start < 0) start += len+1
+ | addlt CARG3, CARG3, CARG4
+ | cmp CARG3, #1 // if (start < 1) start = 1
+ | movlt CARG3, #1
+ | cmp RB, #0 // if (end < 0) end += len+1
+ | addlt RB, RB, CARG4
+ | bic RB, RB, RB, asr #31 // if (end < 0) end = 0
+ | cmp RB, CARG2 // if (end > len) end = len
+ | add CARG1, STR:CARG1, #sizeof(GCstr)-1
+ | movgt RB, CARG2
+ | add CARG2, CARG1, CARG3
+ | subs CARG3, RB, CARG3 // len = end - start
+ | add CARG3, CARG3, #1 // len += 1
+ | bge ->fff_newstr
+ |->fff_emptystr:
+ | sub STR:CARG1, DISPATCH, #-DISPATCH_GL(strempty)
+ | mvn CARG2, #~LJ_TSTR
+ | b ->fff_restv
+ |
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ | ldr CARG3, [BASE, #4]
+ | cmp NARGS8:RC, #8
+ | ldr STR:CARG2, [BASE]
+ | blo ->fff_fallback
+ | sub SBUF:CARG1, DISPATCH, #-DISPATCH_GL(tmpbuf)
+ | checkstr CARG3, ->fff_fallback
+ | ldr CARG4, SBUF:CARG1->b
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | str L, SBUF:CARG1->L
+ | str CARG4, SBUF:CARG1->w
+ | bl extern lj_buf_putstr_ .. name
+ | bl extern lj_buf_tostr
+ | b ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |// FP number to bit conversion for soft-float. Clobbers r0-r3.
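+ |// It rebuilds the implicit-1 mantissa and shifts by the unbiased exponent
+ |// in integer code, so bit.* needs no FP library call on soft-float builds.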
+ |->vm_tobit_fb:
+ | bhi ->fff_fallback
+ |->vm_tobit:
+ | lsl RB, CARG2, #1
+ | adds RB, RB, #0x00200000
+ | movpl CARG1, #0 // |x| < 1?
+ | bxpl lr
+ | mvn CARG4, #0x3e0
+ | subs RB, CARG4, RB, asr #21
+ | bmi >1 // |x| >= 2^32?
+ | lsl CARG4, CARG2, #11
+ | orr CARG4, CARG4, #0x80000000
+ | orr CARG4, CARG4, CARG1, lsr #21
+ | cmp CARG2, #0
+ | lsr CARG1, CARG4, RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
+ |1:
+ | add RB, RB, #21
+ | lsr CARG4, CARG1, RB
+ | rsb RB, RB, #20
+ | lsl CARG1, CARG2, #12
+ | cmp CARG2, #0
+ | orr CARG1, CARG4, CARG1, lsl RB
+ | rsblt CARG1, CARG1, #0
+ | bx lr
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.endmacro
+ |
+ |.ffunc_bit tobit
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | mov CARG3, CARG1
+ | mov RA, #8
+ |1:
+ | ldrd CARG12, [BASE, RA]
+ | cmp RA, NARGS8:RC
+ | add RA, RA, #8
+ | bge >2
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG3, CARG3, CARG1
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, orr
+ |.ffunc_bit_op bxor, eor
+ |
+ |2:
+ | mvn CARG4, #~LJ_TISNUM
+ | ldr PC, [BASE, FRAME_PC]
+ | strd CARG34, [BASE, #-8]
+ | b ->fff_res1
+ |
+ |.ffunc_bit bswap
+ | eor CARG3, CARG1, CARG1, ror #16
+ | bic CARG3, CARG3, #0x00ff0000
+ | ror CARG1, CARG1, #8
+ | mvn CARG2, #~LJ_TISNUM
+ | eor CARG1, CARG1, CARG3, lsr #8
+ | b ->fff_restv
+ |
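+ |// The four instructions above are the classic pre-ARMv6 byte swap.
+ |// In C, with x the input word (sketch):
+ |//
+ |//   uint32_t t = x ^ ((x >> 16) | (x << 16));  // eor x, x ror #16
+ |//   t &= ~0x00ff0000u;                         // bic: drop byte 2
+ |//   x = (x >> 8) | (x << 24);                  // x ror #8
+ |//   return x ^ (t >> 8);                       // == bswap32(x)
+ |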
+ |.ffunc_bit bnot
+ | mvn CARG1, CARG1
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc bit_..name
+ | ldrd CARG12, [BASE, #8]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ |.if shmod == 0
+ | and RA, CARG1, #31
+ |.else
+ | rsb RA, CARG1, #0
+ |.endif
+ | ldrd CARG12, [BASE]
+ | checktp CARG2, LJ_TISNUM
+ | blne ->vm_tobit_fb
+ | ins CARG1, CARG1, RA
+ | mvn CARG2, #~LJ_TISNUM
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, lsl, 0
+ |.ffunc_bit_sh rshift, lsr, 0
+ |.ffunc_bit_sh arshift, asr, 0
+ |.ffunc_bit_sh rol, ror, 1
+ |.ffunc_bit_sh ror, ror, 0
+ |
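+ |// Shift counts follow BitOp semantics: only the low 5 bits are used
+ |// (shmod == 0 masks with 31). ARM has no rotate-left instruction, so
+ |// bit.rol is a ror by the negated count (shmod == 1). C sketch:
+ |//
+ |//   uint32_t rol(uint32_t x, uint32_t n) {     // == ror(x, -n)
+ |//     n &= 31; return n ? (x << n) | (x >> (32 - n)) : x;
+ |//   }
+ |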
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RC = nargs*8
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | ldr CARG2, L->maxstack
+ | add CARG1, BASE, NARGS8:RC
+ | ldr PC, [BASE, FRAME_PC] // Fallback may overwrite PC.
+ | str CARG1, L->top
+ | ldr CARG3, CFUNC:CARG3->f
+ | str BASE, L->base
+ | add CARG1, CARG1, #8*LUA_MINSTACK
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | cmp CARG1, CARG2
+ | mov CARG1, L
+ | bhi >5 // Need to grow stack.
+ | blx CARG3 // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | lsl RC, CRET1, #3
+ | sub RA, BASE, #8
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | ldr CARG1, L->top
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, CARG1, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | ands CARG1, PC, #FRAME_TYPE
+ | bic CARG2, PC, #FRAME_TYPEP
+ | ldreq INS, [PC, #-4]
+ | andeq CARG2, MASKR8, INS, lsr #5 // Conditional decode_RA8.
+ | addeq CARG2, CARG2, #8
+ | sub RB, BASE, CARG2
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2, #LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | cmp CARG1, CARG1 // Set zero-flag to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mov RA, lr
+ | str BASE, L->base
+ | add CARG2, BASE, NARGS8:RC
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | str CARG2, L->top
+ | mov CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | ldr BASE, L->base
+ | mov lr, RA // Help return address predictor.
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | bx lr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE
+ | bne >1
+ | sub CARG2, CARG2, #1
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | strne CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | decode_OP OP, INS
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr pc, [OP, #GG_DISP2STATIC]
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | ldrb CARG1, [DISPATCH, #DISPATCH_GL(hookmask)]
+ | ldr CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | tst CARG1, #HOOK_ACTIVE // Hook already active?
+ | bne <5
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq <5
+ | subs CARG2, CARG2, #1
+ | str CARG2, [DISPATCH, #DISPATCH_GL(hookcount)]
+ | beq >1
+ | tst CARG1, #LUA_MASKLINE
+ | beq <5
+ |1:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | ldr BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | ldrb OP, [PC, #-4]
+ | ldr INS, [PC, #-4]
+ | add OP, DISPATCH, OP, lsl #2
+ | ldr OP, [OP, #GG_DISP2STATIC]
+ | decode_RA8 RA, INS
+ | decode_RD RC, INS
+ | bx OP
+ |
+ |->cont_hook: // Continue from hook yield.
+ | ldr CARG1, [CARG4, #-24]
+ | add PC, PC, #4
+ | str CARG1, SAVE_MULTRES // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | str PC, SAVE_PC
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | mov CARG2, PC
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | ldrb CARG3, [CARG3, #PC2PROTO(framesize)]
+ | str BASE, L->base
+ | add CARG3, BASE, CARG3, lsl #3
+ | str CARG3, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | orr CARG2, PC, #1
+ |1:
+ |.endif
+ | add CARG4, BASE, RC
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RA, RA, BASE
+ | str CARG4, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | ldr BASE, L->base
+ | ldr CARG4, L->top
+ | mov CARG2, #0
+ | add RA, BASE, RA
+ | sub NARGS8:RC, CARG4, BASE
+ | str CARG2, SAVE_PC // Invalidate for subsequent line hook.
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | ldr INS, [PC, #-4]
+ | bx CRET1
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, CARG4 = meta base
+ | ldr RB, SAVE_MULTRES
+ | ldr INS, [PC, #-4]
+ | ldr TRACE:CARG3, [CARG4, #-24] // Save previous trace.
+ | subs RB, RB, #8
+ | decode_RA8 RC, INS // Call base.
+ | beq >2
+ |1: // Move results down.
+ | ldrd CARG12, [RA]
+ | add RA, RA, #8
+ | subs RB, RB, #8
+ | strd CARG12, [BASE, RC]
+ | add RC, RC, #8
+ | bne <1
+ |2:
+ | decode_RA8 RA, INS
+ | decode_RB8 RB, INS
+ | add RA, RA, RB
+ |3:
+ | cmp RA, RC
+ | mvn CARG2, #~LJ_TNIL
+ | bhi >9 // More results wanted?
+ |
+ | ldrh RA, TRACE:CARG3->traceno
+ | ldrh RC, TRACE:CARG3->link
+ | cmp RC, RA
+ | beq ->cont_nop // Blacklisted.
+ | cmp RC, #0
+ | bne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | str RA, [DISPATCH, #DISPATCH_J(exitno)]
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | str BASE, L->base
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | mov CARG2, PC
+ | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | ldr BASE, L->base
+ | b ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | strd CARG12, [BASE, RC]
+ | add RC, RC, #8
+ | b <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | ldr BASE, L->base
+ | sub PC, PC, #4
+ | b ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | sub sp, sp, #12
+ | push {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12}
+ | ldr CARG1, [sp, #64] // Load original value of lr.
+ | ldr DISPATCH, [lr] // Load DISPATCH.
+ | add CARG3, sp, #64 // Recompute original value of sp.
+ | mv_vmstate CARG4, EXIT
+ | str CARG3, [sp, #52] // Store sp in RID_SP
+ | st_vmstate CARG4
+ | ldr CARG2, [CARG1, #-4]! // Get exit instruction.
+ | str CARG1, [sp, #56] // Store exit pc in RID_LR and RID_PC.
+ | str CARG1, [sp, #60]
+ |.if FPU
+ | vpush {d0-d15}
+ |.endif
+ | lsl CARG2, CARG2, #8
+ | add CARG1, CARG1, CARG2, asr #6
+ | ldr CARG2, [lr, #4] // Load exit stub group offset.
+ | sub CARG1, CARG1, lr
+ | ldr L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | add CARG1, CARG2, CARG1, lsr #2 // Compute exit number.
+ | ldr BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str CARG1, [DISPATCH, #DISPATCH_J(exitno)]
+ | mov CARG4, #0
+ | str BASE, L->base
+ | str L, [DISPATCH, #DISPATCH_J(L)]
+ | str CARG4, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | sub CARG1, DISPATCH, #-GG_DISP2J
+ | mov CARG2, sp
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ldr CARG2, L->cframe
+ | ldr BASE, L->base
+ | bic CARG2, CARG2, #~CFRAME_RAWMASK // Use two steps: bic sp is deprecated.
+ | mov sp, CARG2
+ | ldr PC, SAVE_PC // Get SAVE_PC.
+ | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |->vm_exit_interp:
+ | // CARG1 = MULTRES or negated error code, BASE, PC and DISPATCH set.
+ |.if JIT
+ | ldr L, SAVE_L
+ |1:
+ | cmp CARG1, #0
+ | blt >9 // Check for error from exit.
+ | lsl RC, CARG1, #3
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | str RC, SAVE_MULTRES
+ | mov CARG3, #0
+ | str BASE, L->base
+ | ldr CARG2, LFUNC:CARG2->field_pc
+ | str CARG3, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | mv_vmstate CARG4, INTERP
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | ldrb OP, [PC]
+ | mov MASKR8, #255
+ | ldr INS, [PC], #4
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG4
+ | cmp OP, #BC_FUNCC+2 // Fast function?
+ | bhs >4
+ |2:
+ | cmp OP, #BC_FUNCF // Function header?
+ | ldr OP, [DISPATCH, OP, lsl #2]
+ | decode_RA8 RA, INS
+ | lsrlo RC, INS, #16 // No: Decode operands A*8 and D.
+ | subhs RC, RC, #8
+ | addhs RA, RA, BASE // Yes: RA = BASE+framesize*8, RC = nargs*8
+ | ldrhs CARG3, [BASE, FRAME_FUNC]
+ | bx OP
+ |
+ |4: // Check frame below fast function.
+ | ldr CARG1, [BASE, FRAME_PC]
+ | ands CARG2, CARG1, #FRAME_TYPE
+ | bne <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | ldr CARG3, [CARG1, #-4]
+ | decode_RA8 CARG1, CARG3
+ | sub CARG2, BASE, CARG1
+ | ldr LFUNC:CARG3, [CARG2, #-16]
+ | ldr CARG3, LFUNC:CARG3->field_pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | b <2
+ |
+ |9: // Rethrow error from the right C frame.
+ | rsb CARG2, CARG1, #0
+ | mov CARG1, L
+ | bl extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called from JIT code.
+ |//
+ |// double lj_vm_floor/ceil/trunc(double x);
+ |.macro vm_round, func, hf
+ |.if hf == 1
+ | vmov CARG1, CARG2, d0
+ |.endif
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | bpl >2 // |x| < 1?
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: done.
+ | mvn CARG4, #1
+ | bic CARG3, CARG1, CARG4, lsl RB // ztest = lo & ~lomask
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | bicpl CARG4, CARG2, CARG4, lsl RB // |x| <= 2^20: ztest |= hi & ~himask
+ | orrpl CARG3, CARG3, CARG4
+ | mvnpl CARG4, #1
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ |.if hf == 1
+ | vmoveq d0, CARG1, CARG2
+ |.endif
+ | bxeq lr // iszero: done.
+ | mvn CARG4, #1
+ | cmp RB, #0
+ | lslpl CARG3, CARG4, RB
+ | mvnmi CARG3, #0
+ | add RB, RB, #32
+ | subs CARG1, CARG1, CARG4, lsl RB // lo = lo-lomask
+ | sbc CARG2, CARG2, CARG3 // hi = hi-himask+carry
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |
+ |2: // |x| < 1:
+ | bxcs lr // |x| is not finite.
+ | orr CARG3, CARG3, CARG1 // ztest = (2*hi) | lo
+ |.if "func" == "floor"
+ | tst CARG3, CARG2, asr #31 // iszero = ((ztest & signmask) == 0)
+ |.else
+ | bics CARG3, CARG3, CARG2, asr #31 // iszero = ((ztest & ~signmask) == 0)
+ |.endif
+ | mov CARG1, #0 // lo = 0
+ | and CARG2, CARG2, #0x80000000
+ | ldrne CARG4, <9 // hi = sign(x) | (iszero ? 0.0 : 1.0)
+ | orrne CARG2, CARG2, CARG4
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |.endmacro
+ |
+ |9:
+ | .long 0x3ff00000 // hiword(+1.0)
+ |
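+ |// The mask-based rounding above, modeled in C for the floor case (a
+ |// sketch assuming IEEE-754 layout; ceil differs only in which sign
+ |// triggers the final step when fraction bits were dropped):
+ |//
+ |//   uint64_t b; memcpy(&b, &x, 8);
+ |//   int e = (int)((b >> 52) & 0x7ff) - 1023;   // unbiased exponent
+ |//   if (e >= 52) return x;                     // integral, inf or NaN
+ |//   if (e < 0)                                 // |x| < 1
+ |//     return (b >> 63) ? ((b << 1) ? -1.0 : -0.0) : 0.0;
+ |//   uint64_t mask = ((uint64_t)1 << (52 - e)) - 1;
+ |//   uint64_t frac = b & mask;
+ |//   b &= ~mask; memcpy(&x, &b, 8);             // truncate toward zero
+ |//   return ((b >> 63) && frac) ? x - 1.0 : x;  // negative: step down
+ |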
+ |->vm_floor:
+ |.if HFABI
+ | vm_round floor, 1
+ |.endif
+ |->vm_floor_sf:
+ | vm_round floor, 0
+ |
+ |->vm_ceil:
+ |.if HFABI
+ | vm_round ceil, 1
+ |.endif
+ |->vm_ceil_sf:
+ | vm_round ceil, 0
+ |
+ |.macro vm_trunc, hf
+ |.if JIT
+ |.if hf == 1
+ | vmov CARG1, CARG2, d0
+ |.endif
+ | lsl CARG3, CARG2, #1
+ | adds RB, CARG3, #0x00200000
+ | andpl CARG2, CARG2, #0x80000000 // |x| < 1? hi = sign(x), lo = 0.
+ | movpl CARG1, #0
+ |.if hf == 1
+ | vmovpl d0, CARG1, CARG2
+ |.endif
+ | bxpl lr
+ | mvn CARG4, #0x3cc
+ | subs RB, CARG4, RB, asr #21 // 2^0: RB = 51, 2^51: RB = 0.
+ | bxlo lr // |x| >= 2^52: already done.
+ | mvn CARG4, #1
+ | and CARG1, CARG1, CARG4, lsl RB // lo &= lomask
+ | subs RB, RB, #32
+ | andpl CARG2, CARG2, CARG4, lsl RB // |x| <= 2^20: hi &= himask
+ |.if hf == 1
+ | vmov d0, CARG1, CARG2
+ |.endif
+ | bx lr
+ |.endif
+ |.endmacro
+ |
+ |->vm_trunc:
+ |.if HFABI
+ | vm_trunc 1
+ |.endif
+ |->vm_trunc_sf:
+ | vm_trunc 0
+ |
+ | // double lj_vm_mod(double dividend, double divisor);
+ |->vm_mod:
+ |.if FPU
+ | // Special calling convention. Also, RC (r11) is not preserved.
+ | vdiv.f64 d0, d6, d7
+ | mov RC, lr
+ | vmov CARG1, CARG2, d0
+ | bl ->vm_floor_sf
+ | vmov d0, CARG1, CARG2
+ | vmul.f64 d0, d0, d7
+ | mov lr, RC
+ | vsub.f64 d6, d6, d0
+ | bx lr
+ |.else
+ | push {r0, r1, r2, r3, r4, lr}
+ | bl extern __aeabi_ddiv
+ | bl ->vm_floor_sf
+ | ldrd CARG34, [sp, #8]
+ | bl extern __aeabi_dmul
+ | ldrd CARG34, [sp]
+ | eor CARG2, CARG2, #0x80000000
+ | bl extern __aeabi_dadd
+ | add sp, sp, #20
+ | pop {pc}
+ |.endif
+ |
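+ |// The FPU sequence above is the Lua modulo definition verbatim; in C:
+ |//
+ |//   double lj_vm_mod(double a, double b)
+ |//   {
+ |//     return a - floor(a / b) * b;
+ |//   }
+ |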
+ | // int lj_vm_modi(int dividend, int divisor);
+ |->vm_modi:
+ | ands RB, CARG1, #0x80000000
+ | rsbmi CARG1, CARG1, #0 // a = |dividend|
+ | eor RB, RB, CARG2, asr #1 // Keep signdiff and sign(divisor).
+ | cmp CARG2, #0
+ | rsbmi CARG2, CARG2, #0 // b = |divisor|
+ | subs CARG4, CARG2, #1
+ | cmpne CARG1, CARG2
+ | moveq CARG1, #0 // if (b == 1 || a == b) a = 0
+ | tsthi CARG2, CARG4
+ | andeq CARG1, CARG1, CARG4 // else if ((b & (b-1)) == 0) a &= b-1
+ | bls >1
+ | // Use repeated subtraction to get the remainder.
+ | clz CARG3, CARG1
+ | clz CARG4, CARG2
+ | sub CARG4, CARG4, CARG3
+ | rsbs CARG3, CARG4, #31 // entry = (31-(clz(b)-clz(a)))*8
+ | addne pc, pc, CARG3, lsl #3 // Duff's device.
+ | nop
+ {
+ int i;
+ for (i = 31; i >= 0; i--) {
+ | cmp CARG1, CARG2, lsl #i
+ | subhs CARG1, CARG1, CARG2, lsl #i
+ }
+ }
+ |1:
+ | cmp CARG1, #0
+ | cmpne RB, #0
+ | submi CARG1, CARG1, CARG2 // if (y != 0 && signdiff) y = y - b
+ | eors CARG2, CARG1, RB, lsl #1
+ | rsbmi CARG1, CARG1, #0 // if (sign(divisor) != sign(y)) y = -y
+ | bx lr
+ |
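+ |// The same contract in C (sketch; the '%' stands in for the Duff's
+ |// device above, and the caller has already ruled out b == 0):
+ |//
+ |//   int32_t lj_vm_modi(int32_t a, int32_t b)
+ |//   {
+ |//     uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
+ |//     uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;
+ |//     uint32_t y = ua % ub;               // remainder of magnitudes
+ |//     if (y && (a ^ b) < 0) y = ub - y;   // signs differ: fold to -inf
+ |//     return b < 0 ? -(int32_t)y : (int32_t)y;  // sign of the divisor
+ |//   }
+ |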
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_RES, CARG1
+ |.define NEXT_IDX, CARG2
+ |.define NEXT_TMP0, CARG3
+ |.define NEXT_TMP1, CARG4
+ |.define NEXT_LIM, r12
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL, [sp]
+ |.define NEXT_RES_KEY_I, [sp, #8]
+ |.define NEXT_RES_KEY_IT, [sp, #12]
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2.
+ |->vm_next:
+ |.if JIT
+ | ldr NEXT_TMP0, NEXT_TAB->array
+ | ldr NEXT_LIM, NEXT_TAB->asize
+ | add NEXT_TMP0, NEXT_TMP0, NEXT_IDX, lsl #3
+ |1: // Traverse array part.
+ | subs NEXT_TMP1, NEXT_IDX, NEXT_LIM
+ | bhs >5
+ | ldr NEXT_TMP1, [NEXT_TMP0, #4]
+ | str NEXT_IDX, NEXT_RES_KEY_I
+ | add NEXT_TMP0, NEXT_TMP0, #8
+ | add NEXT_IDX, NEXT_IDX, #1
+ | checktp NEXT_TMP1, LJ_TNIL
+ | beq <1 // Skip holes in array part.
+ | ldr NEXT_TMP0, [NEXT_TMP0, #-8]
+ | mov NEXT_RES, NEXT_RES_PTR
+ | strd NEXT_TMP0, NEXT_RES_VAL // Stores NEXT_TMP1, too.
+ | mvn NEXT_TMP0, #~LJ_TISNUM
+ | str NEXT_TMP0, NEXT_RES_KEY_IT
+ | bx lr
+ |
+ |5: // Traverse hash part.
+ | ldr NEXT_TMP0, NEXT_TAB->hmask
+ | ldr NODE:NEXT_RES, NEXT_TAB->node
+ | add NEXT_TMP1, NEXT_TMP1, NEXT_TMP1, lsl #1
+ | add NEXT_LIM, NEXT_LIM, NEXT_TMP0
+ | add NODE:NEXT_RES, NODE:NEXT_RES, NEXT_TMP1, lsl #3
+ |6:
+ | cmp NEXT_IDX, NEXT_LIM
+ | bhi >9
+ | ldr NEXT_TMP1, NODE:NEXT_RES->val.it
+ | checktp NEXT_TMP1, LJ_TNIL
+ | add NEXT_IDX, NEXT_IDX, #1
+ | bxne lr
+ | // Skip holes in hash part.
+ | add NEXT_RES, NEXT_RES, #sizeof(Node)
+ | b <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | mvn NEXT_TMP0, #0
+ | mov NEXT_RES, NEXT_RES_PTR
+ | str NEXT_TMP0, NEXT_RES_KEY_IT
+ | bx lr
+ |.endif
+ |
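+ |// The traversal above in C terms (sketch with hypothetical is_nil/slot
+ |// helpers; real layouts are GCtab and Node from lj_obj.h). idx runs
+ |// 0..asize-1 through the array part, then on through the hash nodes:
+ |//
+ |//   while (idx < t->asize) {              // array part
+ |//     if (!is_nil(array_slot(t, idx))) return idx;
+ |//     idx++;                              // skip holes
+ |//   }
+ |//   while (idx - t->asize <= t->hmask) {  // hash part: hmask+1 nodes
+ |//     if (!is_nil(&node(t, idx - t->asize)->val)) return idx;
+ |//     idx++;                              // skip holes
+ |//   }
+ |//   return END;                           // caller stores a nil key
+ |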
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions.
+ |// Saveregs already performed. Callback slot number in [sp], g in r12.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | ldr CTSTATE, GL:r12->ctype_state
+ | add DISPATCH, r12, #GG_G2DISP
+ |.if FPU
+ | str r4, SAVE_R4
+ | add r4, sp, CFRAME_SPACE+4+8*8
+ | vstmdb r4!, {d8-d15}
+ |.endif
+ |.if HFABI
+ | add r12, CTSTATE, #offsetof(CTState, cb.fpr[8])
+ |.endif
+ | strd CARG34, CTSTATE->cb.gpr[2]
+ | strd CARG12, CTSTATE->cb.gpr[0]
+ |.if HFABI
+ | vstmdb r12!, {d0-d7}
+ |.endif
+ | ldr CARG4, [sp]
+ | add CARG3, sp, #CFRAME_SIZE
+ | mov CARG1, CTSTATE
+ | lsr CARG4, CARG4, #3
+ | str CARG3, CTSTATE->cb.stack
+ | mov CARG2, sp
+ | str CARG4, CTSTATE->cb.slot
+ | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | ldr BASE, L:CRET1->base
+ | mv_vmstate CARG2, INTERP
+ | ldr RC, L:CRET1->top
+ | mov MASKR8, #255
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | mov L, CRET1
+ | sub RC, RC, BASE
+ | lsl MASKR8, MASKR8, #3 // MASKR8 = 255*8.
+ | st_vmstate CARG2
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | ldr CTSTATE, [DISPATCH, #DISPATCH_GL(ctype_state)]
+ | str BASE, L->base
+ | str CARG4, L->top
+ | str L, CTSTATE->L
+ | mov CARG1, CTSTATE
+ | mov CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | ldrd CARG12, CTSTATE->cb.gpr[0]
+ |.if HFABI
+ | vldr d0, CTSTATE->cb.fpr[0]
+ |.endif
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, r4
+ | push {CCSTATE, r5, r11, lr}
+ | mov CCSTATE, CARG1
+ | ldr CARG1, CCSTATE:CARG1->spadj
+ | ldrb CARG2, CCSTATE->nsp
+ | add CARG3, CCSTATE, #offsetof(CCallState, stack)
+ |.if HFABI
+ | add RB, CCSTATE, #offsetof(CCallState, fpr[0])
+ |.endif
+ | mov r11, sp
+ | sub sp, sp, CARG1 // Readjust stack.
+ | subs CARG2, CARG2, #1
+ |.if HFABI
+ | vldm RB, {d0-d7}
+ |.endif
+ | ldr RB, CCSTATE->func
+ | bmi >2
+ |1: // Copy stack slots.
+ | ldr CARG4, [CARG3, CARG2, lsl #2]
+ | str CARG4, [sp, CARG2, lsl #2]
+ | subs CARG2, CARG2, #1
+ | bpl <1
+ |2:
+ | ldrd CARG12, CCSTATE->gpr[0]
+ | ldrd CARG34, CCSTATE->gpr[2]
+ | blx RB
+ | mov sp, r11
+ |.if HFABI
+ | add r12, CCSTATE, #offsetof(CCallState, fpr[4])
+ |.endif
+ | strd CRET1, CCSTATE->gpr[0]
+ |.if HFABI
+ | vstmdb r12!, {d0-d3}
+ |.endif
+ | pop {CCSTATE, r5, r11, pc}
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (op == BC_ISLT) {
+ | sublt PC, RB, #0x20000
+ } else if (op == BC_ISGE) {
+ | subge PC, RB, #0x20000
+ } else if (op == BC_ISLE) {
+ | suble PC, RB, #0x20000
+ } else {
+ | subgt PC, RB, #0x20000
+ }
+ |1:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ |.if FPU
+ | vldr d0, [RA]
+ | bhi ->vmeta_comp
+ | // d0 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | vldr d1, [RC]
+ | blo >5
+ | bhi ->vmeta_comp
+ | // d0 is a number, CARG3 is an integer.
+ | vmov s4, CARG3
+ | vcvt.f64.s32 d1, s4
+ | b >5
+ |4: // CARG1 is an integer, CARG34 is not an integer.
+ | vldr d1, [RC]
+ | bhi ->vmeta_comp
+ | // CARG1 is an integer, d1 is a number.
+ | vmov s4, CARG1
+ | vcvt.f64.s32 d0, s4
+ |5: // d0 and d1 are numbers.
+ | vcmp.f64 d0, d1
+ | vmrs
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | sublo PC, RB, #0x20000
+ } else if (op == BC_ISGE) {
+ | subhs PC, RB, #0x20000
+ } else if (op == BC_ISLE) {
+ | subls PC, RB, #0x20000
+ } else {
+ | subhi PC, RB, #0x20000
+ }
+ | b <1
+ |.else
+ | bhi ->vmeta_comp
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | bhi ->vmeta_comp
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, [RC] // Restore first operand.
+ | b >5
+ |4: // CARG1 is an integer, CARG34 is not an integer.
+ | bhi ->vmeta_comp
+ | // CARG1 is an integer, CARG34 is a number.
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore second operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmple
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | sublo PC, RA, #0x20000
+ } else if (op == BC_ISGE) {
+ | subhs PC, RA, #0x20000
+ } else if (op == BC_ISLE) {
+ | subls PC, RA, #0x20000
+ } else {
+ | subhi PC, RA, #0x20000
+ }
+ | b <1
+ |.endif
+ break;
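+
+ /* Why GE/GT branch on unordered: ISGE/ISGT are the bytecode negations of
+ ** ISLT/ISLE, so with a NaN operand 'a < b' must fall through while its
+ ** negation must branch. E.g. a = NaN: (a < b) == false, so ISLT falls
+ ** through; !(a < b) == true, so ISGE jumps. After vcmp/vmrs unordered
+ ** sets the C flag, hence lo/ls (C clear) for LT/LE vs. hs/hi (C set) for
+ ** GE/GT; the __aeabi_cdcmple variant relies on the same flag convention.
+ */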
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RC = src2, JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, BASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TISNUM
+ | cmnls CARG4, #-LJ_TISNUM
+ if (vk) {
+ | bls ->BC_ISEQN_Z
+ } else {
+ | bls ->BC_ISNEN_Z
+ }
+ | // Either or both types are not numbers.
+ |.if FFI
+ | checktp CARG2, LJ_TCDATA
+ | checktpne CARG4, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG2, CARG4 // Compare types.
+ | bne >2 // Not the same type?
+ | checktp CARG2, LJ_TISPRI
+ | bhs >1 // Same type and primitive type?
+ |
+ | // Same types and not a primitive type. Compare GCobj or pvalue.
+ | cmp CARG1, CARG3
+ if (vk) {
+ | bne >3 // Different GCobjs or pvalues?
+ |1: // Branch if same.
+ | sub PC, RB, #0x20000
+ |2: // Different.
+ | ins_next
+ |3:
+ | checktp CARG2, LJ_TISTABUD
+ | bhi <2 // Different objects and not table/ud?
+ } else {
+ | beq >1 // Same GCobjs or pvalues?
+ | checktp CARG2, LJ_TISTABUD
+ | bhi >2 // Different objects and not table/ud?
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ if (vk) {
+ | beq <2 // No metatable?
+ } else {
+ | beq >2 // No metatable?
+ }
+ | ldrb RA, TAB:RA->nomm
+ | mov CARG4, #1-vk // ne = 0 or 1.
+ | mov CARG2, CARG1
+ | tst RA, #1<<MM_eq
+ | beq ->vmeta_equal // 'no __eq' flag not set?
+ if (vk) {
+ | b <2
+ } else {
+ |2: // Branch if different.
+ | sub PC, RB, #0x20000
+ |1: // Same.
+ | ins_next
+ }
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RC = str_const (~), JMP with RC = target
+ | mvn RC, RC
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TSTR
+ |.if FFI
+ | bne >7
+ | cmp CARG1, CARG3
+ |.else
+ | cmpeq CARG1, CARG3
+ |.endif
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ |
+ |.if FFI
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RC = num_const (~), JMP with RC = target
+ | lsl RC, RC, #3
+ | ldrd CARG12, [RA, BASE]!
+ | ldrh RB, [PC, #2]
+ | ldrd CARG34, [RC, KBASE]!
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | checktp CARG2, LJ_TISNUM
+ | bne >3
+ | checktp CARG4, LJ_TISNUM
+ | bne >4
+ | cmp CARG1, CARG3
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ |1:
+ } else {
+ |1:
+ | subne PC, RB, #0x20000
+ }
+ |2:
+ | ins_next
+ |
+ |3: // CARG12 is not an integer.
+ |.if FFI
+ | bhi >7
+ |.else
+ if (!vk) {
+ | subhi PC, RB, #0x20000
+ }
+ | bhi <2
+ |.endif
+ |.if FPU
+ | checktp CARG4, LJ_TISNUM
+ | vmov s4, CARG3
+ | vldr d0, [RA]
+ | vldrlo d1, [RC]
+ | vcvths.f64.s32 d1, s4
+ | b >5
+ |4: // CARG1 is an integer, d1 is a number.
+ | vmov s4, CARG1
+ | vldr d1, [RC]
+ | vcvt.f64.s32 d0, s4
+ |5: // d0 and d1 are numbers.
+ | vcmp.f64 d0, d1
+ | vmrs
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ } else {
+ | subne PC, RB, #0x20000
+ }
+ | b <2
+ |.else
+ | // CARG12 is a number.
+ | checktp CARG4, LJ_TISNUM
+ | movlo RA, RB // Save RB.
+ | blo >5
+ | // CARG12 is a number, CARG3 is an integer.
+ | mov CARG1, CARG3
+ | mov RC, RA
+ |4: // CARG1 is an integer, CARG34 is a number.
+ | mov RA, RB // Save RB.
+ | bl extern __aeabi_i2d
+ | ldrd CARG34, [RC] // Restore other operand.
+ |5: // CARG12 and CARG34 are numbers.
+ | bl extern __aeabi_cdcmpeq
+ if (vk) {
+ | subeq PC, RA, #0x20000
+ } else {
+ | subne PC, RA, #0x20000
+ }
+ | b <2
+ |.endif
+ |
+ |.if FFI
+ |7:
+ | checktp CARG2, LJ_TCDATA
+ | bne <1
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RC = primitive_type (~), JMP with RC = target
+ | ldrd CARG12, [BASE, RA]
+ | ldrh RB, [PC, #2]
+ | add PC, PC, #4
+ | mvn RC, RC
+ | add RB, PC, RB, lsl #2
+ |.if FFI
+ | checktp CARG2, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG2, RC
+ if (vk) {
+ | subeq PC, RB, #0x20000
+ } else {
+ | subne PC, RB, #0x20000
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RC = src, JMP with RC = target
+ | add RC, BASE, RC, lsl #3
+ | ldrh RB, [PC, #2]
+ | ldrd CARG12, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | checktp CARG2, LJ_TTRUE
+ if (op == BC_ISTC || op == BC_IST) {
+ | subls PC, RB, #0x20000
+ if (op == BC_ISTC) {
+ | strdls CARG12, [BASE, RA]
+ }
+ } else {
+ | subhi PC, RB, #0x20000
+ if (op == BC_ISFC) {
+ | strdhi CARG12, [BASE, RA]
+ }
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src*8, RC = -type
+ | ldrd CARG12, [BASE, RA]
+ | ins_next1
+ | cmn CARG2, RC
+ | ins_next2
+ | bne ->vmeta_istype
+ | ins_next3
+ break;
+ case BC_ISNUM:
+ | // RA = src*8, RC = -(TISNUM-1)
+ | ldrd CARG12, [BASE, RA]
+ | ins_next1
+ | checktp CARG2, LJ_TISNUM
+ | ins_next2
+ | bhs ->vmeta_istype
+ | ins_next3
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [BASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RC = src
+ | add RC, BASE, RC, lsl #3
+ | ins_next1
+ | ldr CARG1, [RC, #4]
+ | add RA, BASE, RA
+ | ins_next2
+ | checktp CARG1, LJ_TTRUE
+ | mvnls CARG2, #~LJ_TFALSE
+ | mvnhi CARG2, #~LJ_TTRUE
+ | str CARG2, [RA, #4]
+ | ins_next3
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | checktp CARG2, LJ_TISNUM
+ | bhi ->vmeta_unm
+ | eorne CARG2, CARG2, #0x80000000
+ | bne >5
+ | rsbseq CARG1, CARG1, #0
+ | ldrdvs CARG12, >9
+ |5:
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |
+ |.align 8
+ |9:
+ | .long 0x00000000, 0x41e00000 // 2^31.
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RC = src
+ | lsl RC, RC, #3
+ | ldrd CARG12, [BASE, RC]
+ | checkstr CARG2, >2
+ | ldr CARG1, STR:CARG1->len
+ |1:
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |2:
+ | checktab CARG2, ->vmeta_len
+#if LJ_52
+ | ldr TAB:CARG3, TAB:CARG1->metatable
+ | cmp TAB:CARG3, #0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | .IOS mov RC, BASE
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | .IOS mov BASE, RC
+ | b <1
+#if LJ_52
+ |9:
+ | ldrb CARG4, TAB:CARG3->nomm
+ | tst CARG4, #1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithcheck, cond, ncond, target
+ ||if (vk == 1) {
+ | cmn CARG4, #-LJ_TISNUM
+ | cmn..cond CARG2, #-LJ_TISNUM
+ ||} else {
+ | cmn CARG2, #-LJ_TISNUM
+ | cmn..cond CARG4, #-LJ_TISNUM
+ ||}
+ | b..ncond target
+ |.endmacro
+ |.macro ins_arithcheck_int, target
+ | ins_arithcheck eq, ne, target
+ |.endmacro
+ |.macro ins_arithcheck_num, target
+ | ins_arithcheck lo, hs, target
+ |.endmacro
+ |
+ |.macro ins_arithpre
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | .if FPU
+ | ldrd CARG12, [RB, BASE]!
+ | ldrd CARG34, [RC, KBASE]!
+ | .else
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [KBASE, RC]
+ | .endif
+ || break;
+ ||case 1:
+ | .if FPU
+ | ldrd CARG34, [RB, BASE]!
+ | ldrd CARG12, [RC, KBASE]!
+ | .else
+ | ldrd CARG34, [BASE, RB]
+ | ldrd CARG12, [KBASE, RC]
+ | .endif
+ || break;
+ ||default:
+ | .if FPU
+ | ldrd CARG12, [RB, BASE]!
+ | ldrd CARG34, [RC, BASE]!
+ | .else
+ | ldrd CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | .endif
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithpre_fpu, reg1, reg2
+ |.if FPU
+ ||if (vk == 1) {
+ | vldr reg2, [RB]
+ | vldr reg1, [RC]
+ ||} else {
+ | vldr reg1, [RB]
+ | vldr reg2, [RC]
+ ||}
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithpost_fpu, reg
+ | ins_next1
+ | add RA, BASE, RA
+ | ins_next2
+ | vstr reg, [RA]
+ | ins_next3
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins, fpcall
+ | ins_arithpre
+ |.if "intins" ~= "vm_modi" and not FPU
+ | ins_next1
+ |.endif
+ | ins_arithcheck_int >5
+ |.if "intins" == "smull"
+ | smull CARG1, RC, CARG3, CARG1
+ | cmp RC, CARG1, asr #31
+ | ins_arithfallback bne
+ |.elif "intins" == "vm_modi"
+ | movs CARG2, CARG3
+ | ins_arithfallback beq
+ | bl ->vm_modi
+ | mvn CARG2, #~LJ_TISNUM
+ |.else
+ | intins CARG1, CARG1, CARG3
+ | ins_arithfallback bvs
+ |.endif
+ |4:
+ |.if "intins" == "vm_modi" or FPU
+ | ins_next1
+ |.endif
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5: // FP variant.
+ | ins_arithpre_fpu d6, d7
+ | ins_arithfallback ins_arithcheck_num
+ |.if FPU
+ |.if "intins" == "vm_modi"
+ | bl fpcall
+ |.else
+ | fpins d6, d6, d7
+ |.endif
+ | ins_arithpost_fpu d6
+ |.else
+ | bl fpcall
+ |.if "intins" ~= "vm_modi"
+ | ins_next1
+ |.endif
+ | b <4
+ |.endif
+ |.endmacro
+ |
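+ |// The "smull" variant's overflow check: a 32x32->64 multiply fits in
+ |// int32 iff the high result word equals the sign-extension of the low
+ |// word. C sketch (mul_ovf is a hypothetical helper):
+ |//
+ |//   int mul_ovf(int32_t a, int32_t b, int32_t *r) {
+ |//     int64_t p = (int64_t)a * b;     // smull: lo -> CARG1, hi -> RC
+ |//     *r = (int32_t)p;
+ |//     return (int32_t)(p >> 32) != (*r >> 31);  // cmp RC, CARG1, asr #31
+ |//   }
+ |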
+ |.macro ins_arithfp, fpins, fpcall
+ | ins_arithpre
+ |.if "fpins" ~= "extern" or HFABI
+ | ins_arithpre_fpu d0, d1
+ |.endif
+ | ins_arithfallback ins_arithcheck_num
+ |.if "fpins" == "extern"
+ | .IOS mov RC, BASE
+ | bl fpcall
+ | .IOS mov BASE, RC
+ |.elif FPU
+ | fpins d0, d0, d1
+ |.else
+ | bl fpcall
+ |.endif
+ |.if ("fpins" ~= "extern" or HFABI) and FPU
+ | ins_arithpost_fpu d0
+ |.else
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arithdn adds, vadd.f64, extern __aeabi_dadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arithdn subs, vsub.f64, extern __aeabi_dsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arithdn smull, vmul.f64, extern __aeabi_dmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp vdiv.f64, extern __aeabi_ddiv
+ break;
+ case BC_MODVN: case BC_MODNV: case BC_MODVV:
+ | ins_arithdn vm_modi, vm_mod, ->vm_mod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | ins_arithfp extern, extern pow
+ break;
+
+ case BC_CAT:
+ | decode_RB8 RC, INS
+ | decode_RC8 RB, INS
+ | // RA = dst*8, RC = src_start*8, RB = src_end*8 (note: RB/RC swapped!)
+ | sub CARG3, RB, RC
+ | str BASE, L->base
+ | add CARG2, BASE, RB
+ |->BC_CAT_Z:
+ | // RA = dst*8, RC = src_start*8, CARG2 = top-1
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | lsr CARG3, CARG3, #3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | ldr BASE, L->base
+ | cmp CRET1, #0
+ | bne ->vmeta_binop
+ | ldrd CARG34, [BASE, RC]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA] // Copy result to RA.
+ | ins_next3
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RC = str_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TSTR
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RC = cdata_const (~)
+ | mvn RC, RC
+ | ins_next1
+ | ldr CARG1, [KBASE, RC, lsl #2]
+ | mvn CARG2, #~LJ_TCDATA
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, (RC = int16_literal)
+ | mov CARG1, INS, asr #16 // Refetch sign-extended reg.
+ | mvn CARG2, #~LJ_TISNUM
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RC = num_const
+ | lsl RC, RC, #3
+ | ins_next1
+ | ldrd CARG12, [KBASE, RC]
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RC = primitive_type (~)
+ | add RA, BASE, RA
+ | mvn RC, RC
+ | ins_next1
+ | ins_next2
+ | str RC, [RA, #4]
+ | ins_next3
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RC = end
+ | add RA, BASE, RA
+ | add RC, BASE, RC, lsl #3
+ | mvn CARG1, #~LJ_TNIL
+ | str CARG1, [RA, #4]
+ | add RA, RA, #8
+ |1:
+ | str CARG1, [RA, #4]
+ | cmp RA, RC
+ | add RA, RA, #8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RC = uvnum
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsl RC, RC, #2
+ | add RC, RC, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldrd CARG34, [CARG2]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RC = src
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [BASE, RC]
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldrb RC, UPVAL:CARG2->closed
+ | ldr CARG2, UPVAL:CARG2->v
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | add RB, CARG4, #-LJ_TISGCV
+ | cmpne RC, #0
+ | strd CARG34, [CARG2]
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmn RB, #-(LJ_TNUMX - LJ_TISGCV)
+ | ldrbhi RC, GCOBJ:CARG3->gch.marked
+ | bls <1 // tvisgcv(v)
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | tst RC, #LJ_GC_WHITES
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if IOS
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ |.else
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |.endif
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | mvn RC, RC
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldr STR:CARG3, [KBASE, RC, lsl #2]
+ | ldrb RB, UPVAL:CARG2->marked
+ | ldrb RC, UPVAL:CARG2->closed
+ | ldr CARG2, UPVAL:CARG2->v
+ | mvn CARG4, #~LJ_TSTR
+ | tst RB, #LJ_GC_BLACK // isblack(uv)
+ | ldrb RB, STR:CARG3->marked
+ | strd CARG34, [CARG2]
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | tst RB, #LJ_GC_WHITES // iswhite(str)
+ | cmpne RC, #0
+ | sub CARG1, DISPATCH, #-GG_DISP2G
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if IOS
+ | beq <1
+ | mov RC, BASE
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RC
+ |.else
+ | blne extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |.endif
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RC = num_const
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | lsl RC, RC, #3
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | ldrd CARG34, [KBASE, RC]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [CARG2]
+ | ins_next3
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RC = primitive_type (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | lsr RA, RA, #1
+ | add RA, RA, #offsetof(GCfuncL, uvptr)
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA]
+ | mvn RC, RC
+ | ldr CARG2, UPVAL:CARG2->v
+ | ins_next1
+ | ins_next2
+ | str RC, [CARG2, #4]
+ | ins_next3
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RC = target
+ | ldr CARG3, L->openupval
+ | add RC, PC, RC, lsl #2
+ | str BASE, L->base
+ | cmp CARG3, #0
+ | sub PC, RC, #0x20000
+ | beq >1
+ | mov CARG1, L
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | ldr BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RC = proto_const (~) (holding function prototype)
+ | mvn RC, RC
+ | str BASE, L->base
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | str PC, SAVE_PC
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | mov CARG1, L
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TFUNC
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RC = (hbits|asize) | tab_const (~)
+ if (op == BC_TDUP) {
+ | mvn RC, RC
+ }
+ | ldr CARG3, [DISPATCH, #DISPATCH_GL(gc.total)]
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(gc.threshold)]
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | cmp CARG3, CARG4
+ | mov CARG1, L
+ | bhs >5
+ |1:
+ if (op == BC_TNEW) {
+ | lsl CARG2, RC, #21
+ | lsr CARG3, RC, #11
+ | asr RC, CARG2, #21
+ | lsr CARG2, CARG2, #21
+ | cmn RC, #1
+ | addeq CARG2, CARG2, #2
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns GCtab *.
+ } else {
+ | ldr CARG2, [KBASE, RC, lsl #2]
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns GCtab *.
+ }
+ | ldr BASE, L->base
+ | mvn CARG2, #~LJ_TTAB
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ |5:
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mov CARG1, L
+ | b <1
+ break;
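+
+ /* The operand decoding above: RC packs asize into its low 11 bits and
+ ** the hash-size hint into the bits above it; the saturated asize value
+ ** 0x7ff is escaped to mean 0x801 (the cmn #1/addeq #2 pair). Sketch:
+ **
+ **   uint32_t asize = rc & 0x7ff, hbits = rc >> 11;
+ **   if (asize == 0x7ff) asize = 0x801;
+ **   lj_tab_new(L, asize, hbits);
+ */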
+
+ case BC_GGET:
+ | // RA = dst*8, RC = str_const (~)
+ case BC_GSET:
+ | // RA = dst*8, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | mvn RC, RC
+ | ldr TAB:CARG1, LFUNC:CARG2->env
+ | ldr STR:RC, [KBASE, RC, lsl #2]
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tgetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG4, TAB:CARG1->array
+ | ldreq CARG2, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG4, CARG4, CARG3, lsl #3
+ | cmp CARG3, CARG2 // In array part?
+ | ldrdlo CARG34, [CARG4]
+ | bhs ->vmeta_tgetv
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | decode_RB8 RB, INS // Restore RB.
+ | b ->vmeta_tgetv
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TGETS_Z
+ | b ->vmeta_tgetv
+ break;
+ case BC_TGETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->sid
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ |1:
+ | ldrd CARG12, NODE:INS->key // STALL: early NODE:INS.
+ | ldrd CARG34, NODE:INS->val
+ | ldr NODE:INS, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >4
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |3:
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |4: // Follow hash chain.
+ | cmp NODE:INS, #0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, #0 // Optional clear of undef. value (during load stall).
+ | mvn CARG4, #~LJ_TNIL
+ | cmp TAB:CARG1, #0
+ | beq <3 // No metatable: done.
+ | ldrb CARG2, TAB:CARG1->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
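+ /* The hash probe of BC_TGETS_Z in C terms (a sketch; field names follow
+ ** lj_obj.h). sizeof(Node) is 24 bytes on 32-bit ARM, hence the idx*3*8
+ ** node addressing:
+ **
+ **   Node *n = &t->node[str->sid & t->hmask];
+ **   do {
+ **     if (key_is_string(n, str))      // hypothetical predicate
+ **       return &n->val;               // nil value -> __index check
+ **     n = next_node(n);
+ **   } while (n != NULL);
+ **   return NIL;                       // not found: same __index check
+ */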
+ case BC_TGETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = dst*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tgetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr CARG4, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG4, CARG2]
+ | bhs ->vmeta_tgetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | beq >5
+ |1:
+ | ins_next2
+ | strd CARG34, [BASE, RA]
+ | ins_next3
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cmp TAB:CARG2, #0
+ | beq <1 // No metatable: done.
+ | ldrb CARG2, TAB:CARG2->nomm
+ | tst CARG2, #1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb
+ break;
+ case BC_TGETR:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | ldr TAB:CARG1, [BASE, RB]
+ | ldr CARG2, [BASE, RC]
+ | ldr CARG4, TAB:CARG1->array
+ | ldr CARG3, TAB:CARG1->asize
+ | add CARG4, CARG4, CARG2, lsl #3
+ | cmp CARG2, CARG3 // In array part?
+ | bhs ->vmeta_tgetr
+ | ldrd CARG12, [CARG4]
+ |->BC_TGETR_Z:
+ | ins_next1
+ | ins_next2
+ | strd CARG12, [BASE, RA]
+ | ins_next3
+ break;
+
+ case BC_TSETV:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = src*8, RB = table*8, RC = key*8
+ | ldrd TAB:CARG12, [BASE, RB]
+ | ldrd CARG34, [BASE, RC]
+ | checktab CARG2, ->vmeta_tsetv // STALL: load CARG12.
+ | checktp CARG4, LJ_TISNUM // Integer key?
+ | ldreq CARG2, TAB:CARG1->array
+ | ldreq CARG4, TAB:CARG1->asize
+ | bne >9
+ |
+ | add CARG2, CARG2, CARG3, lsl #3
+ | cmp CARG3, CARG4 // In array part?
+ | ldrlo INS, [CARG2, #4]
+ | bhs ->vmeta_tsetv
+ | ins_next1 // Overwrites RB!
+ | checktp INS, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | ldr INS, [PC, #-4] // Restore RA and RB.
+ | decode_RB8 RB, INS
+ | decode_RA8 RA, INS
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ |
+ |9:
+ | checktp CARG4, LJ_TSTR // String key?
+ | moveq STR:RC, CARG3
+ | beq ->BC_TSETS_Z
+ | b ->vmeta_tsetv
+ break;
+ case BC_TSETS:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = str_const (~)
+ | ldrd CARG12, [BASE, RB]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #2] // STALL: early RC.
+ | checktab CARG2, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // (TAB:RB =) TAB:CARG1 = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | ldr CARG3, TAB:CARG1->hmask
+ | ldr CARG4, STR:RC->sid
+ | ldr NODE:INS, TAB:CARG1->node
+ | mov TAB:RB, TAB:CARG1
+ | and CARG3, CARG3, CARG4 // idx = str->sid & tab->hmask
+ | add CARG3, CARG3, CARG3, lsl #1
+ | mov CARG4, #0
+ | add NODE:INS, NODE:INS, CARG3, lsl #3 // node = tab->node + idx*3*8
+ | strb CARG4, TAB:RB->nomm // Clear metamethod cache.
+ |1:
+ | ldrd CARG12, NODE:INS->key
+ | ldr CARG4, NODE:INS->val.it
+ | ldr NODE:CARG3, NODE:INS->next
+ | checktp CARG2, LJ_TSTR
+ | cmpeq CARG1, STR:RC
+ | bne >5
+ | ldrb CARG2, TAB:RB->marked
+ | checktp CARG4, LJ_TNIL // Key found, but nil value?
+ | ldrd CARG34, [BASE, RA]
+ | beq >4
+ |2:
+ | tst CARG2, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, NODE:INS->val
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | cmp TAB:CARG1, #0
+ | beq <2 // No metatable: done.
+ | ldrb CARG1, TAB:CARG1->nomm
+ | tst CARG1, #1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | movs NODE:INS, NODE:CARG3
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ldr TAB:CARG1, TAB:RB->metatable
+ | mov CARG3, TMPDp
+ | str PC, SAVE_PC
+ | cmp TAB:CARG1, #0 // No metatable: continue.
+ | str BASE, L->base
+ | ldrbne CARG2, TAB:CARG1->nomm
+ | mov CARG1, L
+ | beq >6
+ | tst CARG2, #1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mvn CARG4, #~LJ_TSTR
+ | str STR:RC, TMPDlo
+ | mov CARG2, TAB:RB
+ | str CARG4, TMPDhi
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | ldr BASE, L->base
+ | ldrd CARG34, [BASE, RA]
+ | strd CARG34, [CRET1]
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, CARG2, CARG3
+ | b <3
+ break;
+ case BC_TSETB:
+ | decode_RB8 RB, INS
+ | and RC, RC, #255
+ | // RA = src*8, RB = table*8, RC = index
+ | ldrd CARG12, [BASE, RB]
+ | checktab CARG2, ->vmeta_tsetb // STALL: load CARG12.
+ | ldr CARG3, TAB:CARG1->asize
+ | ldr RB, TAB:CARG1->array
+ | lsl CARG2, RC, #3
+ | cmp RC, CARG3
+ | ldrdlo CARG34, [CARG2, RB]!
+ | bhs ->vmeta_tsetb
+ | ins_next1 // Overwrites RB!
+ | checktp CARG4, LJ_TNIL
+ | ldrb INS, TAB:CARG1->marked
+ | ldrd CARG34, [BASE, RA]
+ | beq >5
+ |1:
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | strd CARG34, [CARG2]
+ | bne >7
+ |2:
+ | ins_next2
+ | ins_next3
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:RA, TAB:CARG1->metatable
+ | cmp TAB:RA, #0
+ | beq <1 // No metatable: done.
+ | ldrb RA, TAB:RA->nomm
+ | tst RA, #1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | ldr INS, [PC, #-4] // Restore INS.
+ | decode_RA8 RA, INS
+ | b ->vmeta_tsetb
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG1, INS, CARG3
+ | b <2
+ break;
+ case BC_TSETR:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = src*8, RB = table*8, RC = key*8
+ | ldr TAB:CARG2, [BASE, RB]
+ | ldr CARG3, [BASE, RC]
+ | ldrb INS, TAB:CARG2->marked
+ | ldr CARG1, TAB:CARG2->array
+ | ldr CARG4, TAB:CARG2->asize
+ | tst INS, #LJ_GC_BLACK // isblack(table)
+ | add CARG1, CARG1, CARG3, lsl #3
+ | bne >7
+ |2:
+ | cmp CARG3, CARG4 // In array part?
+ | bhs ->vmeta_tsetr
+ |->BC_TSETR_Z:
+ | ldrd CARG34, [BASE, RA]
+ | ins_next1
+ | ins_next2
+ | strd CARG34, [CARG1]
+ | ins_next3
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, INS, RB
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RC = num_const (start index)
+ | add RA, BASE, RA
+ |1:
+ | ldr RB, SAVE_MULTRES
+ | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
+ | ldr CARG1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
+ | subs RB, RB, #8
+ | ldr CARG4, TAB:CARG2->asize
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG1, RB, lsr #3
+ | cmp CARG3, CARG4
+ | ldr CARG4, TAB:CARG2->array
+ | add RB, RA, RB
+ | bhi >5
+ | add INS, CARG4, CARG1, lsl #3
+ | ldrb CARG1, TAB:CARG2->marked
+ |3: // Copy result slots to table.
+ | ldrd CARG34, [RA], #8
+ | strd CARG34, [INS], #8
+ | cmp RA, RB
+ | blo <3
+ | tst CARG1, #LJ_GC_BLACK // isblack(table)
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | .IOS ldr BASE, L->base
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, CARG1, CARG3
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = nresults+1,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | decode_RC8 NARGS8:RC, INS
+ | add NARGS8:RC, NARGS8:RC, CARG1
+ | b ->BC_CALL_Z
+ break;
+ case BC_CALL:
+ | decode_RC8 NARGS8:RC, INS
+ | // RA = base*8, (RB = nresults+1,) RC = (nargs+1)*8
+ |->BC_CALL_Z:
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [BASE, RA]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add BASE, BASE, #8
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs
+ | ldr CARG1, SAVE_MULTRES
+ | add NARGS8:RC, CARG1, RC, lsl #3
+ | b ->BC_CALLT1_Z
+ break;
+ case BC_CALLT:
+ | lsl NARGS8:RC, RC, #3
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ |->BC_CALLT1_Z:
+ | ldrd LFUNC:CARG34, [RA, BASE]!
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add RA, RA, #8
+ | checkfunc CARG4, ->vmeta_callt
+ | ldr PC, [BASE, FRAME_PC]
+ |->BC_CALLT2_Z:
+ | mov RB, #0
+ | ldrb CARG4, LFUNC:CARG3->ffid
+ | tst PC, #FRAME_TYPE
+ | bne >7
+ |1:
+ | str LFUNC:CARG3, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
+ | cmp NARGS8:RC, #0
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA, RB]
+ | add INS, RB, #8
+ | cmp INS, NARGS8:RC
+ | strd CARG12, [BASE, RB]
+ | mov RB, INS
+ | bne <2
+ |3:
+ | cmp CARG4, #1 // (> FF_C) Calling a fast function?
+ | bhi >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | ldr INS, [PC, #-4]
+ | decode_RA8 RA, INS
+ | sub CARG1, BASE, RA
+ | ldr LFUNC:CARG1, [CARG1, #-16]
+ | ldr CARG1, LFUNC:CARG1->field_pc
+ | ldr KBASE, [CARG1, #PC2PROTO(k)]
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | eor PC, PC, #FRAME_VARG
+ | tst PC, #FRAME_TYPEP // Vararg frame below?
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | bne <1
+ | sub BASE, BASE, PC
+ | ldr PC, [BASE, FRAME_PC]
+ | tst PC, #FRAME_TYPE
+ | movne CARG4, #0 // Clear ffid if no Lua function below.
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldrd CARG34, [RA, #-16]
+ | ldrd CARG12, [RA, #-8]
+ | add BASE, RA, #8
+ | strd CARG34, [RA, #8] // Copy state.
+ | strd CARG12, [RA, #16] // Copy control var.
+ | // STALL: locked CARG34.
+ | ldrd LFUNC:CARG34, [RA, #-24]
+ | mov NARGS8:RC, #16 // Iterators get 2 arguments.
+ | // STALL: load CARG34.
+ | strd LFUNC:CARG34, [RA] // Copy callable.
+ | checkfunc CARG4, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base*8, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA
+ | ldr TAB:RB, [RA, #-16]
+ | ldr CARG1, [RA, #-8] // Get index from control var.
+ | ldr INS, TAB:RB->asize
+ | ldr CARG2, TAB:RB->array
+ | add PC, PC, #4
+ |1: // Traverse array part.
+ | subs RC, CARG1, INS
+ | add CARG3, CARG2, CARG1, lsl #3
+ | bhs >5 // Index points after array part?
+ | ldrd CARG34, [CARG3]
+ | checktp CARG4, LJ_TNIL
+ | addeq CARG1, CARG1, #1 // Skip holes in array part.
+ | beq <1
+ | ldrh RC, [PC, #-2]
+ | mvn CARG2, #~LJ_TISNUM
+ | strd CARG34, [RA, #8]
+ | add RC, PC, RC, lsl #2
+ | add RB, CARG1, #1
+ | strd CARG12, [RA]
+ | sub PC, RC, #0x20000
+ | str RB, [RA, #-8] // Update control var.
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | ldr CARG4, TAB:RB->hmask
+ | ldr NODE:RB, TAB:RB->node
+ |6:
+ | add CARG1, RC, RC, lsl #1
+ | cmp RC, CARG4 // End of iteration? Branch to ITERL+1.
+ | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
+ | bhi <3
+ | ldrd CARG12, NODE:CARG3->val
+ | checktp CARG2, LJ_TNIL
+ | add RC, RC, #1
+ | beq <6 // Skip holes in hash part.
+ | ldrh RB, [PC, #-2]
+ | add RC, RC, INS
+ | ldrd CARG34, NODE:CARG3->key
+ | str RC, [RA, #-8] // Update control var.
+ | strd CARG12, [RA, #8]
+ | add RC, PC, RB, lsl #2
+ | sub PC, RC, #0x20000
+ | strd CARG34, [RA]
+ | b <3
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RC = target (points to ITERN)
+ | add RA, BASE, RA
+ | add RC, PC, RC, lsl #2
+ | ldrd CFUNC:CARG12, [RA, #-24]
+ | ldr CARG3, [RA, #-12]
+ | ldr CARG4, [RA, #-4]
+ | checktp CARG2, LJ_TFUNC
+ | ldrbeq CARG1, CFUNC:CARG1->ffid
+ | checktpeq CARG3, LJ_TTAB
+ | checktpeq CARG4, LJ_TNIL
+ | cmpeq CARG1, #FF_next_N
+ | subeq PC, RC, #0x20000
+ | bne >5
+ | ins_next1
+ | ins_next2
+ | mov CARG1, #0
+ | mvn CARG2, #~LJ_KEYINDEX
+ | strd CARG1, [RA, #-8] // Initialize control var.
+ |1:
+ | ins_next3
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov CARG1, #BC_JMP
+ | mov OP, #BC_ITERC
+ | strb CARG1, [PC, #-4]
+ | sub PC, RC, #0x20000
+ |.if JIT
+ | ldrb CARG1, [PC]
+ | cmp CARG1, #BC_ITERN
+ | bne >6
+ |.endif
+ | strb OP, [PC] // Subsumes ins_next1.
+ | ins_next2
+ | b <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)]
+ | ldrh CARG2, [PC, #2]
+ | ldr TRACE:CARG1, [CARG1, CARG2, lsl #2]
+ | // Subsumes ins_next1 and ins_next2.
+ | ldr INS, TRACE:CARG1->startins
+ | bfi INS, OP, #0, #8
+ | str INS, [PC], #4
+ | b <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | ldr CARG1, [BASE, FRAME_PC]
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | add RC, RC, #FRAME_VARG
+ | add CARG4, RA, RB
+ | sub CARG3, BASE, #8 // CARG3 = vtop
+ | sub RC, RC, CARG1 // RC = vbase
+ | // Note: RC may now even be _above_ BASE if nargs was < numparams.
+ | cmp RB, #0
+ | sub CARG1, CARG3, RC
+ | beq >5 // Copy all varargs?
+ | sub CARG4, CARG4, #16
+ |1: // Copy vararg slots to destination slots.
+ | cmp RC, CARG3
+ | ldrdlo CARG12, [RC], #8
+ | mvnhs CARG2, #~LJ_TNIL
+ | cmp RA, CARG4
+ | strd CARG12, [RA], #8
+ | blo <1
+ |2:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ldr CARG4, L->maxstack
+ | cmp CARG1, #0
+ | movle RB, #8 // MULTRES = (0+1)*8
+ | addgt RB, CARG1, #8
+ | add CARG2, RA, CARG1
+ | str RB, SAVE_MULTRES
+ | ble <2
+ | cmp CARG2, CARG4
+ | bhi >7
+ |6:
+ | ldrd CARG12, [RC], #8
+ | strd CARG12, [RA], #8
+ | cmp RC, CARG3
+ | blo <6
+ | b <2
+ |
+ |7: // Grow stack for varargs.
+ | lsr CARG2, CARG1, #3
+ | str RA, L->top
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub RC, RC, BASE // Need delta, because BASE may change.
+ | str PC, SAVE_PC
+ | sub RA, RA, BASE
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, RC
+ | sub CARG3, BASE, #8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RC = extra results
+ | ldr CARG1, SAVE_MULTRES
+ | ldr PC, [BASE, FRAME_PC]
+ | add RA, BASE, RA
+ | add RC, CARG1, RC, lsl #3
+ | b ->BC_RETM_Z
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | add RA, BASE, RA
+ |->BC_RETM_Z:
+ | str RC, SAVE_MULTRES
+ |1:
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV2_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
+ | ldr INS, [PC, #-4]
+ | subs CARG4, RC, #8
+ | sub CARG3, BASE, #8
+ | beq >3
+ |2:
+ | ldrd CARG12, [RA], #8
+ | add BASE, BASE, #8
+ | subs CARG4, CARG4, #8
+ | strd CARG12, [BASE, #-16]
+ | bne <2
+ |3:
+ | decode_RA8 RA, INS
+ | sub CARG4, CARG3, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
+ |5:
+ | cmp RB, RC // More results expected?
+ | bhi >6
+ | mov BASE, CARG4
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | mvn CARG2, #~LJ_TNIL
+ | add BASE, BASE, #8
+ | add RC, RC, #8
+ | str CARG2, [BASE, #-12]
+ | b <5
+ |
+ |->BC_RETV1_Z: // Non-standard return case.
+ | add RA, BASE, RA
+ |->BC_RETV2_Z:
+ | tst CARG2, #FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, CARG2
+ | ldr PC, [BASE, FRAME_PC]
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | str RC, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | ldreq INS, [PC, #-4]
+ | bne ->BC_RETV1_Z
+ if (op == BC_RET1) {
+ | ldrd CARG12, [BASE, RA]
+ }
+ | sub CARG4, BASE, #8
+ | decode_RA8 RA, INS
+ if (op == BC_RET1) {
+ | strd CARG12, [CARG4]
+ }
+ | sub BASE, CARG4, RA
+ | decode_RB8 RB, INS
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ |5:
+ | cmp RB, RC
+ | bhi >6
+ | ldr CARG2, LFUNC:CARG1->field_pc
+ | ins_next1
+ | ins_next2
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next3
+ |
+ |6: // Fill up results with nil.
+ | sub CARG2, CARG4, #4
+ | mvn CARG3, #~LJ_TNIL
+ | str CARG3, [CARG2, RC]
+ | add RC, RC, #8
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
+ |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
+ |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
+ |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
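+ /* The FOR loop state occupies four consecutive stack slots: the internal
+ ** index (RA+0), the stop value (+8), the step (+16), and the index copy
+ ** visible to the loop body (+24). On 32 bit ARM each slot is a value
+ ** word plus a type-tag word, hence the paired FOR_x/FOR_Tx defines.
+ */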
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RC = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ldrd CARG12, [RA, BASE]!
+ if (op != BC_JFORL) {
+ | add RC, PC, RC, lsl #2
+ }
+ if (!vk) {
+ | ldrd CARG34, FOR_STOP
+ | checktp CARG2, LJ_TISNUM
+ | ldr RB, FOR_TSTEP
+ | bne >5
+ | checktp CARG4, LJ_TISNUM
+ | ldr CARG4, FOR_STEP
+ | checktpeq RB, LJ_TISNUM
+ | bne ->vmeta_for
+ | cmp CARG4, #0
+ | blt >4
+ | cmp CARG1, CARG3
+ } else {
+ | ldrd CARG34, FOR_STEP
+ | checktp CARG2, LJ_TISNUM
+ | bne >5
+ | adds CARG1, CARG1, CARG3
+ | ldr CARG4, FOR_STOP
+ if (op == BC_IFORL) {
+ | addvs RC, PC, #0x20000 // Overflow: prevent branch.
+ } else {
+ | bvs >2 // Overflow: do not enter mcode.
+ }
+ | cmp CARG3, #0
+ | blt >4
+ | cmp CARG1, CARG4
+ }
+ |1:
+ if (op == BC_FORI) {
+ | subgt PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhle RC, [PC, #-2]
+ } else if (op == BC_IFORL) {
+ | suble PC, RC, #0x20000
+ }
+ if (vk) {
+ | strd CARG12, FOR_IDX
+ }
+ |2:
+ | ins_next1
+ | ins_next2
+ | strd CARG12, FOR_EXT
+ if (op == BC_JFORI || op == BC_JFORL) {
+ | ble =>BC_JLOOP
+ }
+ |3:
+ | ins_next3
+ |
+ |4: // Invert check for negative step.
+ if (!vk) {
+ | cmp CARG3, CARG1
+ } else {
+ | cmp CARG4, CARG1
+ }
+ | b <1
+ |
+ |5: // FP loop.
+ if (!vk) {
+ | cmnlo CARG4, #-LJ_TISNUM
+ | cmnlo RB, #-LJ_TISNUM
+ | bhs ->vmeta_for
+ |.if FPU
+ | vldr d0, FOR_IDX
+ | vldr d1, FOR_STOP
+ | cmp RB, #0
+ | vstr d0, FOR_EXT
+ |.else
+ | cmp RB, #0
+ | strd CARG12, FOR_EXT
+ | blt >8
+ |.endif
+ } else {
+ |.if FPU
+ | vldr d0, FOR_IDX
+ | vldr d2, FOR_STEP
+ | vldr d1, FOR_STOP
+ | cmp CARG4, #0
+ | vadd.f64 d0, d0, d2
+ |.else
+ | cmp CARG4, #0
+ | blt >8
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | ldrd CARG34, FOR_STOP
+ | strd CARG12, FOR_EXT
+ |.endif
+ }
+ |6:
+ |.if FPU
+ | vcmpge.f64 d0, d1
+ | vcmplt.f64 d1, d0
+ | vmrs
+ |.else
+ | bl extern __aeabi_cdcmple
+ |.endif
+ if (vk) {
+ |.if FPU
+ | vstr d0, FOR_IDX
+ | vstr d0, FOR_EXT
+ |.endif
+ }
+ if (op == BC_FORI) {
+ | subhi PC, RC, #0x20000
+ } else if (op == BC_JFORI) {
+ | sub PC, RC, #0x20000
+ | ldrhls RC, [PC, #-2]
+ | bls =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | subls PC, RC, #0x20000
+ } else {
+ | bls =>BC_JLOOP
+ }
+ | ins_next1
+ | ins_next2
+ | b <3
+ |
+ |.if not FPU
+ |8: // Invert check for negative step.
+ if (vk) {
+ | bl extern __aeabi_dadd
+ | strd CARG12, FOR_IDX
+ | strd CARG12, FOR_EXT
+ }
+ | mov CARG3, CARG1
+ | mov CARG4, CARG2
+ | ldrd CARG12, FOR_STOP
+ | b <6
+ |.endif
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RC = target
+ | ldrd CARG12, [RA, BASE]!
+ if (op == BC_JITERL) {
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | strdne CARG12, [RA, #-8]
+ | bne =>BC_JLOOP
+ } else {
+ | add RC, PC, RC, lsl #2
+ | // STALL: load CARG12.
+ | cmn CARG2, #-LJ_TNIL // Stop if iterator returned nil.
+ | subne PC, RC, #0x20000 // Otherwise save control var + branch.
+ | strdne CARG12, [RA, #-8]
+ }
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | // Note: RA/RC are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RC = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base (ignored), RC = traceno
+ | ldr CARG1, [DISPATCH, #DISPATCH_J(trace)]
+ | mov CARG2, #0 // Traces on ARM don't store the trace number, so use 0.
+ | ldr TRACE:RC, [CARG1, RC, lsl #2]
+ | st_vmstate CARG2
+ | ldr RA, TRACE:RC->mcode
+ | str BASE, [DISPATCH, #DISPATCH_GL(jit_base)]
+ | str L, [DISPATCH, #DISPATCH_GL(tmpbuf.L)]
+ | bx RA
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RC = target
+ | add RC, PC, RC, lsl #2
+ | sub PC, RC, #0x20000
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | ldrb CARG2, [PC, #-4+PC2PROTO(numparams)]
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | bhi ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ | ins_next2
+ }
+ |2:
+ | cmp NARGS8:RC, CARG2, lsl #3 // Check for missing parameters.
+ | mvn CARG4, #~LJ_TNIL
+ | blo >3
+ if (op == BC_JFUNCF) {
+ | decode_RD RC, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next3
+ }
+ |
+ |3: // Clear missing parameters.
+ | strd CARG34, [BASE, NARGS8:RC]
+ | add NARGS8:RC, NARGS8:RC, #8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | add CARG4, BASE, RC
+ | add RA, RA, RC
+ | str LFUNC:CARG3, [CARG4] // Store copy of LFUNC.
+ | add CARG2, RC, #8+FRAME_VARG
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | str CARG2, [CARG4, #4] // Store delta + FRAME_VARG.
+ | bhs ->vm_growstack_l
+ | ldrb RB, [PC, #-4+PC2PROTO(numparams)]
+ | mov RA, BASE
+ | mov RC, CARG4
+ | cmp RB, #0
+ | add BASE, CARG4, #8
+ | beq >3
+ | mvn CARG3, #~LJ_TNIL
+ |1:
+ | cmp RA, RC // Less args than parameters?
+ | ldrdlo CARG12, [RA], #8
+ | movhs CARG2, CARG3
+ | strlo CARG3, [RA, #-4] // Clear old fixarg slot (help the GC).
+ |2:
+ | subs RB, RB, #1
+ | strd CARG12, [CARG4, #8]!
+ | bne <1
+ |3:
+ | ins_next
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ldr CARG4, CFUNC:CARG3->f
+ } else {
+ | ldr CARG4, [DISPATCH, #DISPATCH_GL(wrapf)]
+ }
+ | add CARG2, RA, NARGS8:RC
+ | ldr CARG1, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | str BASE, L->base
+ | cmp CARG2, CARG1
+ | str RC, L->top
+ if (op == BC_FUNCCW) {
+ | ldr CARG2, CFUNC:CARG3->f
+ }
+ | mv_vmstate CARG3, C
+ | mov CARG1, L
+ | bhi ->vm_growstack_c // Need to grow stack.
+ | st_vmstate CARG3
+ | blx CARG4 // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | ldr BASE, L->base
+ | mv_vmstate CARG3, INTERP
+ | ldr CRET2, L->top
+ | str L, [DISPATCH, #DISPATCH_GL(cur_L)]
+ | lsl RC, CRET1, #3
+ | st_vmstate CARG3
+ | ldr PC, [BASE, FRAME_PC]
+ | sub RA, CRET2, RC // RA = L->top - nresults*8
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 0xe\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 0xd\n\t.uleb128 0\n" /* def_cfa sp */
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n", /* offset lr */
+ fcofs, CFRAME_SIZE);
+ for (i = 11; i >= (LJ_ARCH_HASFPU ? 5 : 4); i--) /* offset r4-r11 */
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2+(11-i));
+#if LJ_ARCH_HASFPU
+ for (i = 15; i >= 8; i--) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 %d, %d\n",
+ 64+2*i, 10+2*(15-i));
+ fprintf(ctx->fp, "\t.byte 0x84\n\t.uleb128 %d\n", 25); /* offset r4 */
+#endif
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x8e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x8b\n\t.uleb128 2\n" /* offset r11 */
+ "\t.byte 0x85\n\t.uleb128 3\n" /* offset r5 */
+ "\t.byte 0x84\n\t.uleb128 4\n" /* offset r4 */
+ "\t.byte 0xd\n\t.uleb128 0xb\n" /* def_cfa_register r11 */
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_arm64.dasc b/libs/luajit-cmake/luajit/src/vm_arm64.dasc
new file mode 100644
index 0000000..3448d0d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_arm64.dasc
@@ -0,0 +1,4158 @@
+|// Low-level VM code for ARM64 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch arm64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// ARM64 registers and the AAPCS64 ABI 1.0 at a glance:
+|//
+|// x0-x17 temp, x19-x28 callee-saved, x29 fp, x30 lr
+|// x18 is reserved on most platforms. Don't use it, save it, or restore it.
+|// x31 doesn't exist. Register number 31 either means xzr/wzr (zero) or sp,
+|// depending on the instruction.
+|// v0-v7 temp, v8-v15 callee-saved (only d8-d15 preserved), v16-v31 temp
+|//
+|// x0-x7/v0-v7 hold parameters and results.
+|
+|// Fixed register assignments for the interpreter.
+|
+|// The following must be C callee-save.
+|.define BASE, x19 // Base of current Lua stack frame.
+|.define KBASE, x20 // Constants of current Lua function.
+|.define PC, x21 // Next PC.
+|.define GLREG, x22 // Global state.
+|.define LREG, x23 // Register holding lua_State (also in SAVE_L).
+|.define TISNUM, x24 // Constant LJ_TISNUM << 47.
+|.define TISNUMhi, x25 // Constant LJ_TISNUM << 15.
+|.define TISNIL, x26 // Constant -1LL.
+|.define fp, x29 // Yes, we have to maintain a frame pointer.
+|
+|.define ST_INTERP, w26 // Constant -1.
+|
+|// The following temporaries are not saved across C calls, except for RA/RC.
+|.define RA, x27
+|.define RC, x28
+|.define RB, x17
+|.define RAw, w27
+|.define RCw, w28
+|.define RBw, w17
+|.define INS, x16
+|.define INSw, w16
+|.define ITYPE, x15
+|.define TMP0, x8
+|.define TMP1, x9
+|.define TMP2, x10
+|.define TMP3, x11
+|.define TMP0w, w8
+|.define TMP1w, w9
+|.define TMP2w, w10
+|.define TMP3w, w11
+|
+|// Calling conventions. Also used as temporaries.
+|.define CARG1, x0
+|.define CARG2, x1
+|.define CARG3, x2
+|.define CARG4, x3
+|.define CARG5, x4
+|.define CARG1w, w0
+|.define CARG2w, w1
+|.define CARG3w, w2
+|.define CARG4w, w3
+|.define CARG5w, w4
+|
+|.define FARG1, d0
+|.define FARG2, d1
+|
+|.define CRET1, x0
+|.define CRET1w, w0
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|
+|.define CFRAME_SPACE, 208
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_FP_LR_, 192
+|.define SAVE_GPR_, 112 // 112+10*8: 64 bit GPR saves
+|.define SAVE_FPR_, 48 // 48+8*8: 64 bit FPR saves
+|// Unused [sp, #44] // 32 bit values
+|.define SAVE_NRES, [sp, #40]
+|.define SAVE_ERRF, [sp, #36]
+|.define SAVE_MULTRES, [sp, #32]
+|.define TMPD, [sp, #24] // 64 bit values
+|.define SAVE_L, [sp, #16]
+|.define SAVE_PC, [sp, #8]
+|.define SAVE_CFRAME, [sp, #0]
+|//----- 16 byte aligned, <-- sp while in interpreter.
+|
+|.define TMPDofs, #24
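+|
+|// Rough C view of this frame (a sketch; the struct and field names are
+|// illustrative, offsets follow the defines above):
+|//
+|//   struct CFrame {            // 208 bytes, 16-byte aligned
+|//     void *cframe;            //   +0  SAVE_CFRAME
+|//     const BCIns *pc;         //   +8  SAVE_PC
+|//     lua_State *L;            //  +16  SAVE_L
+|//     TValue tmpd;             //  +24  TMPD
+|//     int32_t multres;         //  +32  SAVE_MULTRES
+|//     int32_t errf;            //  +36  SAVE_ERRF
+|//     int32_t nres;            //  +40  SAVE_NRES
+|//     int32_t unused;          //  +44
+|//     double fprsave[8];       //  +48  d8-d15
+|//     uint64_t gprsave[10];    // +112  x19-x28
+|//     void *fp, *lr;           // +192  SAVE_FP_LR_
+|//   };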
+|
+|.macro save_, gpr1, gpr2, fpr1, fpr2
+| stp d..fpr2, d..fpr1, [sp, # SAVE_FPR_+(14-fpr1)*8]
+| stp x..gpr2, x..gpr1, [sp, # SAVE_GPR_+(27-gpr1)*8]
+|.endmacro
+|.macro rest_, gpr1, gpr2, fpr1, fpr2
+| ldp d..fpr2, d..fpr1, [sp, # SAVE_FPR_+(14-fpr1)*8]
+| ldp x..gpr2, x..gpr1, [sp, # SAVE_GPR_+(27-gpr1)*8]
+|.endmacro
+|
+|.macro saveregs
+| sub sp, sp, # CFRAME_SPACE
+| stp fp, lr, [sp, # SAVE_FP_LR_]
+| add fp, sp, # SAVE_FP_LR_
+| stp x20, x19, [sp, # SAVE_GPR_+(27-19)*8]
+| save_ 21, 22, 8, 9
+| save_ 23, 24, 10, 11
+| save_ 25, 26, 12, 13
+| save_ 27, 28, 14, 15
+|.endmacro
+|.macro restoreregs
+| ldp x20, x19, [sp, # SAVE_GPR_+(27-19)*8]
+| rest_ 21, 22, 8, 9
+| rest_ 23, 24, 10, 11
+| rest_ 25, 26, 12, 13
+| rest_ 27, 28, 14, 15
+| ldp fp, lr, [sp, # SAVE_FP_LR_]
+| add sp, sp, # CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State, GLREG
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; brk; .endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_FUNC, #-16
+|.define FRAME_PC, #-8
+|
+|// Endian-specific defines.
+|.if ENDIAN_LE
+|.define LO, 0
+|.define OFS_RD, 2
+|.define OFS_RB, 3
+|.define OFS_RA, 1
+|.define OFS_OP, 0
+|.else
+|.define LO, 4
+|.define OFS_RD, 0
+|.define OFS_RB, 0
+|.define OFS_RA, 2
+|.define OFS_OP, 3
+|.endif
+|
+|.macro decode_RA, dst, ins; ubfx dst, ins, #8, #8; .endmacro
+|.macro decode_RB, dst, ins; ubfx dst, ins, #24, #8; .endmacro
+|.macro decode_RC, dst, ins; ubfx dst, ins, #16, #8; .endmacro
+|.macro decode_RD, dst, ins; ubfx dst, ins, #16, #16; .endmacro
+|.macro decode_RC8RD, dst, src; ubfiz dst, src, #3, #8; .endmacro
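+|
+|// In C terms (a sketch; assumes the usual LuaJIT instruction layout
+|// OP:8 A:8 D:16, with D overlapping C:8 B:8):
+|//   ra = (ins >>  8) & 0xff;    // decode_RA
+|//   rb = (ins >> 24) & 0xff;    // decode_RB
+|//   rc = (ins >> 16) & 0xff;    // decode_RC
+|//   rd = (ins >> 16) & 0xffff;  // decode_RD
+|//   dst = (src & 0xff) << 3;    // decode_RC8RD: operand*8 slot offset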
+|
+|// Instruction decode+dispatch.
+|.macro ins_NEXT
+| ldr INSw, [PC], #4
+| add TMP1, GL, INS, uxtb #3
+| decode_RA RA, INS
+| ldr TMP0, [TMP1, #GG_G2DISP]
+| decode_RD RC, INS
+| br TMP0
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
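+|
+|// In C terms the replicated variant is like ending every opcode handler
+|// with its own computed goto (a sketch of the idea, not the mechanism):
+|//   ins = *pc++; goto *dispatch[ins & 0xff];
+|// The common variant funnels all opcodes through one such indirect
+|// branch, which concentrates branch-predictor state on a single site.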
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ldr PC, LFUNC:CARG3->pc
+| ldr INSw, [PC], #4
+| add TMP1, GL, INS, uxtb #3
+| decode_RA RA, INS
+| ldr TMP0, [TMP1, #GG_G2DISP]
+| add RA, BASE, RA, lsl #3
+| br TMP0
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, CARG3 = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| str PC, [BASE, FRAME_PC]
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to check the TValue type and extract the GCobj. Branch on failure.
+|.macro checktp, reg, tp, target
+| asr ITYPE, reg, #47
+| cmn ITYPE, #-tp
+| and reg, reg, #LJ_GCVMASK
+| bne target
+|.endmacro
+|.macro checktp, dst, reg, tp, target
+| asr ITYPE, reg, #47
+| cmn ITYPE, #-tp
+| and dst, reg, #LJ_GCVMASK
+| bne target
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
+|.macro checkint, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| bne target
+|.endmacro
+|.macro checknum, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| bls target
+|.endmacro
+|.macro checknumber, reg, target
+| cmp TISNUMhi, reg, lsr #32
+| blo target
+|.endmacro
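+|
+|// Sketch of these checks on NaN-tagged TValues (LJ_GC64 layout: type
+|// tag in the top 17 bits, 47-bit payload below):
+|//   int64_t itype = (int64_t)tv >> 47;       // sign-extended tag
+|//   if (itype != LJ_TSTR) goto target;       // checkstr
+|//   GCstr *s = (GCstr *)(tv & LJ_GCVMASK);   // strip tag from pointer
+|// Integers (dual-number mode) keep their tag in the high word, so
+|// checkint compares tv >> 32 against the constant held in TISNUMhi;
+|// the int32 payload sits in the low word.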
+|
+|.macro mov_false, reg; movn reg, #0x8000, lsl #32; .endmacro
+|.macro mov_true, reg; movn reg, #0x0001, lsl #48; .endmacro
+|
+#define GL_J(field) (GG_G2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
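+/* PC2PROTO yields a negative offset because the GCproto header is
+** allocated immediately before its bytecode. E.g. [PC, #-4+PC2PROTO(k)]
+** reads the constants pointer of the prototype whose first instruction
+** precedes PC. In C terms (sketch):
+**   GCproto *pt = (GCproto *)((char *)bc - sizeof(GCproto));
+*/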
+|
+|.macro hotcheck, delta
+| lsr CARG1, PC, #1
+| and CARG1, CARG1, #126
+| add CARG1, CARG1, #GG_G2DISP+GG_DISP2HOT
+| ldrh CARG2w, [GL, CARG1]
+| subs CARG2, CARG2, #delta
+| strh CARG2w, [GL, CARG1]
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP
+| blo ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL
+| blo ->vm_hotcall
+|.endmacro
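+|
+|// hotcheck hashes the PC into the 64-entry hot-counter table in the
+|// dispatch area and counts down; an unsigned borrow means "hot".
+|// Roughly, in C (HOTCOUNT_SIZE == 64):
+|//   uint16_t *hc = (uint16_t *)((char *)GL + GG_G2DISP + GG_DISP2HOT);
+|//   unsigned idx = (pc >> 2) & (HOTCOUNT_SIZE-1);
+|//   hc[idx] -= delta;  // borrow => branch to ->vm_hotloop/->vm_hotcall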
+|
+|// Set current VM state.
+|.macro mv_vmstate, reg, st; movn reg, #LJ_VMST_..st; .endmacro
+|.macro st_vmstate, reg; str reg, GL->vmstate; .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| ldr tmp, GL->gc.grayagain
+| and mark, mark, #~LJ_GC_BLACK // black2gray(tab)
+| str tab, GL->gc.grayagain
+| strb mark, tab->marked
+| str tmp, tab->gclist
+|.endmacro
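+|
+|// In C terms this mirrors the black->gray backward barrier (sketch):
+|//   t->marked &= ~LJ_GC_BLACK;      // black2gray(t)
+|//   t->gclist = g->gc.grayagain;    // chain onto grayagain list
+|//   g->gc.grayagain = obj2gco(t);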
+|
+|//-----------------------------------------------------------------------
+
+#if !LJ_DUALNUM
+#error "Only dual-number mode supported for ARM64 target"
+#endif
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: RB = previous base.
+ | tbz PC, #2, ->cont_dispatch // (PC & FRAME_P) == 0?
+ |
+ | // Return from pcall or xpcall fast func.
+ | ldr PC, [RB, FRAME_PC] // Fetch PC of previous frame.
+ | mov_true TMP0
+ | mov BASE, RB
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | str TMP0, [RA, #-8]! // Prepend true to results.
+ |
+ |->vm_returnc:
+ | adds RC, RC, #8 // RC = (nresults+1)*8.
+ | mov CRET1, #LUA_YIELD
+ | beq ->vm_unwind_c_eh
+ | str RCw, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RC/MULTRES = (nresults+1)*8, PC = return
+ | // CARG1 = PC & FRAME_TYPE
+ | and RB, PC, #~FRAME_TYPEP
+ | cmp CARG1, #FRAME_C
+ | sub RB, BASE, RB // RB = previous base.
+ | bne ->vm_returnp
+ |
+ | str RB, L->base
+ | ldrsw CARG2, SAVE_NRES // CARG2 = nresults+1.
+ | mv_vmstate TMP0w, C
+ | sub BASE, BASE, #16
+ | subs TMP2, RC, #8
+ | st_vmstate TMP0w
+ | beq >2
+ |1:
+ | subs TMP2, TMP2, #8
+ | ldr TMP0, [RA], #8
+ | str TMP0, [BASE], #8
+ | bne <1
+ |2:
+ | cmp RC, CARG2, lsl #3 // More/less results wanted?
+ | bne >6
+ |3:
+ | str BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ldr RC, SAVE_CFRAME // Restore previous C frame.
+ | mov CRET1, #0 // Ok return status for vm_pcall.
+ | str RC, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret
+ |
+ |6:
+ | bgt >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | ldr CARG3, L->maxstack
+ | cmp BASE, CARG3
+ | bhs >8
+ | str TISNIL, [BASE], #8
+ | add RC, RC, #8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | cbz CARG2, <3 // LUA_MULTRET+1 case?
+ | sub CARG1, RC, CARG2, lsl #3
+ | sub BASE, BASE, CARG1 // Shrink top.
+ | b <3
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with (high) nresults adjustment.
+ | str BASE, L->top // Save current top held in BASE (yes).
+ | mov CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->top // Need the (realloced) L->top in BASE.
+ | ldrsw CARG2, SAVE_NRES
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov sp, CARG1
+ | mov CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | mv_vmstate TMP0w, C
+ | ldr GL, L->glref
+ | st_vmstate TMP0w
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | and sp, CARG1, #CFRAME_RAWMASK
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ldr L, SAVE_L
+ | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+ | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | movn TISNIL, #0
+ | mov RC, #16 // 2 results: false + error message.
+ | ldr BASE, L->base
+ | ldr GL, L->glref // Setup pointer to global state.
+ | mov_false TMP0
+ | sub RA, BASE, #8 // Results start at BASE-8.
+ | ldr PC, [BASE, FRAME_PC] // Fetch PC of previous frame.
+ | str TMP0, [BASE, #-8] // Prepend false to error message.
+ | st_vmstate ST_INTERP
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | // CARG1 = L
+ | mov CARG2, #LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | mov CARG1, L
+ | stp BASE, RC, L->base
+ | add PC, PC, #4 // Must point after first instruction.
+ | lsr CARG2, RA, #3
+ |2:
+ | // L->base = new base, L->top = top
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldp BASE, RC, L->base
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, RC, BASE
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L, CARG1
+ | ldr GL, L->glref // Setup pointer to global state.
+ | mov BASE, CARG2
+ | str L, SAVE_L
+ | mov PC, #FRAME_CP
+ | str wzr, SAVE_NRES
+ | add TMP0, sp, #CFRAME_RESUME
+ | ldrb TMP1w, L->status
+ | str wzr, SAVE_ERRF
+ | str L, SAVE_PC // Any value outside of bytecode is ok.
+ | str xzr, SAVE_CFRAME
+ | str TMP0, L->cframe
+ | cbz TMP1w, >3
+ |
+ | // Resume after yield (like a return).
+ | str L, GL->cur_L
+ | mov RA, BASE
+ | ldp BASE, CARG1, L->base
+ | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+ | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | ldr PC, [BASE, FRAME_PC]
+ | strb wzr, L->status
+ | movn TISNIL, #0
+ | sub RC, CARG1, BASE
+ | ands CARG1, PC, #FRAME_TYPE
+ | add RC, RC, #8
+ | st_vmstate ST_INTERP
+ | str RCw, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, #FRAME_CP
+ | str CARG4w, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, #FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ldr RC, L:CARG1->cframe
+ | str CARG3w, SAVE_NRES
+ | mov L, CARG1
+ | str CARG1, SAVE_L
+ | ldr GL, L->glref // Setup pointer to global state.
+ | mov BASE, CARG2
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | add TMP0, sp, #0
+ | str RC, SAVE_CFRAME
+ | str TMP0, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | str L, GL->cur_L
+ | ldp RB, CARG1, L->base // RB = old base (for vmeta_call).
+ | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+ | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | add PC, PC, BASE
+ | movn TISNIL, #0
+ | sub PC, PC, RB // PC = frame delta + frame type
+ | sub NARGS8:RC, CARG1, BASE
+ | st_vmstate ST_INTERP
+ |
+ |->vm_call_dispatch:
+ | // RB = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | checkfunc CARG3, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, CARG3 = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L, CARG1
+ | ldr RA, L:CARG1->stack
+ | str CARG1, SAVE_L
+ | ldr GL, L->glref // Setup pointer to global state.
+ | ldr RB, L->top
+ | str CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | ldr RC, L->cframe
+ | sub RA, RA, RB // Compute -savestack(L, L->top).
+ | str RAw, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | str wzr, SAVE_ERRF // No error function.
+ | add TMP0, sp, #0
+ | str RC, SAVE_CFRAME
+ | str TMP0, L->cframe // Add our C frame to cframe chain.
+ | str L, GL->cur_L
+ | blr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | mov BASE, CRET1
+ | mov PC, #FRAME_CP
+ | cbnz BASE, <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RC = (nresults+1)*8
+ | ldr LFUNC:CARG3, [RB, FRAME_FUNC]
+ | ldr CARG1, [BASE, #-32] // Get continuation.
+ | mov CARG4, BASE
+ | mov BASE, RB // Restore caller BASE.
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ |.if FFI
+ | cmp CARG1, #1
+ |.endif
+ | ldr PC, [CARG4, #-24] // Restore PC from [cont|PC].
+ | add TMP0, RA, RC
+ | str TISNIL, [TMP0, #-8] // Ensure one valid arg.
+ |.if FFI
+ | bls >1
+ |.endif
+ | ldr CARG3, LFUNC:CARG3->pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | // BASE = base, RA = resultptr, CARG4 = meta base
+ | br CARG1
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | sub CARG4, CARG4, #32
+ | sub RC, CARG4, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, CARG4 = meta base
+ | ldr INSw, [PC, #-4]
+ | sub CARG2, CARG4, #32
+ | ldr TMP0, [RA]
+ | str BASE, L->base
+ | decode_RB RB, INS
+ | decode_RA RA, INS
+ | add TMP1, BASE, RB, lsl #3
+ | subs TMP1, CARG2, TMP1
+ | beq >1
+ | str TMP0, [CARG2]
+ | lsr CARG3, TMP1, #3
+ | b ->BC_CAT_Z
+ |
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | movn CARG4, #~LJ_TSTR
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG4, STR:RC, CARG4, lsl #47
+ | b >2
+ |
+ |->vmeta_tgets:
+ | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CARG2, GL->tmptv
+ | add CARG2, GL, #offsetof(global_State, tmptv)
+ |2:
+ | add CARG3, sp, TMPDofs
+ | str CARG4, TMPD
+ | b >1
+ |
+ |->vmeta_tgetb: // RB = table, RC = index
+ | add RC, RC, TISNUM
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, sp, TMPDofs
+ | str RC, TMPD
+ | b >1
+ |
+ |->vmeta_tgetv: // RB = table, RC = key
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, BASE, RC, lsl #3
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cbz CRET1, >3
+ | ldr TMP0, [CRET1]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | sub TMP1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #16 // 2 args for func(t, k).
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | str PC, [BASE, #-24] // [cont|PC]
+ | sub PC, BASE, TMP1
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | sxtw CARG2, TMP1w
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | mov TMP0, TISNIL
+ | cbz CRET1, ->BC_TGETR_Z
+ | ldr TMP0, [CRET1]
+ | b ->BC_TGETR_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | movn CARG4, #~LJ_TSTR
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG4, STR:RC, CARG4, lsl #47
+ | b >2
+ |
+ |->vmeta_tsets:
+ | movk CARG2, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CARG2, GL->tmptv
+ | add CARG2, GL, #offsetof(global_State, tmptv)
+ |2:
+ | add CARG3, sp, TMPDofs
+ | str CARG4, TMPD
+ | b >1
+ |
+ |->vmeta_tsetb: // RB = table, RC = index
+ | add RC, RC, TISNUM
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, sp, TMPDofs
+ | str RC, TMPD
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | add CARG2, BASE, RB, lsl #3
+ | add CARG3, BASE, RC, lsl #3
+ |1:
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cbz CRET1, >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | str TMP0, [CRET1]
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | sub TMP1, BASE, #FRAME_CONT
+ | ldr BASE, L->top
+ | mov NARGS8:RC, #24 // 3 args for func(t, k, v).
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | str TMP0, [BASE, #16] // Copy value to third argument.
+ | str PC, [BASE, #-24] // [cont|PC]
+ | sub PC, BASE, TMP1
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ | sxtw CARG3, TMP1w
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // Returns TValue *.
+ | b ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | add CARG2, BASE, RA, lsl #3
+ | sub PC, PC, #4
+ | add CARG3, BASE, RC, lsl #3
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | uxtb CARG4w, INSw
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmp CRET1, #1
+ | bhi ->vmeta_binop
+ |4:
+ | ldrh RBw, [PC, # OFS_RD]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ | csel PC, PC, RB, lo
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | ldr INSw, [PC, #-4]
+ | ldr TMP0, [RA]
+ | decode_RA TMP1, INS
+ | str TMP0, [BASE, TMP1, lsl #3]
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | ldr TMP0, [RA]
+ | mov_true TMP1
+ | cmp TMP1, TMP0 // Branch if result is true.
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | ldr TMP0, [RA]
+ | mov_false TMP1
+ | cmp TMP0, TMP1 // Branch if result is false.
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | and TAB:CARG3, CARG3, #LJ_GCVMASK
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, INS
+ | str PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | sub PC, PC, #4
+ | str BASE, L->base
+ | mov CARG1, L
+ | mov CARG2, RA
+ | mov CARG3, RC
+ | str PC, SAVE_PC
+ | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | b ->cont_nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB, lsl #3
+ | add CARG4, KBASE, RC, lsl #3
+ | b >1
+ |
+ |->vmeta_arith_nv:
+ | add CARG4, BASE, RB, lsl #3
+ | add CARG3, KBASE, RC, lsl #3
+ | b >1
+ |
+ |->vmeta_unm:
+ | add CARG3, BASE, RC, lsl #3
+ | mov CARG4, CARG3
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB, lsl #3
+ | add CARG4, BASE, RC, lsl #3
+ |1:
+ | uxtb CARG5w, INSw
+ | add CARG2, BASE, RA, lsl #3
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cbz CRET1, ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | str PC, [CRET1, #-24] // [cont|PC]
+ | add PC, TMP1, #FRAME_CONT
+ | mov BASE, CRET1
+ | mov NARGS8:RC, #16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | add CARG2, BASE, RC, lsl #3
+#if LJ_52
+ | mov TAB:RC, TAB:CARG1 // Save table (ignored for other types).
+#endif
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | cbnz CRET1, ->vmeta_binop // Binop call for compatibility.
+ | mov TAB:CARG1, TAB:RC
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // RB = old base, BASE = new base, RC = nargs*8
+ | mov CARG1, L
+ | str RB, L->base // This is the caller's base!
+ | sub CARG2, BASE, #16
+ | str PC, SAVE_PC
+ | add CARG3, BASE, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Guaranteed to be a function here.
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mov CARG1, L
+ | str BASE, L->base
+ | sub CARG2, RA, #16
+ | str PC, SAVE_PC
+ | add CARG3, RA, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | ldr TMP1, [RA, FRAME_FUNC] // Guaranteed to be a function here.
+ | ldr PC, [BASE, FRAME_PC]
+ | add NARGS8:RC, NARGS8:RC, #8 // Got one more argument now.
+ | and LFUNC:CARG3, TMP1, #LJ_GCVMASK
+ | b ->BC_CALLT2_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, RA
+ | str PC, SAVE_PC
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ | ldr INSw, [PC, #-4]
+ |.if JIT
+ | uxtb TMP0w, INSw
+ |.endif
+ | decode_RA RA, INS
+ | decode_RD RC, INS
+ |.if JIT
+ | cmp TMP0, #BC_JFORI
+ | beq =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | ldp CARG1, CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc name
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr FARG1, [BASE]
+ | blo ->fff_fallback
+ | checknum CARG1, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc name
+ | ldp CARG1, CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | ldp FARG1, FARG2, [BASE]
+ | blo ->fff_fallback
+ | checknum CARG1, ->fff_fallback
+ | checknum CARG2, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses CARG1 and CARG2.
+ |.macro ffgccheck
+ | ldp CARG1, CARG2, GL->gc.total // Assumes threshold follows total.
+ | cmp CARG1, CARG2
+ | blt >1
+ | bl ->fff_gcstep
+ |1:
+ |.endmacro
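+ |
+ |// I.e. the usual inline GC check, roughly:
+ |//   if (g->gc.total >= g->gc.threshold) lj_gc_step(L);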
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | ldr PC, [BASE, FRAME_PC]
+ | mov_false TMP1
+ | cmp CARG1, TMP1
+ | bhs ->fff_fallback
+ | str CARG1, [BASE, #-16]
+ | sub RB, BASE, #8
+ | subs RA, NARGS8:RC, #8
+ | add RC, NARGS8:RC, #8 // Compute (nresults+1)*8.
+ | cbz RA, ->fff_res // Done if exactly 1 argument.
+ |1:
+ | ldr CARG1, [RB, #16]
+ | sub RA, RA, #8
+ | str CARG1, [RB], #8
+ | cbnz RA, <1
+ | b ->fff_res
+ |
+ |.ffunc_1 type
+ | mov TMP0, #~LJ_TISNUM
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #~LJ_TISNUM
+ | csinv TMP1, TMP0, ITYPE, lo
+ | add TMP1, TMP1, #offsetof(GCfuncC, upvalue)/8
+ | ldr CARG1, [CFUNC:CARG3, TMP1, lsl #3]
+ | b ->fff_restv
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TTAB
+ | ccmn ITYPE, #-LJ_TUDATA, #4, ne
+ | and TAB:CARG1, CARG1, #LJ_GCVMASK
+ | bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ldr TAB:RB, TAB:CARG1->metatable
+ |2:
+ | mov CARG1, TISNIL
+ | ldr STR:RC, GL->gcroot[GCROOT_MMNAME+MM_metatable]
+ | cbz TAB:RB, ->fff_restv
+ | ldr TMP1w, TAB:RB->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:RB->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ldp CARG1, TMP0, NODE:CARG3->val
+ | ldr NODE:CARG3, NODE:CARG3->next
+ | cmp TMP0, CARG4
+ | beq >5
+ | cbnz NODE:CARG3, <3
+ |4:
+ | mov CARG1, RB // Use metatable as default result.
+ | movk CARG1, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | b ->fff_restv
+ |5:
+ | cmp TMP0, TISNIL
+ | bne ->fff_restv
+ | b <4
+ |
+ |6:
+ | movn TMP0, #~LJ_TISNUM
+ | cmp ITYPE, TMP0
+ | csel ITYPE, ITYPE, TMP0, hs
+ | sub TMP1, GL, ITYPE, lsl #3
+ | ldr TAB:RB, [TMP1, #offsetof(global_State, gcroot[GCROOT_BASEMT])-8]
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+ | ldr TAB:TMP0, TAB:TMP1->metatable
+ | asr ITYPE, CARG2, #47
+ | ldrb TMP2w, TAB:TMP1->marked
+ | cmn ITYPE, #-LJ_TTAB
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ccmp TAB:TMP0, #0, #0, eq
+ | bne ->fff_fallback
+ | str TAB:CARG2, TAB:TMP1->metatable
+ | tbz TMP2w, #2, ->fff_restv // isblack(table)
+ | barrierback TAB:TMP1, TMP2w, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | ldr CARG2, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | checktab CARG2, ->fff_fallback
+ | mov CARG1, L
+ | add CARG3, BASE, #8
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ | ldr CARG1, [CRET1]
+ | b ->fff_restv
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | bne ->fff_fallback
+ | checknumber CARG1, ->fff_fallback
+ | b ->fff_restv
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv
+ | // Handle numbers inline, unless a number base metatable is present.
+ | ldr TMP1, GL->gcroot[GCROOT_BASEMT_NUM]
+ | str BASE, L->base
+ | cmn ITYPE, #-LJ_TISNUM
+ | ccmp TMP1, #0, #0, ls
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | bne ->fff_fallback
+ | ffgccheck
+ | mov CARG1, L
+ | mov CARG2, BASE
+ | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ | // Returns GCstr *.
+ | movn TMP1, #~LJ_TSTR
+ | ldr BASE, L->base
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | checktp CARG1, LJ_TTAB, ->fff_fallback
+ | str TISNIL, [BASE, NARGS8:RC] // Set missing 2nd arg to nil.
+ | ldr PC, [BASE, FRAME_PC]
+ | add CARG2, BASE, #8
+ | sub CARG3, BASE, #16
+ | bl extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // Returns 1=found, 0=end, -1=error.
+ | mov RC, #(2+1)*8
+ | tbnz CRET1w, #31, ->fff_fallback // Invalid key.
+ | cbnz CRET1, ->fff_res // Found key/value.
+ | // End of traversal: return nil.
+ | str TISNIL, [BASE, #-16]
+ | b ->fff_res1
+ |
+ |.ffunc_1 pairs
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+#if LJ_52
+ | ldr TAB:CARG2, TAB:TMP1->metatable
+#endif
+ | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cbnz TAB:CARG2, ->fff_fallback
+#endif
+ | mov RC, #(3+1)*8
+ | stp CARG1, TISNIL, [BASE, #-8]
+ | str CFUNC:CARG4, [BASE, #-16]
+ | b ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ | ldr TMP1w, TAB:CARG1->asize
+ | ldr CARG3, TAB:CARG1->array
+ | ldr TMP0w, TAB:CARG1->hmask
+ | add CARG2w, CARG2w, #1
+ | cmp CARG2w, TMP1w
+ | ldr PC, [BASE, FRAME_PC]
+ | add TMP2, CARG2, TISNUM
+ | mov RC, #(0+1)*8
+ | str TMP2, [BASE, #-16]
+ | bhs >2 // Not in array part?
+ | ldr TMP0, [CARG3, CARG2, lsl #3]
+ |1:
+ | mov TMP1, #(2+1)*8
+ | cmp TMP0, TISNIL
+ | str TMP0, [BASE, #-8]
+ | csel RC, RC, TMP1, eq
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cbz TMP0w, ->fff_res
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cbz CRET1, ->fff_res
+ | ldr TMP0, [CRET1]
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktp TMP1, CARG1, LJ_TTAB, ->fff_fallback
+#if LJ_52
+ | ldr TAB:CARG2, TAB:TMP1->metatable
+#endif
+ | ldr CFUNC:CARG4, CFUNC:CARG3->upvalue[0]
+ | ldr PC, [BASE, FRAME_PC]
+#if LJ_52
+ | cbnz TAB:CARG2, ->fff_fallback
+#endif
+ | mov RC, #(3+1)*8
+ | stp CARG1, TISNUM, [BASE, #-8]
+ | str CFUNC:CARG4, [BASE, #-16]
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmp NARGS8:RC, #8
+ | ldrb TMP0w, GL->hookmask
+ | blo ->fff_fallback
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | mov RB, BASE
+ | add BASE, BASE, #16
+ | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
+ | add PC, TMP0, #16+FRAME_PCALL
+ | beq ->vm_call_dispatch
+ |1:
+ | add TMP2, BASE, NARGS8:RC
+ |2:
+ | ldr TMP0, [TMP2, #-16]
+ | str TMP0, [TMP2, #-8]!
+ | cmp TMP2, BASE
+ | bne <2
+ | b ->vm_call_dispatch
+ |
+ |.ffunc xpcall
+ | ldp CARG1, CARG2, [BASE]
+ | ldrb TMP0w, GL->hookmask
+ | subs NARGS8:TMP1, NARGS8:RC, #16
+ | blo ->fff_fallback
+ | mov RB, BASE
+ | asr ITYPE, CARG2, #47
+ | ubfx TMP0w, TMP0w, #HOOK_ACTIVE_SHIFT, #1
+ | cmn ITYPE, #-LJ_TFUNC
+ | add PC, TMP0, #24+FRAME_PCALL
+ | bne ->fff_fallback // Traceback must be a function.
+ | mov NARGS8:RC, NARGS8:TMP1
+ | add BASE, BASE, #24
+ | stp CARG2, CARG1, [RB] // Swap function and traceback.
+ | cbz NARGS8:RC, ->vm_call_dispatch
+ | b <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG1, LJ_TTHREAD, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ldr L:CARG1, CFUNC:CARG3->upvalue[0].gcr
+ | and L:CARG1, CARG1, #LJ_GCVMASK
+ |.endif
+ | ldr PC, [BASE, FRAME_PC]
+ | str BASE, L->base
+ | ldp RB, CARG2, L:CARG1->base
+ | ldrb TMP1w, L:CARG1->status
+ | add TMP0, CARG2, TMP1
+ | str PC, SAVE_PC
+ | cmp TMP0, RB
+ | beq ->fff_fallback
+ | cmp TMP1, #LUA_YIELD
+ | add TMP0, CARG2, #8
+ | csel CARG2, CARG2, TMP0, hs
+ | ldr CARG4, L:CARG1->maxstack
+ | add CARG3, CARG2, NARGS8:RC
+ | ldr RB, L:CARG1->cframe
+ | ccmp CARG3, CARG4, #2, ls
+ | ccmp RB, #0, #2, ls
+ | bhi ->fff_fallback
+ |.if resume
+ | sub CARG3, CARG3, #8 // Keep resumed thread in stack for GC.
+ | add BASE, BASE, #8
+ | sub NARGS8:RC, NARGS8:RC, #8
+ |.endif
+ | str CARG3, L:CARG1->top
+ | str BASE, L->top
+ | cbz NARGS8:RC, >3
+ |2: // Move args to coroutine.
+ | ldr TMP0, [BASE, RB]
+ | cmp RB, NARGS8:RC
+ | str TMP0, [CARG2, RB]
+ | add RB, RB, #8
+ | bne <2
+ |3:
+ | mov CARG3, #0
+ | mov L:RA, L:CARG1
+ | mov CARG4, #0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | ldp CARG3, CARG4, L:RA->base
+ | cmp CRET1, #LUA_YIELD
+ | ldr BASE, L->base
+ | str L, GL->cur_L
+ | st_vmstate ST_INTERP
+ | bhi >8
+ | sub RC, CARG4, CARG3
+ | ldr CARG1, L->maxstack
+ | add CARG2, BASE, RC
+ | cbz RC, >6 // No results?
+ | cmp CARG2, CARG1
+ | mov RB, #0
+ | bhi >9 // Need to grow stack?
+ |
+ | sub CARG4, RC, #8
+ | str CARG3, L:RA->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | ldr TMP0, [CARG3, RB]
+ | cmp RB, CARG4
+ | str TMP0, [BASE, RB]
+ | add RB, RB, #8
+ | bne <5
+ |6:
+ |.if resume
+ | mov_true TMP1
+ | add RC, RC, #16
+ |7:
+ | str TMP1, [BASE, #-8] // Prepend true/false to results.
+ | sub RA, BASE, #8
+ |.else
+ | mov RA, BASE
+ | add RC, RC, #8
+ |.endif
+ | ands CARG1, PC, #FRAME_TYPE
+ | str PC, SAVE_PC
+ | str RCw, SAVE_MULTRES
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | ldr TMP0, [CARG4, #-8]!
+ | mov_false TMP1
+ | mov RC, #(2+1)*8
+ | str CARG4, L:RA->top // Remove error from coroutine stack.
+ | str TMP0, [BASE] // Copy error message.
+ | b <7
+ |.else
+ | mov CARG1, L
+ | mov CARG2, L:RA
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Never returns.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov CARG1, L
+ | lsr CARG2, RC, #3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | mov CRET1, #0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ldr TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | mov CRET1, #LUA_YIELD
+ | stp BASE, TMP1, L->base
+ | tbz TMP0, #0, ->fff_fallback
+ | str xzr, L->cframe
+ | strb CRET1w, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.macro math_round, func, round
+ | .ffunc math_ .. func
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr d0, [BASE]
+ | blo ->fff_fallback
+ | cmp TISNUMhi, CARG1, lsr #32
+ | beq ->fff_restv
+ | blo ->fff_fallback
+ | round d0, d0
+ | b ->fff_resn
+ |.endmacro
+ |
+ | math_round floor, frintm
+ | math_round ceil, frintp
+ |
+ |.ffunc_1 math_abs
+ | checknumber CARG1, ->fff_fallback
+ | and CARG1, CARG1, #U64x(7fffffff,ffffffff)
+ | bne ->fff_restv
+ | eor CARG2w, CARG1w, CARG1w, asr #31
+ | movz CARG3, #0x41e0, lsl #48 // 2^31.
+ | subs CARG1w, CARG2w, CARG1w, asr #31
+ | add CARG1, CARG1, TISNUM
+ | csel CARG1, CARG1, CARG3, pl
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG1 = TValue result.
+ | ldr PC, [BASE, FRAME_PC]
+ | str CARG1, [BASE, #-16]
+ |->fff_res1:
+ | // PC = return.
+ | mov RC, #(1+1)*8
+ |->fff_res:
+ | // RC = (nresults+1)*8, PC = return.
+ | ands CARG1, PC, #FRAME_TYPE
+ | str RCw, SAVE_MULTRES
+ | sub RA, BASE, #16
+ | bne ->vm_return
+ | ldr INSw, [PC, #-4]
+ | decode_RB RB, INS
+ |5:
+ | cmp RC, RB, lsl #3 // More results expected?
+ | blo >6
+ | decode_RA TMP1, INS
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP1, lsl #3
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add TMP1, RA, RC
+ | add RC, RC, #8
+ | str TISNIL, [TMP1, #-8]
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | bl extern func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.ffunc_n math_sqrt
+ | fsqrt d0, d0
+ |->fff_resn:
+ | ldr PC, [BASE, FRAME_PC]
+ | str d0, [BASE, #-16]
+ | b ->fff_res1
+ |
+ |.ffunc math_log
+ | ldr CARG1, [BASE]
+ | cmp NARGS8:RC, #8
+ | ldr FARG1, [BASE]
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checknum CARG1, ->fff_fallback
+ | bl extern log
+ | b ->fff_resn
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_2 math_ldexp
+ | ldr FARG1, [BASE]
+ | checknum CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ | sxtw CARG1, CARG2w
+ | bl extern ldexp // (double x, int exp)
+ | b ->fff_resn
+ |
+ |.ffunc_n math_frexp
+ | add CARG1, sp, TMPDofs
+ | bl extern frexp
+ | ldr CARG2w, TMPD
+ | ldr PC, [BASE, FRAME_PC]
+ | str d0, [BASE, #-16]
+ | mov RC, #(2+1)*8
+ | add CARG2, CARG2, TISNUM
+ | str CARG2, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ | sub CARG1, BASE, #16
+ | ldr PC, [BASE, FRAME_PC]
+ | bl extern modf
+ | mov RC, #(2+1)*8
+ | str d0, [BASE, #-8]
+ | b ->fff_res
+ |
+ |.macro math_minmax, name, cond, fcond
+ | .ffunc_1 name
+ | add RB, BASE, RC
+ | add RA, BASE, #8
+ | checkint CARG1, >4
+ |1: // Handle integers.
+ | ldr CARG2, [RA]
+ | cmp RA, RB
+ | bhs ->fff_restv
+ | checkint CARG2, >3
+ | cmp CARG1w, CARG2w
+ | add RA, RA, #8
+ | csel CARG1, CARG2, CARG1, cond
+ | b <1
+ |3: // Convert intermediate result to number and continue below.
+ | scvtf d0, CARG1w
+ | blo ->fff_fallback
+ | ldr d1, [RA]
+ | b >6
+ |
+ |4:
+ | ldr d0, [BASE]
+ | blo ->fff_fallback
+ |5: // Handle numbers.
+ | ldr CARG2, [RA]
+ | ldr d1, [RA]
+ | cmp RA, RB
+ | bhs ->fff_resn
+ | checknum CARG2, >7
+ |6:
+ | fcmp d0, d1
+ | add RA, RA, #8
+ | fcsel d0, d1, d0, fcond
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | scvtf d1, CARG2w
+ | blo ->fff_fallback
+ | b <6
+ |.endmacro
+ |
+ | math_minmax math_min, gt, pl
+ | math_minmax math_max, lt, le
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ldp PC, CARG1, [BASE, FRAME_PC]
+ | cmp NARGS8:RC, #8
+ | asr ITYPE, CARG1, #47
+ | ccmn ITYPE, #-LJ_TSTR, #0, eq
+ | and STR:CARG1, CARG1, #LJ_GCVMASK
+ | bne ->fff_fallback
+ | ldrb TMP0w, STR:CARG1[1] // Access is always ok (NUL at end).
+ | ldr CARG3w, STR:CARG1->len
+ | add TMP0, TMP0, TISNUM
+ | str TMP0, [BASE, #-16]
+ | mov RC, #(0+1)*8
+ | cbz CARG3, ->fff_res
+ | b ->fff_res1
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | ldp PC, CARG1, [BASE, FRAME_PC]
+ | cmp CARG1w, #255
+ | ccmp NARGS8:RC, #8, #0, ls // Need exactly 1 argument.
+ | bne ->fff_fallback
+ | checkint CARG1, ->fff_fallback
+ | mov CARG3, #1
+ | // Point to the char inside the integer in the stack slot.
+ |.if ENDIAN_LE
+ | mov CARG2, BASE
+ |.else
+ | add CARG2, BASE, #7
+ |.endif
+ |->fff_newstr:
+ | // CARG2 = str, CARG3 = len.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // Returns GCstr *.
+ | ldr BASE, L->base
+ | movn TMP1, #~LJ_TSTR
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | ldr CARG1, [BASE]
+ | ldr CARG3, [BASE, #16]
+ | cmp NARGS8:RC, #16
+ | movn RB, #0
+ | beq >1
+ | blo ->fff_fallback
+ | checkint CARG3, ->fff_fallback
+ | sxtw RB, CARG3w
+ |1:
+ | ldr CARG2, [BASE, #8]
+ | checkstr CARG1, ->fff_fallback
+ | ldr TMP1w, STR:CARG1->len
+ | checkint CARG2, ->fff_fallback
+ | sxtw CARG2, CARG2w
+ | // CARG1 = str, TMP1 = str->len, CARG2 = start, RB = end
+ | add TMP2, RB, TMP1
+ | cmp RB, #0
+ | add TMP0, CARG2, TMP1
+ | csinc RB, RB, TMP2, ge // if (end < 0) end += len+1
+ | cmp CARG2, #0
+ | csinc CARG2, CARG2, TMP0, ge // if (start < 0) start += len+1
+ | cmp RB, #0
+ | csel RB, RB, xzr, ge // if (end < 0) end = 0
+ | cmp CARG2, #1
+ | csinc CARG2, CARG2, xzr, ge // if (start < 1) start = 1
+ | cmp RB, TMP1
+ | csel RB, RB, TMP1, le // if (end > len) end = len
+ | add CARG1, STR:CARG1, #sizeof(GCstr)-1
+ | subs CARG3, RB, CARG2 // len = end - start
+ | add CARG2, CARG1, CARG2
+ | add CARG3, CARG3, #1 // len += 1
+ | bge ->fff_newstr
+ | add STR:CARG1, GL, #offsetof(global_State, strempty)
+ | movn TMP1, #~LJ_TSTR
+ | add CARG1, CARG1, TMP1, lsl #47
+ | b ->fff_restv
+ |
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ | ldr CARG2, [BASE]
+ | cmp NARGS8:RC, #8
+ | asr ITYPE, CARG2, #47
+ | ccmn ITYPE, #-LJ_TSTR, #0, hs
+ | and STR:CARG2, CARG2, #LJ_GCVMASK
+ | bne ->fff_fallback
+ | ldr TMP0, GL->tmpbuf.b
+ | add SBUF:CARG1, GL, #offsetof(global_State, tmpbuf)
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | str L, GL->tmpbuf.L
+ | str TMP0, GL->tmpbuf.w
+ | bl extern lj_buf_putstr_ .. name
+ | bl extern lj_buf_tostr
+ | b ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |// FP number to bit conversion, done in integer registers. Clobbers CARG1-CARG3.
+ |->vm_tobit_fb:
+ | bls ->fff_fallback
+ | add CARG2, CARG1, CARG1
+ | mov CARG3, #1076
+ | sub CARG3, CARG3, CARG2, lsr #53
+ | cmp CARG3, #53
+ | bhi >1
+ | and CARG2, CARG2, #U64x(001fffff,ffffffff)
+ | orr CARG2, CARG2, #U64x(00200000,00000000)
+ | cmp CARG1, #0
+ | lsr CARG2, CARG2, CARG3
+ | cneg CARG1w, CARG2w, mi
+ | br lr
+ |1:
+ | mov CARG1w, #0
+ | br lr
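+ |// Sketch of the conversion above: with b = raw double bits, the biased
+ |// exponent is e = (b<<1)>>53 and r = ((mantissa|2^52)<<1) >> (1076-e),
+ |// negated when the sign bit is set; out-of-range shifts yield 0.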
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | adr lr, >1
+ | checkint CARG1, ->vm_tobit_fb
+ |1:
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | mov RA, #8
+ | mov TMP0w, CARG1w
+ | adr lr, >2
+ |1:
+ | ldr CARG1, [BASE, RA]
+ | cmp RA, NARGS8:RC
+ | add RA, RA, #8
+ | bge >9
+ | checkint CARG1, ->vm_tobit_fb
+ |2:
+ | ins TMP0w, TMP0w, CARG1w
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, orr
+ |.ffunc_bit_op bxor, eor
+ |
+ |.ffunc_bit tobit
+ | mov TMP0w, CARG1w
+ |9: // Label reused by .ffunc_bit_op users.
+ | add CARG1, TMP0, TISNUM
+ | b ->fff_restv
+ |
+ |.ffunc_bit bswap
+ | rev TMP0w, CARG1w
+ | add CARG1, TMP0, TISNUM
+ | b ->fff_restv
+ |
+ |.ffunc_bit bnot
+ | mvn TMP0w, CARG1w
+ | add CARG1, TMP0, TISNUM
+ | b ->fff_restv
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc bit_..name
+ | ldp TMP0, CARG1, [BASE]
+ | cmp NARGS8:RC, #16
+ | blo ->fff_fallback
+ | adr lr, >1
+ | checkint CARG1, ->vm_tobit_fb
+ |1:
+ |.if shmod == 0
+ | mov TMP1, CARG1
+ |.else
+ | neg TMP1, CARG1
+ |.endif
+ | mov CARG1, TMP0
+ | adr lr, >2
+ | checkint CARG1, ->vm_tobit_fb
+ |2:
+ | ins TMP0w, CARG1w, TMP1w
+ | add CARG1, TMP0, TISNUM
+ | b ->fff_restv
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, lsl, 0
+ |.ffunc_bit_sh rshift, lsr, 0
+ |.ffunc_bit_sh arshift, asr, 0
+ |.ffunc_bit_sh rol, ror, 1
+ |.ffunc_bit_sh ror, ror, 0
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RC = nargs*8
+ | ldp CFUNC:CARG3, PC, [BASE, FRAME_FUNC] // Fallback may overwrite PC.
+ | ldr TMP2, L->maxstack
+ | add TMP1, BASE, NARGS8:RC
+ | stp BASE, TMP1, L->base
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | add TMP1, TMP1, #8*LUA_MINSTACK
+ | ldr CARG3, CFUNC:CARG3->f
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | cmp TMP1, TMP2
+ | mov CARG1, L
+ | bhi >5 // Need to grow stack.
+ | blr CARG3 // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ldr BASE, L->base
+ | cmp CRET1w, #0
+ | lsl RC, CRET1, #3
+ | sub RA, BASE, #16
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | ldr CARG1, L->top
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub NARGS8:RC, CARG1, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | ands TMP0, PC, #FRAME_TYPE
+ | and TMP1, PC, #~FRAME_TYPEP
+ | bne >3
+ | ldrb RAw, [PC, #-4+OFS_RA]
+ | lsl RA, RA, #3
+ | add TMP1, RA, #16
+ |3:
+ | sub RB, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2, #LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldr BASE, L->base
+ | cmp CARG1, CARG1 // Set zero-flag to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | add CARG2, BASE, NARGS8:RC // Calculate L->top.
+ | mov RA, lr
+ | stp BASE, CARG2, L->base
+ | str PC, SAVE_PC // Redundant (but a defined value).
+ | mov CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | ldp BASE, CARG2, L->base
+ | ldr CFUNC:CARG3, [BASE, FRAME_FUNC]
+ | mov lr, RA // Help return address predictor.
+ | sub NARGS8:RC, CARG2, BASE // Calculate nargs*8.
+ | and CFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | ldrb CARG1w, GL->hookmask
+ | tst CARG1, #HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | ldr CARG2w, GL->hookcount
+ | tst CARG1, #HOOK_ACTIVE
+ | bne >1
+ | sub CARG2w, CARG2w, #1
+ | tst CARG1, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq >1
+ | str CARG2w, GL->hookcount
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | ldrb TMP2w, GL->hookmask
+ | tbz TMP2w, #HOOK_ACTIVE_SHIFT, >1 // Hook already active?
+ |5: // Re-dispatch to static ins.
+ | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
+ | br TMP0
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | ldrb TMP2w, GL->hookmask
+ | ldr TMP3w, GL->hookcount
+ | tbnz TMP2w, #HOOK_ACTIVE_SHIFT, <5 // Hook already active?
+ | tst TMP2w, #LUA_MASKLINE|LUA_MASKCOUNT
+ | beq <5
+ | sub TMP3w, TMP3w, #1
+ | str TMP3w, GL->hookcount
+ | cbz TMP3w, >1
+ | tbz TMP2w, #LUA_HOOKLINE, <5
+ |1:
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | ldr BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | ldr INSw, [PC, #-4]
+ | add TMP1, GL, INS, uxtb #3
+ | decode_RA RA, INS
+ | ldr TMP0, [TMP1, #GG_G2DISP+GG_DISP2STATIC]
+ | decode_RD RC, INS
+ | br TMP0
+ |
+ |->cont_hook: // Continue from hook yield.
+ | ldr CARG1, [CARG4, #-40]
+ | add PC, PC, #4
+ | str CARG1w, SAVE_MULTRES // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC] // Same as curr_topL(L).
+ | add CARG1, GL, #GG_G2DISP+GG_DISP2J
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | str PC, SAVE_PC
+ | ldr CARG3, LFUNC:CARG3->pc
+ | mov CARG2, PC
+ | str L, [GL, #GL_J(L)]
+ | ldrb CARG3w, [CARG3, #PC2PROTO(framesize)]
+ | str BASE, L->base
+ | add CARG3, BASE, CARG3, lsl #3
+ | str CARG3, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | orr CARG2, PC, #1
+ |1:
+ |.endif
+ | add TMP1, BASE, NARGS8:RC
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | sub RA, RA, BASE
+ | stp BASE, TMP1, L->base
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | ldp BASE, TMP1, L->base
+ | str xzr, SAVE_PC // Invalidate for subsequent line hook.
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | add RA, BASE, RA
+ | sub NARGS8:RC, TMP1, BASE
+ | ldr INSw, [PC, #-4]
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | br CRET1
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, CARG4 = meta base
+ | ldr RBw, SAVE_MULTRES
+ | ldr INSw, [PC, #-4]
+ | ldr TRACE:CARG3, [CARG4, #-40] // Save previous trace.
+ | subs RB, RB, #8
+ | decode_RA RC, INS // Call base.
+ | and CARG3, CARG3, #LJ_GCVMASK
+ | beq >2
+ |1: // Move results down.
+ | ldr CARG1, [RA]
+ | add RA, RA, #8
+ | subs RB, RB, #8
+ | str CARG1, [BASE, RC, lsl #3]
+ | add RC, RC, #1
+ | bne <1
+ |2:
+ | decode_RA RA, INS
+ | decode_RB RB, INS
+ | add RA, RA, RB
+ |3:
+ | cmp RA, RC
+ | bhi >9 // More results wanted?
+ |
+ | ldrh RAw, TRACE:CARG3->traceno
+ | ldrh RCw, TRACE:CARG3->link
+ | cmp RCw, RAw
+ | beq ->cont_nop // Blacklisted.
+ | cmp RCw, #0
+ | bne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | mov CARG1, #GL_J(exitno)
+ | str RAw, [GL, CARG1]
+ | mov CARG1, #GL_J(L)
+ | str L, [GL, CARG1]
+ | str BASE, L->base
+ | add CARG1, GL, #GG_G2J
+ | mov CARG2, PC
+ | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | ldr BASE, L->base
+ | b ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | str TISNIL, [BASE, RC, lsl #3]
+ | add RC, RC, #1
+ | b <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov CARG1, L
+ | str BASE, L->base
+ | mov CARG2, PC
+ | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | ldr BASE, L->base
+ | sub PC, PC, #4
+ | b ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ | stp d..a, d..b, [sp, #a*8]
+ | stp x..a, x..b, [sp, #32*8+a*8]
+ |.endmacro
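+ |// savex_ builds the ExitState snapshot: d0-d31 go to sp+[0..31]*8 and
+ |// the GP regs to sp+[32..63]*8; the x30/x31 slots are patched below
+ |// with 0 and the original sp.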
+ |
+ |->vm_exit_handler:
+ |.if JIT
+ | sub sp, sp, #(64*8)
+ | savex_, 0, 1
+ | savex_, 2, 3
+ | savex_, 4, 5
+ | savex_, 6, 7
+ | savex_, 8, 9
+ | savex_, 10, 11
+ | savex_, 12, 13
+ | savex_, 14, 15
+ | savex_, 16, 17
+ | savex_, 18, 19
+ | savex_, 20, 21
+ | savex_, 22, 23
+ | savex_, 24, 25
+ | savex_, 26, 27
+ | savex_, 28, 29
+ | stp d30, d31, [sp, #30*8]
+ | ldr CARG1, [sp, #64*8] // Load original value of lr.
+ | add CARG3, sp, #64*8 // Recompute original value of sp.
+ | mv_vmstate CARG4w, EXIT
+ | stp xzr, CARG3, [sp, #62*8] // Store 0/sp in RID_LR/RID_SP.
+ | sub CARG1, CARG1, lr
+ | ldr L, GL->cur_L
+ | lsr CARG1, CARG1, #2
+ | ldr BASE, GL->jit_base
+ | sub CARG1, CARG1, #2
+ | ldr CARG2w, [lr] // Load trace number.
+ | st_vmstate CARG4w
+ |.if ENDIAN_BE
+ | rev32 CARG2, CARG2
+ |.endif
+ | str BASE, L->base
+ | ubfx CARG2w, CARG2w, #5, #16
+ | str CARG1w, [GL, #GL_J(exitno)]
+ | str CARG2w, [GL, #GL_J(parent)]
+ | str L, [GL, #GL_J(L)]
+ | str xzr, GL->jit_base
+ | add CARG1, GL, #GG_G2J
+ | mov CARG2, sp
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ldr CARG2, L->cframe
+ | ldr BASE, L->base
+ | and sp, CARG2, #CFRAME_RAWMASK
+ | ldr PC, SAVE_PC // Get SAVE_PC.
+ | str L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |
+ |->vm_exit_interp:
+ | // CARG1 = MULTRES or negated error code, BASE, PC and GL set.
+ |.if JIT
+ | ldr L, SAVE_L
+ |1:
+ | cmp CARG1w, #0
+ | blt >9 // Check for error from exit.
+ | lsl RC, CARG1, #3
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+ | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | movn TISNIL, #0
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | str RCw, SAVE_MULTRES
+ | str BASE, L->base
+ | ldr CARG2, LFUNC:CARG2->pc
+ | str xzr, GL->jit_base
+ | mv_vmstate CARG4w, INTERP
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | ldrb RBw, [PC, # OFS_OP]
+ | ldr INSw, [PC], #4
+ | st_vmstate CARG4w
+ | cmp RBw, #BC_FUNCC+2 // Fast function?
+ | add TMP1, GL, INS, uxtb #3
+ | bhs >4
+ |2:
+ | cmp RBw, #BC_FUNCF // Function header?
+ | add TMP0, GL, RB, uxtb #3
+ | ldr RB, [TMP0, #GG_G2DISP]
+ | decode_RA RA, INS
+ | lsr TMP0, INS, #16
+ | csel RC, TMP0, RC, lo
+ | blo >5
+ | ldr CARG3, [BASE, FRAME_FUNC]
+ | sub RC, RC, #8
+ | add RA, BASE, RA, lsl #3 // Yes: RA = BASE+framesize*8, RC = nargs*8
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ |5:
+ | br RB
+ |
+ |4: // Check frame below fast function.
+ | ldr CARG1, [BASE, FRAME_PC]
+ | ands CARG2, CARG1, #FRAME_TYPE
+ | bne <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | ldr CARG3w, [CARG1, #-4]
+ | decode_RA CARG1, CARG3
+ | sub CARG2, BASE, CARG1, lsl #3
+ | ldr LFUNC:CARG3, [CARG2, #-32]
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ldr CARG3, LFUNC:CARG3->pc
+ | ldr KBASE, [CARG3, #PC2PROTO(k)]
+ | b <2
+ |
+ |9: // Rethrow error from the right C frame.
+ | neg CARG2w, CARG1w
+ | mov CARG1, L
+ | bl extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ | // int lj_vm_modi(int dividend, int divisor);
+ |->vm_modi:
+ | eor CARG4w, CARG1w, CARG2w
+ | cmp CARG4w, #0
+ | eor CARG3w, CARG1w, CARG1w, asr #31
+ | eor CARG4w, CARG2w, CARG2w, asr #31
+ | sub CARG3w, CARG3w, CARG1w, asr #31
+ | sub CARG4w, CARG4w, CARG2w, asr #31
+ | udiv CARG1w, CARG3w, CARG4w
+ | msub CARG1w, CARG1w, CARG4w, CARG3w
+ | ccmp CARG1w, #0, #4, mi
+ | sub CARG3w, CARG1w, CARG4w
+ | csel CARG1w, CARG1w, CARG3w, eq
+ | eor CARG3w, CARG1w, CARG2w
+ | cmp CARG3w, #0
+ | cneg CARG1w, CARG1w, mi
+ | ret
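+ |// A hedged C equivalent of vm_modi (callers guard against b == 0):
+ |//   uint32_t r = (uint32_t)abs(a) % (uint32_t)abs(b);
+ |//   if (r && (a ^ b) < 0) r -= abs(b);            // Signs differ: fix up.
+ |//   return ((int32_t)r ^ b) < 0 ? -(int32_t)r : (int32_t)r;  // Sign of b.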
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_RES, CARG1
+ |.define NEXT_IDX, CARG2w
+ |.define NEXT_LIM, CARG3w
+ |.define NEXT_TMP0, TMP0
+ |.define NEXT_TMP0w, TMP0w
+ |.define NEXT_TMP1, TMP1
+ |.define NEXT_TMP1w, TMP1w
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL, [sp]
+ |.define NEXT_RES_KEY, [sp, #8]
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2w.
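+ |// Roughly: scan the array part from idx for a non-nil slot (the result
+ |// pair is built in the val/key stack slots at sp), then the hash nodes
+ |// at idx - asize, where a hit returns a pointer straight into the Node,
+ |// whose val/key fields are laid out the same way.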
+ |->vm_next:
+ |.if JIT
+ | ldr NEXT_LIM, NEXT_TAB->asize
+ | ldr NEXT_TMP1, NEXT_TAB->array
+ |1: // Traverse array part.
+ | subs NEXT_TMP0w, NEXT_IDX, NEXT_LIM
+ | bhs >5 // Index points after array part?
+ | ldr NEXT_TMP0, [NEXT_TMP1, NEXT_IDX, uxtw #3]
+ | cmn NEXT_TMP0, #-LJ_TNIL
+ | cinc NEXT_IDX, NEXT_IDX, eq
+ | beq <1 // Skip holes in array part.
+ | str NEXT_TMP0, NEXT_RES_VAL
+ | movz NEXT_TMP0w, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | stp NEXT_IDX, NEXT_TMP0w, NEXT_RES_KEY
+ | add NEXT_IDX, NEXT_IDX, #1
+ | mov NEXT_RES, NEXT_RES_PTR
+ |4:
+ | ret
+ |
+ |5: // Traverse hash part.
+ | ldr NEXT_TMP1w, NEXT_TAB->hmask
+ | ldr NODE:NEXT_RES, NEXT_TAB->node
+ | add NEXT_TMP0w, NEXT_TMP0w, NEXT_TMP0w, lsl #1
+ | add NEXT_LIM, NEXT_LIM, NEXT_TMP1w
+ | add NODE:NEXT_RES, NODE:NEXT_RES, NEXT_TMP0w, uxtw #3
+ |6:
+ | cmp NEXT_IDX, NEXT_LIM
+ | bhi >9
+ | ldr NEXT_TMP0, NODE:NEXT_RES->val
+ | cmn NEXT_TMP0, #-LJ_TNIL
+ | add NEXT_IDX, NEXT_IDX, #1
+ | bne <4
+ | // Skip holes in hash part.
+ | add NODE:NEXT_RES, NODE:NEXT_RES, #sizeof(Node)
+ | b <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | movn NEXT_TMP0, #0
+ | str NEXT_TMP0, NEXT_RES_KEY
+ | mov NEXT_RES, NEXT_RES_PTR
+ | ret
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions.
+ |// Saveregs performed below. Callback slot number in w9, g in x10.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | ldr CTSTATE, GL:x10->ctype_state
+ | mov GL, x10
+ | add x10, sp, # CFRAME_SPACE
+ | str w9, CTSTATE->cb.slot
+ | stp x0, x1, CTSTATE->cb.gpr[0]
+ | stp d0, d1, CTSTATE->cb.fpr[0]
+ | stp x2, x3, CTSTATE->cb.gpr[2]
+ | stp d2, d3, CTSTATE->cb.fpr[2]
+ | stp x4, x5, CTSTATE->cb.gpr[4]
+ | stp d4, d5, CTSTATE->cb.fpr[4]
+ | stp x6, x7, CTSTATE->cb.gpr[6]
+ | stp d6, d7, CTSTATE->cb.fpr[6]
+ | str x10, CTSTATE->cb.stack
+ | mov CARG1, CTSTATE
+ | str CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | mov CARG2, sp
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | ldp BASE, RC, L:CRET1->base
+ | movz TISNUM, #(LJ_TISNUM>>1)&0xffff, lsl #48
+ | movz TISNUMhi, #(LJ_TISNUM>>1)&0xffff, lsl #16
+ | movn TISNIL, #0
+ | mov L, CRET1
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | sub RC, RC, BASE
+ | st_vmstate ST_INTERP
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | ldr CTSTATE, GL->ctype_state
+ | stp BASE, CARG4, L->base
+ | str L, CTSTATE->L
+ | mov CARG1, CTSTATE
+ | mov CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | ldp x0, x1, CTSTATE->cb.gpr[0]
+ | ldp d0, d1, CTSTATE->cb.fpr[0]
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
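+ | // Roughly: copy CCSTATE->stack[0..nsp-1] below the frame, load the
+ | // AAPCS64 argument registers x0-x7/d0-d7 from CCSTATE->gpr/fpr and
+ | // x8 (indirect result pointer) from CCSTATE->retp, call func, then
+ | // store the result registers back.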
+ |.if FFI
+ | .type CCSTATE, CCallState, x19
+ | stp x20, CCSTATE, [sp, #-32]!
+ | stp fp, lr, [sp, #16]
+ | add fp, sp, #16
+ | mov CCSTATE, x0
+ | ldr TMP0w, CCSTATE:x0->spadj
+ | ldrb TMP1w, CCSTATE->nsp
+ | add TMP2, CCSTATE, #offsetof(CCallState, stack)
+ | subs TMP1, TMP1, #1
+ | ldr TMP3, CCSTATE->func
+ | sub sp, sp, TMP0
+ | bmi >2
+ |1: // Copy stack slots
+ | ldr TMP0, [TMP2, TMP1, lsl #3]
+ | str TMP0, [sp, TMP1, lsl #3]
+ | subs TMP1, TMP1, #1
+ | bpl <1
+ |2:
+ | ldp x0, x1, CCSTATE->gpr[0]
+ | ldp d0, d1, CCSTATE->fpr[0]
+ | ldp x2, x3, CCSTATE->gpr[2]
+ | ldp d2, d3, CCSTATE->fpr[2]
+ | ldp x4, x5, CCSTATE->gpr[4]
+ | ldp d4, d5, CCSTATE->fpr[4]
+ | ldp x6, x7, CCSTATE->gpr[6]
+ | ldp d6, d7, CCSTATE->fpr[6]
+ | ldr x8, CCSTATE->retp
+ | blr TMP3
+ | sub sp, fp, #16
+ | stp x0, x1, CCSTATE->gpr[0]
+ | stp d0, d1, CCSTATE->fpr[0]
+ | stp d2, d3, CCSTATE->fpr[2]
+ | ldp fp, lr, [sp, #16]
+ | ldp x20, CCSTATE, [sp], #32
+ | ret
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RC = src2, JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG2, [BASE, RC, lsl #3]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
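+ | // Branch targets carry a 0x8000 bias: target = PC + (RD-0x8000)*4,
+ | // hence the -0x20000 after the scaled add.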
+ | checkint CARG1, >3
+ | checkint CARG2, >4
+ | cmp CARG1w, CARG2w
+ if (op == BC_ISLT) {
+ | csel PC, RB, PC, lt
+ } else if (op == BC_ISGE) {
+ | csel PC, RB, PC, ge
+ } else if (op == BC_ISLE) {
+ | csel PC, RB, PC, le
+ } else {
+ | csel PC, RB, PC, gt
+ }
+ |1:
+ | ins_next
+ |
+ |3: // RA not int.
+ | ldr FARG1, [BASE, RA, lsl #3]
+ | blo ->vmeta_comp
+ | ldr FARG2, [BASE, RC, lsl #3]
+ | cmp TISNUMhi, CARG2, lsr #32
+ | bhi >5
+ | bne ->vmeta_comp
+ | // RA number, RC int.
+ | scvtf FARG2, CARG2w
+ | b >5
+ |
+ |4: // RA int, RC not int
+ | ldr FARG2, [BASE, RC, lsl #3]
+ | blo ->vmeta_comp
+ | // RA int, RC number.
+ | scvtf FARG1, CARG1w
+ |
+ |5: // RA number, RC number
+ | fcmp FARG1, FARG2
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ if (op == BC_ISLT) {
+ | csel PC, RB, PC, lo
+ } else if (op == BC_ISGE) {
+ | csel PC, RB, PC, hs
+ } else if (op == BC_ISLE) {
+ | csel PC, RB, PC, ls
+ } else {
+ | csel PC, RB, PC, hi
+ }
+ | b <1
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1, RC = src2, JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add RC, BASE, RC, lsl #3
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG3, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ | asr ITYPE, CARG3, #47
+ | cmn ITYPE, #-LJ_TISNUM
+ if (vk) {
+ | bls ->BC_ISEQN_Z
+ } else {
+ | bls ->BC_ISNEN_Z
+ }
+ | // RC is not a number.
+ | asr TMP0, CARG1, #47
+ |.if FFI
+ | // Check if RC or RA is a cdata.
+ | cmn ITYPE, #-LJ_TCDATA
+ | ccmn TMP0, #-LJ_TCDATA, #4, ne
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG1, CARG3
+ | bne >2
+ | // Tag and value are equal.
+ if (vk) {
+ |->BC_ISEQV_Z:
+ | mov PC, RB // Perform branch.
+ }
+ |1:
+ | ins_next
+ |
+ |2: // Check if the tags are the same and it's a table or userdata.
+ | cmp ITYPE, TMP0
+ | ccmn ITYPE, #-LJ_TISTABUD, #2, eq
+ if (vk) {
+ | bhi <1
+ } else {
+ | bhi ->BC_ISEQV_Z // Reuse code from opposite instruction.
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at the same offset for GCtab and GCudata!
+ | and TAB:CARG2, CARG1, #LJ_GCVMASK
+ | ldr TAB:TMP2, TAB:CARG2->metatable
+ if (vk) {
+ | cbz TAB:TMP2, <1 // No metatable?
+ | ldrb TMP1w, TAB:TMP2->nomm
+ | mov CARG4, #0 // ne = 0
+ | tbnz TMP1w, #MM_eq, <1 // 'no __eq' flag set: done.
+ } else {
+ | cbz TAB:TMP2, ->BC_ISEQV_Z // No metatable?
+ | ldrb TMP1w, TAB:TMP2->nomm
+ | mov CARG4, #1 // ne = 1.
+ | tbnz TMP1w, #MM_eq, ->BC_ISEQV_Z // 'no __eq' flag set: done.
+ }
+ | b ->vmeta_equal
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src, RC = str_const (~), JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | mvn RC, RC
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | add PC, PC, #4
+ | movn TMP0, #~LJ_TSTR
+ |.if FFI
+ | asr ITYPE, CARG1, #47
+ |.endif
+ | add RB, PC, RB, lsl #2
+ | add CARG2, CARG2, TMP0, lsl #47
+ | sub RB, RB, #0x20000
+ |.if FFI
+ | cmn ITYPE, #-LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ |.endif
+ | cmp CARG1, CARG2
+ if (vk) {
+ | csel PC, RB, PC, eq
+ } else {
+ | csel PC, RB, PC, ne
+ }
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src, RC = num_const (~), JMP with RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add RC, KBASE, RC, lsl #3
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr CARG3, [RC]
+ | add PC, PC, #4
+ | add RB, PC, RB, lsl #2
+ | sub RB, RB, #0x20000
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | checkint CARG1, >4
+ | checkint CARG3, >6
+ | cmp CARG1w, CARG3w
+ |1:
+ if (vk) {
+ | csel PC, RB, PC, eq
+ |2:
+ } else {
+ |2:
+ | csel PC, RB, PC, ne
+ }
+ |3:
+ | ins_next
+ |
+ |4: // RA not int.
+ |.if FFI
+ | blo >7
+ |.else
+ | blo <2
+ |.endif
+ | ldr FARG1, [BASE, RA, lsl #3]
+ | ldr FARG2, [RC]
+ | cmp TISNUMhi, CARG3, lsr #32
+ | bne >5
+ | // RA number, RC int.
+ | scvtf FARG2, CARG3w
+ |5:
+ | // RA number, RC number.
+ | fcmp FARG1, FARG2
+ | b <1
+ |
+ |6: // RA int, RC number
+ | ldr FARG2, [RC]
+ | scvtf FARG1, CARG1w
+ | fcmp FARG1, FARG2
+ | b <1
+ |
+ |.if FFI
+ |7:
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TCDATA
+ | bne <2
+ | b ->vmeta_equal_cd
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src, RC = primitive_type (~), JMP with RC = target
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrh RBw, [PC, # OFS_RD]
+ | add PC, PC, #4
+ | add RC, RC, #1
+ | add RB, PC, RB, lsl #2
+ |.if FFI
+ | asr ITYPE, TMP0, #47
+ | cmn ITYPE, #-LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ | cmn RC, ITYPE
+ |.else
+ | cmn RC, TMP0, asr #47
+ |.endif
+ | sub RB, RB, #0x20000
+ if (vk) {
+ | csel PC, RB, PC, eq
+ } else {
+ | csel PC, RB, PC, ne
+ }
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst or unused, RC = src, JMP with RC = target
+ | ldrh RBw, [PC, # OFS_RD]
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | add PC, PC, #4
+ | mov_false TMP1
+ | add RB, PC, RB, lsl #2
+ | cmp TMP0, TMP1
+ | sub RB, RB, #0x20000
+ if (op == BC_ISTC || op == BC_IST) {
+ if (op == BC_ISTC) {
+ | csel RA, RA, RC, lo
+ }
+ | csel PC, RB, PC, lo
+ } else {
+ if (op == BC_ISFC) {
+ | csel RA, RA, RC, hs
+ }
+ | csel PC, RB, PC, hs
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | str TMP0, [BASE, RA, lsl #3]
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src, RC = -type
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cmn RC, TMP0, asr #47
+ | bne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | // RA = src, RC = -(TISNUM-1)
+ | ldr TMP0, [BASE, RA]
+ | checknum TMP0, ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_NOT:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | mov_false TMP1
+ | mov_true TMP2
+ | cmp TMP0, TMP1
+ | csel TMP0, TMP1, TMP2, lo
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_UNM:
+ | // RA = dst, RC = src
+ | ldr TMP0, [BASE, RC, lsl #3]
+ | asr ITYPE, TMP0, #47
+ | cmn ITYPE, #-LJ_TISNUM
+ | bhi ->vmeta_unm
+ | eor TMP0, TMP0, #U64x(80000000,00000000)
+ | bne >5
+ | negs TMP0w, TMP0w
+ | movz CARG3, #0x41e0, lsl #48 // 2^31.
+ | add TMP0, TMP0, TISNUM
+ | csel TMP0, TMP0, CARG3, vc
+ |5:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_LEN:
+ | // RA = dst, RC = src
+ | ldr CARG1, [BASE, RC, lsl #3]
+ | asr ITYPE, CARG1, #47
+ | cmn ITYPE, #-LJ_TSTR
+ | and CARG1, CARG1, #LJ_GCVMASK
+ | bne >2
+ | ldr CARG1w, STR:CARG1->len
+ |1:
+ | add CARG1, CARG1, TISNUM
+ | str CARG1, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |2:
+ | cmn ITYPE, #-LJ_TTAB
+ | bne ->vmeta_len
+#if LJ_52
+ | ldr TAB:CARG2, TAB:CARG1->metatable
+ | cbnz TAB:CARG2, >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |
+#if LJ_52
+ |9:
+ | ldrb TMP1w, TAB:CARG2->nomm
+ | tbnz TMP1w, #MM_len, <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithcheck_int, target
+ | checkint CARG1, target
+ | checkint CARG2, target
+ |.endmacro
+ |
+ |.macro ins_arithcheck_num, target
+ | checknum CARG1, target
+ | checknum CARG2, target
+ |.endmacro
+ |
+ |.macro ins_arithcheck_nzdiv, target
+ | cbz CARG2w, target
+ |.endmacro
+ |
+ |.macro ins_arithhead
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||if (vk == 1) {
+ | and RC, RC, #255
+ | decode_RB RB, INS
+ ||} else {
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithload, reg1, reg2
+ | // RA = dst, RB = src1, RC = src2 | num_const
+ ||switch (vk) {
+ ||case 0:
+ | ldr reg1, [BASE, RB, lsl #3]
+ | ldr reg2, [KBASE, RC, lsl #3]
+ || break;
+ ||case 1:
+ | ldr reg1, [KBASE, RC, lsl #3]
+ | ldr reg2, [BASE, RB, lsl #3]
+ || break;
+ ||default:
+ | ldr reg1, [BASE, RB, lsl #3]
+ | ldr reg2, [BASE, RC, lsl #3]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithmod, res, reg1, reg2
+ | fdiv d2, reg1, reg2
+ | frintm d2, d2
+ | fmsub res, d2, reg2, reg1
+ |.endmacro
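+ |// ins_arithmod computes the Lua FP modulo reg1 - floor(reg1/reg2)*reg2:
+ |// frintm rounds toward -inf and fmsub fuses the multiply-subtract.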
+ |
+ |.macro ins_arithdn, intins, fpins
+ | ins_arithhead
+ | ins_arithload CARG1, CARG2
+ | ins_arithcheck_int >5
+ |.if "intins" == "smull"
+ | smull CARG1, CARG1w, CARG2w
+ | cmp CARG1, CARG1, sxtw
+ | mov CARG1w, CARG1w
+ | ins_arithfallback bne
+ |.elif "intins" == "ins_arithmodi"
+ | ins_arithfallback ins_arithcheck_nzdiv
+ | bl ->vm_modi
+ |.else
+ | intins CARG1w, CARG1w, CARG2w
+ | ins_arithfallback bvs
+ |.endif
+ | add CARG1, CARG1, TISNUM
+ | str CARG1, [BASE, RA, lsl #3]
+ |4:
+ | ins_next
+ |
+ |5: // FP variant.
+ | ins_arithload FARG1, FARG2
+ | ins_arithfallback ins_arithcheck_num
+ | fpins FARG1, FARG1, FARG2
+ | str FARG1, [BASE, RA, lsl #3]
+ | b <4
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpins
+ | ins_arithhead
+ | ins_arithload CARG1, CARG2
+ | ins_arithload FARG1, FARG2
+ | ins_arithfallback ins_arithcheck_num
+ |.if "fpins" == "fpow"
+ | bl extern pow
+ |.else
+ | fpins FARG1, FARG1, FARG2
+ |.endif
+ | str FARG1, [BASE, RA, lsl #3]
+ | ins_next
+ |.endmacro
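+ |// ins_arithdn does the integer op first: non-integer operands take the
+ |// FP variant at label 5, while integer overflow (V set by adds/subs, or
+ |// a smull product differing from its 32-bit sign-extension) falls back
+ |// to the vmeta_arith handlers.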
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arithdn adds, fadd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arithdn subs, fsub
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arithdn smull, fmul
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp fdiv
+ break;
+ case BC_MODVN: case BC_MODNV: case BC_MODVV:
+ | ins_arithdn ins_arithmodi, ins_arithmod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | ins_arithfp fpow
+ break;
+
+ case BC_CAT:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = src_start, RC = src_end
+ | str BASE, L->base
+ | sub CARG3, RC, RB
+ | add CARG2, BASE, RC, lsl #3
+ |->BC_CAT_Z:
+ | // RA = dst, CARG2 = top-1, CARG3 = left
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | ldrb RBw, [PC, #-4+OFS_RB]
+ | ldr BASE, L->base
+ | cbnz CRET1, ->vmeta_binop
+ | ldr TMP0, [BASE, RB, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3] // Copy result to RA.
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst, RC = str_const (~)
+ | mvn RC, RC
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | movn TMP1, #~LJ_TSTR
+ | add TMP0, TMP0, TMP1, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst, RC = cdata_const (~)
+ | mvn RC, RC
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | movn TMP1, #~LJ_TCDATA
+ | add TMP0, TMP0, TMP1, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst, RC = int16_literal
+ | sxth RCw, RCw
+ | add TMP0, RC, TISNUM
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KNUM:
+ | // RA = dst, RC = num_const
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KPRI:
+ | // RA = dst, RC = primitive_type (~)
+ | mvn TMP0, RC, lsl #47
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_KNIL:
+ | // RA = base, RC = end
+ | add RA, BASE, RA, lsl #3
+ | add RC, BASE, RC, lsl #3
+ | str TISNIL, [RA], #8
+ |1:
+ | cmp RA, RC
+ | str TISNIL, [RA], #8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst, RC = uvnum
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RC, RC, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG2->v
+ | ldr TMP0, [CARG2]
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+ case BC_USETV:
+ | // RA = uvnum, RC = src
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
+ | ldr CARG3, [BASE, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG1->v
+ | ldrb TMP2w, UPVAL:CARG1->marked
+ | ldrb TMP0w, UPVAL:CARG1->closed
+ | asr ITYPE, CARG3, #47
+ | str CARG3, [CARG2]
+ | add ITYPE, ITYPE, #-LJ_TISGCV
+ | tst TMP2w, #LJ_GC_BLACK // isblack(uv)
+ | ccmp TMP0w, #0, #4, ne // && uv->closed
+ | ccmn ITYPE, #-(LJ_TNUMX - LJ_TISGCV), #0, ne // && tvisgcv(v)
+ | bhi >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is white.
+ | and GCOBJ:CARG3, CARG3, #LJ_GCVMASK
+ | ldrb TMP1w, GCOBJ:CARG3->gch.marked
+ | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
+ | beq <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov CARG1, GL
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum, RC = str_const (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | mvn RC, RC
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG1, [LFUNC:CARG2, RA, lsl #3]
+ | ldr STR:CARG3, [KBASE, RC, lsl #3]
+ | movn TMP0, #~LJ_TSTR
+ | ldr CARG2, UPVAL:CARG1->v
+ | ldrb TMP2w, UPVAL:CARG1->marked
+ | add TMP0, STR:CARG3, TMP0, lsl #47
+ | ldrb TMP1w, STR:CARG3->marked
+ | str TMP0, [CARG2]
+ | tbnz TMP2w, #2, >2 // isblack(uv)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | ldrb TMP0w, UPVAL:CARG1->closed
+ | tst TMP1w, #LJ_GC_WHITES // iswhite(str)
+ | ccmp TMP0w, #0, #4, ne
+ | beq <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov CARG1, GL
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum, RC = num_const
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
+ | ldr TMP0, [KBASE, RC, lsl #3]
+ | ldr CARG2, UPVAL:CARG2->v
+ | str TMP0, [CARG2]
+ | ins_next
+ break;
+ case BC_USETP:
+ | // RA = uvnum, RC = primitive_type (~)
+ | ldr LFUNC:CARG2, [BASE, FRAME_FUNC]
+ | add RA, RA, #offsetof(GCfuncL, uvptr)/8
+ | and LFUNC:CARG2, CARG2, #LJ_GCVMASK
+ | ldr UPVAL:CARG2, [LFUNC:CARG2, RA, lsl #3]
+ | mvn TMP0, RC, lsl #47
+ | ldr CARG2, UPVAL:CARG2->v
+ | str TMP0, [CARG2]
+ | ins_next
+ break;
+
+ case BC_UCLO:
+ | // RA = level, RC = target
+ | ldr CARG3, L->openupval
+ | add RC, PC, RC, lsl #2
+ | str BASE, L->base
+ | sub PC, RC, #0x20000
+ | cbz CARG3, >1
+ | mov CARG1, L
+ | add CARG2, BASE, RA, lsl #3
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | ldr BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst, RC = proto_const (~) (holding function prototype)
+ | mvn RC, RC
+ | str BASE, L->base
+ | ldr LFUNC:CARG3, [BASE, FRAME_FUNC]
+ | str PC, SAVE_PC
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | mov CARG1, L
+ | and LFUNC:CARG3, CARG3, #LJ_GCVMASK
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | ldr BASE, L->base
+ | movn TMP0, #~LJ_TFUNC
+ | add CRET1, CRET1, TMP0, lsl #47
+ | str CRET1, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst, RC = (hbits|asize) | tab_const (~)
+ | ldp CARG3, CARG4, GL->gc.total // Assumes threshold follows total.
+ | str BASE, L->base
+ | str PC, SAVE_PC
+ | mov CARG1, L
+ | cmp CARG3, CARG4
+ | bhs >5
+ |1:
+ if (op == BC_TNEW) {
+ | and CARG2, RC, #0x7ff
+ | lsr CARG3, RC, #11
+ | cmp CARG2, #0x7ff
+ | mov TMP0, #0x801
+ | csel CARG2, CARG2, TMP0, ne
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns GCtab *.
+ } else {
+ | mvn RC, RC
+ | ldr CARG2, [KBASE, RC, lsl #3]
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns GCtab *.
+ }
+ | ldr BASE, L->base
+ | movk CRET1, #(LJ_TTAB>>1)&0xffff, lsl #48
+ | str CRET1, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5:
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mov CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst, RC = str_const (~)
+ case BC_GSET:
+ | // RA = src, RC = str_const (~)
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ | mvn RC, RC
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr TAB:CARG2, LFUNC:CARG1->env
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tgetv
+ | checkint TMP1, >9 // Integer key?
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, TMP1, uxtw #3
+ | cmp TMP1w, CARG1w // In array part?
+ | bhs ->vmeta_tgetv
+ | ldr TMP0, [CARG3]
+ | cmp TMP0, TISNIL
+ | beq >5
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |9:
+ | asr ITYPE, TMP1, #47
+ | cmn ITYPE, #-LJ_TSTR // String key?
+ | bne ->vmeta_tgetv
+ | and STR:RC, TMP1, #LJ_GCVMASK
+ | b ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = str_const (~)
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = dst
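+ | // Sketch of the walk below (a Node is 3 slots, hence idx*3*8):
+ | //   Node *n = &t->node[str->sid & t->hmask];
+ | //   do { if (n->key == tagged(str)) goto found; } while ((n = n->next));
+ | // A miss or a nil value then checks the __index metamethod.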
+ | ldr TMP1w, TAB:CARG2->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:CARG2->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ |1:
+ | ldp TMP0, CARG1, NODE:CARG3->val
+ | ldr NODE:CARG3, NODE:CARG3->next
+ | cmp CARG1, CARG4
+ | bne >4
+ | cmp TMP0, TISNIL
+ | beq >5
+ |3:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | cbnz NODE:CARG3, <1
+ | // End of hash chain: key not found, nil result.
+ | mov TMP0, TISNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <3 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = index
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | checktab CARG2, ->vmeta_tgetb
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, RC, lsl #3
+ | cmp RCw, CARG1w // In array part?
+ | bhs ->vmeta_tgetb
+ | ldr TMP0, [CARG3]
+ | cmp TMP0, TISNIL
+ | beq >5
+ |1:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ |
+ |5: // Check for __index if table value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_index, <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb
+ break;
+ case BC_TGETR:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = key
+ | ldr CARG1, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | and TAB:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG3, TAB:CARG1->array
+ | ldr TMP2w, TAB:CARG1->asize
+ | add CARG3, CARG3, TMP1w, uxtw #3
+ | cmp TMP1w, TMP2w // In array part?
+ | bhs ->vmeta_tgetr
+ | ldr TMP0, [CARG3]
+ |->BC_TGETR_Z:
+ | str TMP0, [BASE, RA, lsl #3]
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tsetv
+ | checkint TMP1, >9 // Integer key?
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, TMP1, uxtw #3
+ | cmp TMP1w, CARG1w // In array part?
+ | bhs ->vmeta_tsetv
+ | ldr TMP1, [CARG3]
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp TMP1, TISNIL // Previous value is nil?
+ | beq >5
+ |1:
+ | str TMP0, [CARG3]
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <2
+ |
+ |9:
+ | asr ITYPE, TMP1, #47
+ | cmn ITYPE, #-LJ_TSTR // String key?
+ | bne ->vmeta_tsetv
+ | and STR:RC, TMP1, #LJ_GCVMASK
+ | b ->BC_TSETS_Z
+ break;
+ case BC_TSETS:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = dst, RB = table, RC = str_const (~)
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | mvn RC, RC
+ | ldr STR:RC, [KBASE, RC, lsl #3]
+ | checktab CARG2, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:CARG2 = GCtab *, STR:RC = GCstr *, RA = src
+ | ldr TMP1w, TAB:CARG2->hmask
+ | ldr TMP2w, STR:RC->sid
+ | ldr NODE:CARG3, TAB:CARG2->node
+ | and TMP1w, TMP1w, TMP2w // idx = str->sid & tab->hmask
+ | add TMP1, TMP1, TMP1, lsl #1
+ | movn CARG4, #~LJ_TSTR
+ | add NODE:CARG3, NODE:CARG3, TMP1, lsl #3 // node = tab->node + idx*3*8
+ | add CARG4, STR:RC, CARG4, lsl #47 // Tagged key to look for.
+ | strb wzr, TAB:CARG2->nomm // Clear metamethod cache.
+ |1:
+ | ldp TMP1, CARG1, NODE:CARG3->val
+ | ldr NODE:TMP3, NODE:CARG3->next
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp CARG1, CARG4
+ | bne >5
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | cmp TMP1, TISNIL // Previous value is nil?
+ | beq >4
+ |2:
+ | str TMP0, NODE:CARG3->val
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <2 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | mov NODE:CARG3, NODE:TMP3
+ | cbnz NODE:TMP3, <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, >6 // No metatable: continue.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | // 'no __newindex' flag NOT set: check.
+ | tbz TMP1w, #MM_newindex, ->vmeta_tsets
+ |6:
+ | movn TMP1, #~LJ_TSTR
+ | str PC, SAVE_PC
+ | add TMP0, STR:RC, TMP1, lsl #47
+ | str BASE, L->base
+ | mov CARG1, L
+ | str TMP0, TMPD
+ | add CARG3, sp, TMPDofs
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | ldr BASE, L->base
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | str TMP0, [CRET1]
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <3
+ break;
+ case BC_TSETB:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = index
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | checktab CARG2, ->vmeta_tsetb
+ | ldr CARG3, TAB:CARG2->array
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3, CARG3, RC, lsl #3
+ | cmp RCw, CARG1w // In array part?
+ | bhs ->vmeta_tsetb
+ | ldr TMP1, [CARG3]
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | ldrb TMP2w, TAB:CARG2->marked
+ | cmp TMP1, TISNIL // Previous value is nil?
+ | beq >5
+ |1:
+ | str TMP0, [CARG3]
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ldr TAB:CARG1, TAB:CARG2->metatable
+ | cbz TAB:CARG1, <1 // No metatable: done.
+ | ldrb TMP1w, TAB:CARG1->nomm
+ | tbnz TMP1w, #MM_newindex, <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <2
+ break;
+ case BC_TSETR:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = src, RB = table, RC = key
+ | ldr CARG2, [BASE, RB, lsl #3]
+ | ldr TMP1, [BASE, RC, lsl #3]
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ldr CARG1, TAB:CARG2->array
+ | ldrb TMP2w, TAB:CARG2->marked
+ | ldr CARG4w, TAB:CARG2->asize
+ | add CARG1, CARG1, TMP1, uxtw #3
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |2:
+ | cmp TMP1w, CARG4w // In array part?
+ | bhs ->vmeta_tsetr
+ |->BC_TSETR_Z:
+ | ldr TMP0, [BASE, RA, lsl #3]
+ | str TMP0, [CARG1]
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP0
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base (table at base-1), RC = num_const (start index)
+ | add RA, BASE, RA, lsl #3
+ |1:
+ | ldr RBw, SAVE_MULTRES
+ | ldr TAB:CARG2, [RA, #-8] // Guaranteed to be a table.
+ | ldr TMP1, [KBASE, RC, lsl #3] // Integer constant is in lo-word.
+ | sub RB, RB, #8
+ | cbz RB, >4 // Nothing to copy?
+ | and TAB:CARG2, CARG2, #LJ_GCVMASK
+ | ldr CARG1w, TAB:CARG2->asize
+ | add CARG3w, TMP1w, RBw, lsr #3
+ | ldr CARG4, TAB:CARG2->array
+ | cmp CARG3, CARG1
+ | add RB, RA, RB
+ | bhi >5
+ | add TMP1, CARG4, TMP1w, uxtw #3
+ | ldrb TMP2w, TAB:CARG2->marked
+ |3: // Copy result slots to table.
+ | ldr TMP0, [RA], #8
+ | str TMP0, [TMP1], #8
+ | cmp RA, RB
+ | blo <3
+ | tbnz TMP2w, #2, >7 // isblack(table)
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | str BASE, L->base
+ | mov CARG1, L
+ | str PC, SAVE_PC
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP2w, TMP1
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base, (RB = nresults+1,) RC = extra_nargs
+ | ldr TMP0w, SAVE_MULTRES
+ | decode_RC8RD NARGS8:RC, RC
+ | add NARGS8:RC, NARGS8:RC, TMP0
+ | b ->BC_CALL_Z
+ break;
+ case BC_CALL:
+ | decode_RC8RD NARGS8:RC, RC
+ | // RA = base, (RB = nresults+1,) RC = (nargs+1)*8
+ |->BC_CALL_Z:
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | add BASE, BASE, RA, lsl #3
+ | ldr CARG3, [BASE]
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add BASE, BASE, #16
+ | checkfunc CARG3, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base, (RB = 0,) RC = extra_nargs
+ | ldr TMP0w, SAVE_MULTRES
+ | add NARGS8:RC, TMP0, RC, lsl #3
+ | b ->BC_CALLT1_Z
+ break;
+ case BC_CALLT:
+ | lsl NARGS8:RC, RC, #3
+ | // RA = base, (RB = 0,) RC = (nargs+1)*8
+ |->BC_CALLT1_Z:
+ | add RA, BASE, RA, lsl #3
+ | ldr TMP1, [RA]
+ | sub NARGS8:RC, NARGS8:RC, #8
+ | add RA, RA, #16
+ | checktp CARG3, TMP1, LJ_TFUNC, ->vmeta_callt
+ | ldr PC, [BASE, FRAME_PC]
+ |->BC_CALLT2_Z:
+ | mov RB, #0
+ | ldrb TMP2w, LFUNC:CARG3->ffid
+ | tst PC, #FRAME_TYPE
+ | bne >7
+ |1:
+ | str TMP1, [BASE, FRAME_FUNC] // Copy function down, but keep PC.
+ | cbz NARGS8:RC, >3
+ |2:
+ | ldr TMP0, [RA, RB]
+ | add TMP1, RB, #8
+ | cmp TMP1, NARGS8:RC
+ | str TMP0, [BASE, RB]
+ | mov RB, TMP1
+ | bne <2
+ |3:
+ | cmp TMP2, #1 // (> FF_C) Calling a fast function?
+ | bhi >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | ldrb RAw, [PC, #-4+OFS_RA]
+ | sub CARG1, BASE, RA, lsl #3
+ | ldr LFUNC:CARG1, [CARG1, #-32]
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG1, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG1, #PC2PROTO(k)]
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | eor PC, PC, #FRAME_VARG
+ | tst PC, #FRAME_TYPEP // Vararg frame below?
+ | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
+ | bne <1
+ | sub BASE, BASE, PC
+ | ldr PC, [BASE, FRAME_PC]
+ | tst PC, #FRAME_TYPE
+ | csel TMP2, RB, TMP2, ne // Clear ffid if no Lua function below.
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA, lsl #3
+ | ldr CARG3, [RA, #-24]
+ | mov RB, BASE // Save old BASE for vmeta_call.
+ | ldp CARG1, CARG2, [RA, #-16]
+ | add BASE, RA, #16
+ | mov NARGS8:RC, #16 // Iterators get 2 arguments.
+ | str CARG3, [RA] // Copy callable.
+ | stp CARG1, CARG2, [RA, #16] // Copy state and control var.
+ | checkfunc CARG3, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | add RA, BASE, RA, lsl #3
+ | ldr TAB:RB, [RA, #-16]
+ | ldrh TMP3w, [PC, # OFS_RD]
+ | ldr CARG1w, [RA, #-8+LO] // Get index from control var.
+ | add PC, PC, #4
+ | add TMP3, PC, TMP3, lsl #2
+ | and TAB:RB, RB, #LJ_GCVMASK
+ | sub TMP3, TMP3, #0x20000
+ | ldr TMP1w, TAB:RB->asize
+ | ldr CARG2, TAB:RB->array
+ |1: // Traverse array part.
+ | subs RC, CARG1, TMP1
+ | add CARG3, CARG2, CARG1, lsl #3
+ | bhs >5 // Index points after array part?
+ | ldr TMP0, [CARG3]
+ | cmp TMP0, TISNIL
+ | cinc CARG1, CARG1, eq // Skip holes in array part.
+ | beq <1
+ | add CARG1, CARG1, TISNUM
+ | stp CARG1, TMP0, [RA]
+ | add CARG1, CARG1, #1
+ |3:
+ | str CARG1w, [RA, #-8+LO] // Update control var.
+ | mov PC, TMP3
+ |4:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | ldr TMP2w, TAB:RB->hmask
+ | ldr NODE:RB, TAB:RB->node
+ |6:
+ | add CARG1, RC, RC, lsl #1
+ | cmp RC, TMP2 // End of iteration? Branch to ITERN+1.
+ | add NODE:CARG3, NODE:RB, CARG1, lsl #3 // node = tab->node + idx*3*8
+ | bhi <4
+ | ldp TMP0, CARG1, NODE:CARG3->val
+ | cmp TMP0, TISNIL
+ | add RC, RC, #1
+ | beq <6 // Skip holes in hash part.
+ | stp CARG1, TMP0, [RA]
+ | add CARG1, RC, TMP1
+ | b <3
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base, RC = target (points to ITERN)
+ | add RA, BASE, RA, lsl #3
+ | ldr CFUNC:CARG1, [RA, #-24]
+ | add RC, PC, RC, lsl #2
+ | ldp TAB:CARG3, CARG4, [RA, #-16]
+ | sub RC, RC, #0x20000
+ | checkfunc CFUNC:CARG1, >5
+ | asr TMP0, TAB:CARG3, #47
+ | ldrb TMP1w, CFUNC:CARG1->ffid
+ | cmn TMP0, #-LJ_TTAB
+ | ccmp CARG4, TISNIL, #0, eq
+ | ccmp TMP1w, #FF_next_N, #0, eq
+ | bne >5
+ | mov TMP0w, #0xfffe7fff // LJ_KEYINDEX
+ | lsl TMP0, TMP0, #32
+ | str TMP0, [RA, #-8] // Initialize control var.
+ |1:
+ | mov PC, RC
+ | ins_next
+ |
+ |5: // Despecialize bytecode if any of the checks fail.
+ |.if JIT
+ | ldrb TMP2w, [RC, # OFS_OP]
+ |.endif
+ | mov TMP0, #BC_JMP
+ | mov TMP1, #BC_ITERC
+ | strb TMP0w, [PC, #-4+OFS_OP]
+ |.if JIT
+ | cmp TMP2w, #BC_ITERN
+ | bne >6
+ |.endif
+ | strb TMP1w, [RC, # OFS_OP]
+ | b <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | ldr RA, [GL, #GL_J(trace)]
+ | ldrh TMP2w, [RC, # OFS_RD]
+ | ldr TRACE:RA, [RA, TMP2, lsl #3]
+ | ldr TMP2w, TRACE:RA->startins
+ | bfxil TMP2w, TMP1w, #0, #8
+ | str TMP2w, [RC]
+ | b <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | decode_RB RB, INS
+ | and RC, RC, #255
+ | // RA = base, RB = (nresults+1), RC = numparams
+ | ldr TMP1, [BASE, FRAME_PC]
+ | add RC, BASE, RC, lsl #3
+ | add RA, BASE, RA, lsl #3
+ | add RC, RC, #FRAME_VARG
+ | add TMP2, RA, RB, lsl #3
+ | sub RC, RC, TMP1 // RC = vbase
+ | // Note: RC may now even be _above_ BASE if nargs was < numparams.
+ | sub TMP3, BASE, #16 // TMP3 = vtop
+ | cbz RB, >5
+ | sub TMP2, TMP2, #16
+ |1: // Copy vararg slots to destination slots.
+ | cmp RC, TMP3
+ | ldr TMP0, [RC], #8
+ | csel TMP0, TMP0, TISNIL, lo
+ | cmp RA, TMP2
+ | str TMP0, [RA], #8
+ | blo <1
+ |2:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ldr TMP0, L->maxstack
+ | subs TMP2, TMP3, RC
+ | csel RB, xzr, TMP2, le // MULTRES = (max(vtop-vbase,0)+1)*8
+ | add RB, RB, #8
+ | add TMP1, RA, TMP2
+ | str RBw, SAVE_MULTRES
+ | ble <2 // Nothing to copy.
+ | cmp TMP1, TMP0
+ | bhi >7
+ |6:
+ | ldr TMP0, [RC], #8
+ | str TMP0, [RA], #8
+ | cmp RC, TMP3
+ | blo <6
+ | b <2
+ |
+ |7: // Grow stack for varargs.
+ | lsr CARG2, TMP2, #3
+ | stp BASE, RA, L->base
+ | mov CARG1, L
+ | sub RC, RC, BASE // Need delta, because BASE may change.
+ | str PC, SAVE_PC
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | ldp BASE, RA, L->base
+ | add RC, BASE, RC
+ | sub TMP3, BASE, #16
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results, RC = extra results
+ | ldr TMP0w, SAVE_MULTRES
+ | ldr PC, [BASE, FRAME_PC]
+ | add RA, BASE, RA, lsl #3
+ | add RC, TMP0, RC, lsl #3
+ | b ->BC_RETM_Z
+ break;
+
+ case BC_RET:
+ | // RA = results, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | add RA, BASE, RA, lsl #3
+ |->BC_RETM_Z:
+ | str RCw, SAVE_MULTRES
+ |1:
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV2_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RC = (nresults+1)*8, PC = return
+ | ldr INSw, [PC, #-4]
+ | subs TMP1, RC, #8
+ | sub CARG3, BASE, #16
+ | beq >3
+ |2:
+ | ldr TMP0, [RA], #8
+ | add BASE, BASE, #8
+ | sub TMP1, TMP1, #8
+ | str TMP0, [BASE, #-24]
+ | cbnz TMP1, <2
+ |3:
+ | decode_RA RA, INS
+ | sub CARG4, CARG3, RA, lsl #3
+ | decode_RB RB, INS
+ | ldr LFUNC:CARG1, [CARG4, FRAME_FUNC]
+ |5:
+ | cmp RC, RB, lsl #3 // More results expected?
+ | blo >6
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | mov BASE, CARG4
+ | ldr CARG2, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add BASE, BASE, #8
+ | add RC, RC, #8
+ | str TISNIL, [BASE, #-24]
+ | b <5
+ |
+ |->BC_RETV1_Z: // Non-standard return case.
+ | add RA, BASE, RA, lsl #3
+ |->BC_RETV2_Z:
+ | tst CARG2, #FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, CARG2
+ | ldr PC, [BASE, FRAME_PC]
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results, RC = nresults+1
+ | ldr PC, [BASE, FRAME_PC]
+ | lsl RC, RC, #3
+ | str RCw, SAVE_MULTRES
+ | ands CARG1, PC, #FRAME_TYPE
+ | eor CARG2, PC, #FRAME_VARG
+ | bne ->BC_RETV1_Z
+ | ldr INSw, [PC, #-4]
+ if (op == BC_RET1) {
+ | ldr TMP0, [BASE, RA, lsl #3]
+ }
+ | sub CARG4, BASE, #16
+ | decode_RA RA, INS
+ | sub BASE, CARG4, RA, lsl #3
+ if (op == BC_RET1) {
+ | str TMP0, [CARG4], #8
+ }
+ | decode_RB RB, INS
+ | ldr LFUNC:CARG1, [BASE, FRAME_FUNC]
+ |5:
+ | cmp RC, RB, lsl #3
+ | blo >6
+ | and LFUNC:CARG1, CARG1, #LJ_GCVMASK
+ | ldr CARG2, LFUNC:CARG1->pc
+ | ldr KBASE, [CARG2, #PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | add RC, RC, #8
+ | str TISNIL, [CARG4], #8
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, [RA, #4]
+ |.define FOR_STOP, [RA, #8]; .define FOR_TSTOP, [RA, #12]
+ |.define FOR_STEP, [RA, #16]; .define FOR_TSTEP, [RA, #20]
+ |.define FOR_EXT, [RA, #24]; .define FOR_TEXT, [RA, #28]
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base, RC = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | add RA, BASE, RA, lsl #3
+ | ldp CARG1, CARG2, FOR_IDX // CARG1 = IDX, CARG2 = STOP
+ | ldr CARG3, FOR_STEP // CARG3 = STEP
+ if (op != BC_JFORL) {
+ | add RC, PC, RC, lsl #2
+ | sub RC, RC, #0x20000
+ }
+ | checkint CARG1, >5
+ if (!vk) {
+ | checkint CARG2, ->vmeta_for
+ | checkint CARG3, ->vmeta_for
+ | tbnz CARG3w, #31, >4
+ | cmp CARG1w, CARG2w
+ } else {
+ | adds CARG1w, CARG1w, CARG3w
+ | bvs >2
+ | add TMP0, CARG1, TISNUM
+ | tbnz CARG3w, #31, >4
+ | cmp CARG1w, CARG2w
+ }
+ |1:
+ if (op == BC_FORI) {
+ | csel PC, RC, PC, gt
+ } else if (op == BC_JFORI) {
+ | mov PC, RC
+ | ldrh RCw, [RC, #-4+OFS_RD]
+ } else if (op == BC_IFORL) {
+ | csel PC, RC, PC, le
+ }
+ if (vk) {
+ | str TMP0, FOR_IDX
+ | str TMP0, FOR_EXT
+ } else {
+ | str CARG1, FOR_EXT
+ }
+ if (op == BC_JFORI || op == BC_JFORL) {
+ | ble =>BC_JLOOP
+ }
+ |2:
+ | ins_next
+ |
+ |4: // Invert check for negative step.
+ | cmp CARG2w, CARG1w
+ | b <1
+ |
+ |5: // FP loop.
+ | ldp d0, d1, FOR_IDX
+ | blo ->vmeta_for
+ if (!vk) {
+ | checknum CARG2, ->vmeta_for
+ | checknum CARG3, ->vmeta_for
+ | str d0, FOR_EXT
+ } else {
+ | ldr d2, FOR_STEP
+ | fadd d0, d0, d2
+ }
+ | tbnz CARG3, #63, >7
+ | fcmp d0, d1
+ |6:
+ if (vk) {
+ | str d0, FOR_IDX
+ | str d0, FOR_EXT
+ }
+ if (op == BC_FORI) {
+ | csel PC, RC, PC, hi
+ } else if (op == BC_JFORI) {
+ | ldrh RCw, [RC, #-4+OFS_RD]
+ | bls =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | csel PC, RC, PC, ls
+ } else {
+ | bls =>BC_JLOOP
+ }
+ | b <2
+ |
+ |7: // Invert check for negative step.
+ | fcmp d1, d0
+ | b <6
+ break;
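+   /* The FOR* cases above implement the numeric for loop, roughly:
+      for (i = start; step >= 0 ? i <= stop : i >= stop; i += step) body;
+      The integer path simply exits the loop when i + step overflows. */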
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base, RC = target
+ | ldr CARG1, [BASE, RA, lsl #3]
+ | add TMP1, BASE, RA, lsl #3
+ | cmp CARG1, TISNIL
+ | beq >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | str CARG1, [TMP1, #-8]
+ | b =>BC_JLOOP
+ } else {
+ | add TMP0, PC, RC, lsl #2 // Otherwise save control var + branch.
+ | sub PC, TMP0, #0x20000
+ | str CARG1, [TMP1, #-8]
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base, RC = target (loop extent)
+ | // Note: RA/RC are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base, RC = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base (ignored), RC = traceno
+ | ldr CARG1, [GL, #GL_J(trace)]
+ | mov CARG2w, #0 // Traces on ARM64 don't store the trace #, so use 0.
+ | ldr TRACE:RC, [CARG1, RC, lsl #3]
+ | st_vmstate CARG2w
+ | ldr RA, TRACE:RC->mcode
+ | str BASE, GL->jit_base
+ | str L, GL->tmpbuf.L
+ | sub sp, sp, #16 // See SPS_FIXED. Avoids sp adjust in every root trace.
+ | br RA
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base (only used by trace recorder), RC = target
+ | add RC, PC, RC, lsl #2
+ | sub PC, RC, #0x20000
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | bhi ->vm_growstack_l
+ |2:
+ | cmp NARGS8:RC, TMP1, lsl #3 // Check for missing parameters.
+ | blo >3
+ if (op == BC_JFUNCF) {
+ | decode_RD RC, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | str TISNIL, [BASE, NARGS8:RC]
+ | add NARGS8:RC, NARGS8:RC, #8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = LFUNC, RC = nargs*8
+ | ldr CARG1, L->maxstack
+ | movn TMP0, #~LJ_TFUNC
+ | add TMP2, BASE, RC
+ | add LFUNC:CARG3, CARG3, TMP0, lsl #47
+ | add RA, RA, RC
+ | add TMP0, RC, #16+FRAME_VARG
+ | str LFUNC:CARG3, [TMP2], #8 // Store (tagged) copy of LFUNC.
+ | ldr KBASE, [PC, #-4+PC2PROTO(k)]
+ | cmp RA, CARG1
+ | str TMP0, [TMP2], #8 // Store delta + FRAME_VARG.
+ | bhs ->vm_growstack_l
+ | sub RC, TMP2, #16
+ | ldrb TMP1w, [PC, #-4+PC2PROTO(numparams)]
+ | mov RA, BASE
+ | mov BASE, TMP2
+ | cbz TMP1, >2
+ |1:
+ | cmp RA, RC // Less args than parameters?
+ | bhs >3
+ | ldr TMP0, [RA]
+ | sub TMP1, TMP1, #1
+ | str TISNIL, [RA], #8 // Clear old fixarg slot (help the GC).
+ | str TMP0, [TMP2], #8
+ | cbnz TMP1, <1
+ |2:
+ | ins_next
+ |
+ |3:
+ | sub TMP1, TMP1, #1
+ | str TISNIL, [TMP2], #8
+ | cbz TMP1, <2
+ | b <3
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, CARG3 = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ldr CARG4, CFUNC:CARG3->f
+ } else {
+ | ldr CARG4, GL->wrapf
+ }
+ | add CARG2, RA, NARGS8:RC
+ | ldr CARG1, L->maxstack
+ | add RC, BASE, NARGS8:RC
+ | cmp CARG2, CARG1
+ | stp BASE, RC, L->base
+ if (op == BC_FUNCCW) {
+ | ldr CARG2, CFUNC:CARG3->f
+ }
+ | mv_vmstate TMP0w, C
+ | mov CARG1, L
+ | bhi ->vm_growstack_c // Need to grow stack.
+ | st_vmstate TMP0w
+ | blr CARG4 // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | ldp BASE, TMP1, L->base
+ | str L, GL->cur_L
+ | sbfiz RC, CRET1, #3, #32
+ | st_vmstate ST_INTERP
+ | ldr PC, [BASE, FRAME_PC]
+ | sub RA, TMP1, RC // RA = L->top - nresults*8
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
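+ /* Reserve one DynASM pc label per opcode, so code can branch via =>BC_xxx. */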
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ fcofs);
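+ /* DW_CFA_offset is encoded as 0x80+reg; its ULEB128 operand is the
+ save-slot index, scaled by the CIE data alignment factor of -8. */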
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",%%progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ fcofs);
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+#if !LJ_NO_UNWIND
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int j;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+ "\t.long _lj_err_unwind_dwarf@GOT-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ "LECIEX:\n\n");
+ for (j = 0; j < ctx->nsym; j++) {
+ const char *name = ctx->sym[j].name;
+ int32_t size = ctx->sym[j+1].ofs - ctx->sym[j].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n", /* offset fp */
+ j, j, j, j, j, j, j, name, size);
+ for (i = 19; i <= 28; i++) /* offset x19-x28 */
+ fprintf(ctx->fp, "\t.byte 0x%x\n\t.uleb128 %d\n", 0x80+i, i+(3-19));
+ for (i = 8; i <= 15; i++) /* offset d8-d15 */
+ fprintf(ctx->fp, "\t.byte 5\n\t.uleb128 0x%x\n\t.uleb128 %d\n",
+ 64+i, i+(3+(28-19+1)-8));
+ fprintf(ctx->fp,
+ "\t.align 3\n"
+ "LEFDE%d:\n\n", j);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 30\n" /* Return address is in lr. */
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 16\n" /* def_cfa fp 16 */
+ "\t.align 3\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9e\n\t.uleb128 1\n" /* offset lr */
+ "\t.byte 0x9d\n\t.uleb128 2\n" /* offset fp */
+ "\t.byte 0x93\n\t.uleb128 3\n" /* offset x19 */
+ "\t.byte 0x94\n\t.uleb128 4\n" /* offset x20 */
+ "\t.align 3\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+ fprintf(ctx->fp, ".subsections_via_symbols\n");
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_mips.dasc b/libs/luajit-cmake/luajit/src/vm_mips.dasc
new file mode 100644
index 0000000..34645bf
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_mips.dasc
@@ -0,0 +1,5392 @@
+|// Low-level VM code for MIPS CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|//
+|// MIPS soft-float support contributed by Djordje Kovacevic and
+|// Stefan Pejic from RT-RK.com, sponsored by Cisco Systems, Inc.
+|
+|.arch mips
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
+|
+|.macro .FPU, a, b
+|.if FPU
+| a, b
+|.endif
+|.endmacro
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r16 // Base of current Lua stack frame.
+|.define KBASE, r17 // Constants of current Lua function.
+|.define PC, r18 // Next PC.
+|.define DISPATCH, r19 // Opcode dispatch table.
+|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
+|
+|.define JGL, r30 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNUM, r22
+|.define TISNIL, r30
+|.if FPU
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.endif
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r23 // Callee-save.
+|.define RB, r8
+|.define RC, r9
+|.define RD, r10
+|.define INS, r11
+|
+|.define AT, r1 // Assembler temporary.
+|.define TMP0, r12
+|.define TMP1, r13
+|.define TMP2, r14
+|.define TMP3, r15
+|
+|// MIPS o32 calling convention.
+|.define CFUNCADDR, r25
+|.define CARG1, r4
+|.define CARG2, r5
+|.define CARG3, r6
+|.define CARG4, r7
+|
+|.define CRET1, r2
+|.define CRET2, r3
+|
+|.if ENDIAN_LE
+|.define SFRETLO, CRET1
+|.define SFRETHI, CRET2
+|.define SFARG1LO, CARG1
+|.define SFARG1HI, CARG2
+|.define SFARG2LO, CARG3
+|.define SFARG2HI, CARG4
+|.else
+|.define SFRETLO, CRET2
+|.define SFRETHI, CRET1
+|.define SFARG1LO, CARG2
+|.define SFARG1HI, CARG1
+|.define SFARG2LO, CARG4
+|.define SFARG2HI, CARG3
+|.endif
+|
+|.if FPU
+|.define FARG1, f12
+|.define FARG2, f14
+|
+|.define FRET1, f0
+|.define FRET2, f2
+|.endif
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.if FPU // MIPS32 hard-float.
+|
+|.define CFRAME_SPACE, 112 // Delta for sp.
+|
+|.define SAVE_ERRF, 124(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 120(sp)
+|.define SAVE_CFRAME, 116(sp)
+|.define SAVE_L, 112(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
+|.define SAVE_GPR_, 72 // .. 72+10*4: 32 bit GPR saves.
+|.define SAVE_FPR_, 24 // .. 24+6*8: 64 bit FPR saves.
+|
+|.else // MIPS32 soft-float
+|
+|.define CFRAME_SPACE, 64 // Delta for sp.
+|
+|.define SAVE_ERRF, 76(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 72(sp)
+|.define SAVE_CFRAME, 68(sp)
+|.define SAVE_L, 64(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by interpreter.
+|.define SAVE_GPR_, 24 // .. 24+10*4: 32 bit GPR saves.
+|
+|.endif
+|
+|.define SAVE_PC, 20(sp)
+|.define ARG5, 16(sp)
+|.define CSAVE_4, 12(sp)
+|.define CSAVE_3, 8(sp)
+|.define CSAVE_2, 4(sp)
+|.define CSAVE_1, 0(sp)
+|//----- 8 byte aligned, ^^^^ 16 byte register save area, owned by callee.
+|
+|.define ARG5_OFS, 16
+|.define SAVE_MULTRES, ARG5
+|
+|//-----------------------------------------------------------------------
+|
+|.macro saveregs
+| addiu sp, sp, -CFRAME_SPACE
+| sw ra, SAVE_GPR_+9*4(sp)
+| sw r30, SAVE_GPR_+8*4(sp)
+| .FPU sdc1 f30, SAVE_FPR_+5*8(sp)
+| sw r23, SAVE_GPR_+7*4(sp)
+| sw r22, SAVE_GPR_+6*4(sp)
+| .FPU sdc1 f28, SAVE_FPR_+4*8(sp)
+| sw r21, SAVE_GPR_+5*4(sp)
+| sw r20, SAVE_GPR_+4*4(sp)
+| .FPU sdc1 f26, SAVE_FPR_+3*8(sp)
+| sw r19, SAVE_GPR_+3*4(sp)
+| sw r18, SAVE_GPR_+2*4(sp)
+| .FPU sdc1 f24, SAVE_FPR_+2*8(sp)
+| sw r17, SAVE_GPR_+1*4(sp)
+| sw r16, SAVE_GPR_+0*4(sp)
+| .FPU sdc1 f22, SAVE_FPR_+1*8(sp)
+| .FPU sdc1 f20, SAVE_FPR_+0*8(sp)
+|.endmacro
+|
+|.macro restoreregs_ret
+| lw ra, SAVE_GPR_+9*4(sp)
+| lw r30, SAVE_GPR_+8*4(sp)
+| .FPU ldc1 f30, SAVE_FPR_+5*8(sp)
+| lw r23, SAVE_GPR_+7*4(sp)
+| lw r22, SAVE_GPR_+6*4(sp)
+| .FPU ldc1 f28, SAVE_FPR_+4*8(sp)
+| lw r21, SAVE_GPR_+5*4(sp)
+| lw r20, SAVE_GPR_+4*4(sp)
+| .FPU ldc1 f26, SAVE_FPR_+3*8(sp)
+| lw r19, SAVE_GPR_+3*4(sp)
+| lw r18, SAVE_GPR_+2*4(sp)
+| .FPU ldc1 f24, SAVE_FPR_+2*8(sp)
+| lw r17, SAVE_GPR_+1*4(sp)
+| lw r16, SAVE_GPR_+0*4(sp)
+| .FPU ldc1 f22, SAVE_FPR_+1*8(sp)
+| .FPU ldc1 f20, SAVE_FPR_+0*8(sp)
+| jr ra
+| addiu sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; .long 0xec1cf0f0; .endmacro
+|
+|// Macros to mark delay slots.
+|.macro ., a; a; .endmacro
+|.macro ., a,b; a,b; .endmacro
+|.macro ., a,b,c; a,b,c; .endmacro
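+ |// On MIPS the instruction after a branch (the delay slot) always executes,
+ |// whether or not the branch is taken; the `.` macro flags such instructions.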
+|
+|//-----------------------------------------------------------------------
+|
+|// Endian-specific defines.
+|.if ENDIAN_LE
+|.define FRAME_PC, -4
+|.define FRAME_FUNC, -8
+|.define HI, 4
+|.define LO, 0
+|.define OFS_RD, 2
+|.define OFS_RA, 1
+|.define OFS_OP, 0
+|.else
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|.define HI, 0
+|.define LO, 4
+|.define OFS_RD, 0
+|.define OFS_RA, 2
+|.define OFS_OP, 3
+|.endif
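+ |// A TValue on MIPS32 is 8 bytes: the payload in one word, the type tag in
+ |// the other; HI/LO give their byte offsets for either endianness. Likewise
+ |// FRAME_PC/FRAME_FUNC address the PC and function words of the frame link
+ |// slot just below BASE.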
+|
+|// Instruction decode.
+|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4a, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RC4a, dst, ins; srl dst, ins, 14; .endmacro
+|.macro decode_RC4b, dst; andi dst, dst, 0x3fc; .endmacro
+|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
+|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
+|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
+|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
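+ |// Bytecode layout (see lj_bc.h): a 32 bit instruction packs OP in bits 0-7,
+ |// A in bits 8-15, and either C (16-23) plus B (24-31) or a 16 bit D (16-31).
+ |// Operands are extracted pre-scaled: the *8 variants yield byte offsets into
+ |// 8-byte stack slots, the *4 variants suit 4-byte entries such as the
+ |// dispatch table.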
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP4a TMP1, INS
+| decode_OP4b TMP1
+| addu TMP0, DISPATCH, TMP1
+| decode_RD8a RD, INS
+| lw AT, 0(TMP0)
+| decode_RA8a RA, INS
+| decode_RD8b RD
+| jr AT
+| decode_RA8b RA
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+ | // Replicated dispatch. Fewer unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lw PC, LFUNC:RB->pc
+| lw INS, 0(PC)
+| addiu PC, PC, 4
+| decode_OP4a TMP1, INS
+| decode_RA8a RA, INS
+| decode_OP4b TMP1
+| decode_RA8b RA
+| addu TMP0, DISPATCH, TMP1
+| lw TMP0, 0(TMP0)
+| jr TMP0
+| addu RA, RA, BASE
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| sw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|.macro branch_RD
+| srl TMP0, RD, 1
+| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
+| addu TMP0, TMP0, AT
+| addu PC, PC, TMP0
+|.endmacro
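+ |// RD arrives pre-scaled by 8, so RD>>1 is the jump operand times 4; the net
+ |// effect is PC += (operand - BCBIAS_J)*4 for the biased 16 bit operand.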
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + 4*LJ_GOT_##name)
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|.macro load_got, func
+| lw CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
+|.endmacro
+|// Much faster. Sadly, there's no easy way to force the required code layout.
+|// .macro call_intern, func; bal extern func; .endmacro
+|.macro call_intern, func; jalr CFUNCADDR; .endmacro
+|.macro call_extern; jalr CFUNCADDR; .endmacro
+|.macro jmp_extern; jr CFUNCADDR; .endmacro
+|
+|.macro hotcheck, delta, target
+| srl TMP1, PC, 1
+| andi TMP1, TMP1, 126
+| addu TMP1, TMP1, DISPATCH
+| lhu TMP2, GG_DISP2HOT(TMP1)
+| addiu TMP2, TMP2, -delta
+| bltz TMP2, target
+|. sh TMP2, GG_DISP2HOT(TMP1)
+|.endmacro
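+ |// Hot-counting: 64 16 bit counters, hashed by PC, live at GG_DISP2HOT from
+ |// DISPATCH. Each hot loop/call decrements one counter by delta; when it
+ |// underflows, the target (->vm_hotloop or ->vm_hotcall) invokes the JIT.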
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp, target
+| lw tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
+| sw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| sb mark, tab->marked
+| b target
+|. sw tmp, tab->gclist
+|.endmacro
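+ |// This reverts the table from black to gray and links it into gc.grayagain,
+ |// so the GC rescans it after the mutator has stored a white value into it.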
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi AT, PC, FRAME_P
+ | beqz AT, ->cont_dispatch
+ |. li TMP1, LJ_TTRUE
+ |
+ | // Return from pcall or xpcall fast func.
+ | lw PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | move BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | sw TMP1, FRAME_PC(RA) // Prepend true to results.
+ | addiu RA, RA, -8
+ |
+ |->vm_returnc:
+ | addiu RD, RD, 8 // RD = (nresults+1)*8.
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz RD, ->vm_unwind_c_eh
+ |. li CRET1, LUA_YIELD
+ | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
+ |. move MULTRES, RD
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | li TMP2, -8
+ | xori AT, TMP0, FRAME_C
+ | and TMP2, PC, TMP2
+ | bnez AT, ->vm_returnp
+ |. subu TMP2, BASE, TMP2 // TMP2 = previous base.
+ |
+ | addiu TMP1, RD, -8
+ | sw TMP2, L->base
+ | li_vmstate C
+ | lw TMP2, SAVE_NRES
+ | addiu BASE, BASE, -8
+ | st_vmstate
+ | beqz TMP1, >2
+ |. sll TMP2, TMP2, 3
+ |1:
+ | addiu TMP1, TMP1, -8
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | addiu RA, RA, 8
+ | sw SFRETHI, HI(BASE)
+ | sw SFRETLO, LO(BASE)
+ | bnez TMP1, <1
+ |. addiu BASE, BASE, 8
+ |
+ |2:
+ | bne TMP2, RD, >6
+ |3:
+ |. sw BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lw TMP0, SAVE_CFRAME // Restore previous C frame.
+ | move CRET1, r0 // Ok return status for vm_pcall.
+ | sw TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | lw TMP1, L->maxstack
+ | slt AT, TMP2, RD
+ | bnez AT, >7 // Fewer results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ |. slt AT, BASE, TMP1
+ | beqz AT, >8
+ |. nop
+ | sw TISNIL, HI(BASE)
+ | addiu RD, RD, 8
+ | b <2
+ |. addiu BASE, BASE, 8
+ |
+ |7: // Fewer results wanted.
+ | subu TMP0, RD, TMP2
+ | subu TMP0, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
+ |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with a (high) nresults adjustment.
+ | load_got lj_state_growstack
+ | move MULTRES, RD
+ | srl CARG2, TMP2, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw TMP2, SAVE_NRES
+ | lw BASE, L->top // Need the (realloced) L->top in BASE.
+ | move RD, MULTRES
+ | b <2
+ |. sll TMP2, TMP2, 3
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | move sp, CARG1
+ | move CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | lw GL:TMP1, L->glref
+ | b ->vm_leave_unw
+ |. sw TMP0, GL:TMP1->vmstate
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | li AT, -4
+ | and sp, CARG1, AT
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lw L, SAVE_L
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | li TISNIL, LJ_TNIL
+ | lw BASE, L->base
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | .FPU mtc1 TMP3, TOBIT
+ | li TMP1, LJ_TFALSE
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | addiu RA, BASE, -8 // Results start at BASE-8.
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP1, HI(RA) // Prepend false to error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |. li RD, 16 // 2 results: false + error message.
+ |
+ |->vm_unwind_stub: // Jump to exit stub from unwinder.
+ | jr CARG1
+ |. move ra, CARG2
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | b >2
+ |. li CARG2, LUA_MINSTACK
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | addu RC, BASE, RC
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | addiu PC, PC, 4 // Must point after first instruction.
+ | sw RC, L->top
+ | srl CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | load_got lj_state_growstack
+ | sw PC, SAVE_PC
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | lw RC, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | subu RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | move L, CARG1
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | move BASE, CARG2
+ | lbu TMP1, L->status
+ | sw L, SAVE_L
+ | li PC, FRAME_CP
+ | addiu TMP0, sp, CFRAME_RESUME
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw r0, SAVE_NRES
+ | sw r0, SAVE_ERRF
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sw r0, SAVE_CFRAME
+ | beqz TMP1, >3
+ |. sw TMP0, L->cframe
+ |
+ | // Resume after yield (like a return).
+ | sw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | move RA, BASE
+ | lw BASE, L->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lw TMP1, L->top
+ | lw PC, FRAME_PC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | subu RD, TMP1, BASE
+ | .FPU mtc1 TMP3, TOBIT
+ | sb r0, L->status
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | addiu RD, RD, 8
+ | st_vmstate
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz TMP0, ->BC_RET_Z
+ |. li TISNIL, LJ_TNIL
+ | b ->vm_return
+ |. nop
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | sw CARG4, SAVE_ERRF
+ | b >1
+ |. li PC, FRAME_CP
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lw TMP1, L:CARG1->cframe
+ | move L, CARG1
+ | sw CARG3, SAVE_NRES
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sw CARG1, SAVE_L
+ | move BASE, CARG2
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sw TMP1, SAVE_CFRAME
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | sw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | lw TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw TMP1, L->top
+ | .FPU mtc1 TMP3, TOBIT
+ | addu PC, PC, BASE
+ | subu NARGS8:RC, TMP1, BASE
+ | subu PC, PC, TMP2 // PC = frame delta + frame type
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lw TMP0, FRAME_PC(BASE)
+ | li AT, LJ_TFUNC
+ | bne TMP0, AT, ->vmeta_call
+ |. lw LFUNC:RB, FRAME_FUNC(BASE)
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | move L, CARG1
+ | lw TMP0, L:CARG1->stack
+ | sw CARG1, SAVE_L
+ | lw TMP1, L->top
+ | lw DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | subu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lw TMP1, L->cframe
+ | addiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | sw r0, SAVE_ERRF // No error function.
+ | sw TMP1, SAVE_CFRAME
+ | sw sp, L->cframe // Add our C frame to cframe chain.
+ | sw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |. move CFUNCADDR, CARG4
+ | move BASE, CRET1
+ | bnez CRET1, <3 // Else continue with the call.
+ |. li PC, FRAME_CP
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |. nop
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lw TMP0, -16+LO(BASE) // Continuation.
+ | move RB, BASE
+ | move BASE, TMP2 // Restore caller BASE.
+ | lw LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | sltiu AT, TMP0, 2
+ |.endif
+ | lw PC, -16+HI(RB) // Restore PC from [cont|PC].
+ | addu TMP2, RA, RD
+ |.if FFI
+ | bnez AT, >1
+ |.endif
+ |. sw TISNIL, -8+HI(TMP2) // Ensure one valid arg.
+ | lw TMP1, LFUNC:TMP1->pc
+ | // BASE = base, RA = resultptr, RB = meta base
+ | jr TMP0 // Jump to continuation.
+ |. lw KBASE, PC2PROTO(k)(TMP1)
+ |
+ |.if FFI
+ |1:
+ | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ |. addiu TMP1, RB, -16
+ | b ->vm_call_tail
+ |. subu RC, TMP1, BASE
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | addiu CARG2, RB, -16
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | decode_RB8a MULTRES, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b MULTRES
+ | decode_RA8b RA
+ | addu TMP1, BASE, MULTRES
+ | sw BASE, L->base
+ | subu CARG3, CARG2, TMP1
+ | sw SFRETHI, HI(CARG2)
+ | bne TMP1, CARG2, ->BC_CAT_Z
+ |. sw SFRETLO, LO(CARG2)
+ | addu RA, BASE, RA
+ | sw SFRETHI, HI(RA)
+ | b ->cont_nop
+ |. sw SFRETLO, LO(RA)
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tgets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sw TMP0, LO(CARG3)
+ | sw TISNUM, HI(CARG3)
+ |
+ |->vmeta_tgetv:
+ |1:
+ | load_got lj_meta_tget
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. addiu TMP1, BASE, -FRAME_CONT
+ | lw SFARG1HI, HI(CRET1)
+ | lw SFARG1LO, LO(CRET1)
+ | ins_next1
+ | sw SFARG1HI, HI(RA)
+ | sw SFARG1LO, LO(RA)
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 16 // 2 args for func(t, k).
+ |
+ |->vmeta_tgetr:
+ | load_got lj_tab_getinth
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. nop
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->BC_TGETR_Z
+ |. move SFARG2HI, TISNIL
+ | lw SFARG2HI, HI(CRET1)
+ | b ->BC_TGETR_Z
+ |. lw SFARG2LO, LO(CRET1)
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP0, HI(CARG3)
+ |
+ |->vmeta_tsets:
+ | addiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | sw TAB:RB, LO(CARG2)
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sw TMP0, HI(CARG2)
+ | li TMP1, LJ_TSTR
+ | sw STR:RC, LO(CARG3)
+ | b >1
+ |. sw TMP1, HI(CARG3)
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | sw TMP0, LO(CARG3)
+ | sw TISNUM, HI(CARG3)
+ |
+ |->vmeta_tsetv:
+ |1:
+ | load_got lj_meta_tset
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | lw SFARG1HI, HI(RA)
+ | beqz CRET1, >3
+ |. lw SFARG1LO, LO(RA)
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | sw SFARG1HI, HI(CRET1)
+ | sw SFARG1LO, LO(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | addiu TMP1, BASE, -FRAME_CONT
+ | lw BASE, L->top
+ | sw PC, -16+HI(BASE) // [cont|PC]
+ | subu PC, BASE, TMP1
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | sw SFARG1HI, 16+HI(BASE) // Copy value to third argument.
+ | sw SFARG1LO, 16+LO(BASE)
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |
+ |->vmeta_tsetr:
+ | load_got lj_tab_setinth
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | b ->BC_TSETR_Z
+ |. nop
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | // RA/RD point to o1/o2.
+ | move CARG2, RA
+ | move CARG3, RD
+ | load_got lj_meta_comp
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | decode_OP1 CARG4, INS
+ | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | sltiu AT, CRET1, 2
+ | beqz AT, ->vmeta_binop
+ |. negu TMP2, CRET1
+ |4:
+ | lhu RD, OFS_RD(PC)
+ | addiu PC, PC, 4
+ | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sll RD, RD, 2
+ | addu RD, RD, TMP1
+ | and RD, RD, TMP2
+ | addu PC, PC, RD
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lbu TMP1, -4+OFS_RA(PC)
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | sw SFRETHI, HI(TMP1)
+ | b ->cont_nop
+ |. sw SFRETLO, LO(TMP1)
+ |
+ |->cont_condt: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. negu TMP2, AT // Branch if result is true.
+ |
+ |->cont_condf: // RA = resultptr
+ | lw TMP0, HI(RA)
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. addiu TMP2, AT, -1 // Branch if result is false.
+ |
+ |->vmeta_equal:
+ | // SFARG1LO/SFARG2LO point to o1/o2. TMP0 is set to 0/1.
+ | load_got lj_meta_equal
+ | move CARG2, SFARG1LO
+ | move CARG3, SFARG2LO
+ | move CARG4, TMP0
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | load_got lj_meta_equal_cd
+ | move CARG2, INS
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vmeta_istype:
+ | load_got lj_meta_istype
+ | addiu PC, PC, -4
+ | sw BASE, L->base
+ | srl CARG2, RA, 3
+ | srl CARG3, RD, 3
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ |. move CARG1, L
+ | b ->cont_nop
+ |. nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_unm:
+ | move RC, RB
+ |
+ |->vmeta_arith:
+ | load_got lj_meta_arith
+ | decode_OP1 TMP0, INS
+ | sw BASE, L->base
+ | move CARG2, RA
+ | sw PC, SAVE_PC
+ | move CARG3, RB
+ | move CARG4, RC
+ | sw TMP0, ARG5
+ | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | beqz CRET1, ->cont_nop
+ |. nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | subu TMP1, CRET1, BASE
+ | sw PC, -16+HI(CRET1) // [cont|PC]
+ | move TMP2, BASE
+ | addiu PC, TMP1, FRAME_CONT
+ | move BASE, CRET1
+ | b ->vm_call_dispatch
+ |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ |
+ |->vmeta_len:
+ | // CARG2 already set by BC_LEN.
+#if LJ_52
+ | move MULTRES, CARG1
+#endif
+ | load_got lj_meta_len
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_len // (lua_State *L, TValue *o)
+ |. move CARG1, L
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
+ |. nop
+ | b ->BC_LEN_Z
+ |. move CARG1, MULTRES
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+ |. nop
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw TMP2, L->base // This is the caller's base!
+ | addiu CARG2, BASE, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, BASE, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sw BASE, L->base
+ | addiu CARG2, RA, -8
+ | sw PC, SAVE_PC
+ | addu CARG3, RA, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | lw TMP1, FRAME_PC(BASE)
+ | lw LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |. addiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | load_got lj_meta_for
+ | sw BASE, L->base
+ | move CARG2, RA
+ | sw PC, SAVE_PC
+ | move MULTRES, INS
+ | call_intern lj_meta_for // (lua_State *L, TValue *base)
+ |. move CARG1, L
+ |.if JIT
+ | decode_OP1 TMP0, MULTRES
+ | li AT, BC_JFORI
+ |.endif
+ | decode_RA8a RA, MULTRES
+ | decode_RD8a RD, MULTRES
+ | decode_RA8b RA
+ |.if JIT
+ | beq TMP0, AT, =>BC_JFORI
+ |. decode_RD8b RD
+ | b =>BC_FORI
+ |. nop
+ |.else
+ | b =>BC_FORI
+ |. decode_RD8b RD
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | lw SFARG1HI, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. lw SFARG1LO, LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw SFARG1HI, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw SFARG2HI, 8+HI(BASE)
+ | lw SFARG1LO, LO(BASE)
+ | lw SFARG2LO, 8+LO(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_n, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | lw SFARG1HI, HI(BASE)
+ |.if FPU
+ | ldc1 FARG1, 0(BASE)
+ |.else
+ | lw SFARG1LO, LO(BASE)
+ |.endif
+ | beqz NARGS8:RC, ->fff_fallback
+ |. sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | lw SFARG1HI, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw SFARG2HI, 8+HI(BASE)
+ | sltiu TMP0, SFARG1HI, LJ_TISNUM
+ |.if FPU
+ | ldc1 FARG1, 0(BASE)
+ |.else
+ | lw SFARG1LO, LO(BASE)
+ |.endif
+ | sltiu TMP1, SFARG2HI, LJ_TISNUM
+ |.if FPU
+ | ldc1 FARG2, 8(BASE)
+ |.else
+ | lw SFARG2LO, 8+LO(BASE)
+ |.endif
+ | and TMP0, TMP0, TMP1
+ | beqz TMP0, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
+ |.macro ffgccheck
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | subu AT, TMP0, TMP1
+ | bgezal AT, ->fff_gcstep
+ |.endmacro
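+ |// I.e. take a GC step when gc.total >= gc.threshold. bgezal also records
+ |// the return address, so ->fff_gcstep can resume the fast function.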
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | sltiu AT, SFARG1HI, LJ_TISTRUECOND
+ | beqz AT, ->fff_fallback
+ |. addiu RA, BASE, -8
+ | lw PC, FRAME_PC(BASE)
+ | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | addu TMP2, RA, NARGS8:RC
+ | sw SFARG1HI, HI(RA)
+ | addiu TMP1, BASE, 8
+ | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
+ |. sw SFARG1LO, LO(RA)
+ |1:
+ | lw SFRETHI, HI(TMP1)
+ | lw SFRETLO, LO(TMP1)
+ | sw SFRETHI, -8+HI(TMP1)
+ | sw SFRETLO, -8+LO(TMP1)
+ | bne TMP1, TMP2, <1
+ |. addiu TMP1, TMP1, 8
+ | b ->fff_res
+ |. nop
+ |
+ |.ffunc type
+ | lw SFARG1HI, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. sltiu TMP0, SFARG1HI, LJ_TISNUM
+ | movn SFARG1HI, TISNUM, TMP0
+ | not TMP1, SFARG1HI
+ | sll TMP1, TMP1, 3
+ | addu TMP1, CFUNC:RB, TMP1
+ | lw SFARG1HI, CFUNC:TMP1->upvalue[0].u32.hi
+ | b ->fff_restv
+ |. lw SFARG1LO, CFUNC:TMP1->upvalue[0].u32.lo
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | li AT, LJ_TTAB
+ | bne SFARG1HI, AT, >6
+ |. li AT, LJ_TUDATA
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:SFARG1LO, TAB:SFARG1LO->metatable
+ |2:
+ | lw STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beqz TAB:SFARG1LO, ->fff_restv
+ |. li SFARG1HI, LJ_TNIL
+ | lw TMP0, TAB:SFARG1LO->hmask
+ | li SFARG1HI, LJ_TTAB // Use metatable as default result.
+ | lw TMP1, STR:RC->sid
+ | lw NODE:TMP2, TAB:SFARG1LO->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | li AT, LJ_TSTR
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lw CARG4, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP3, NODE:TMP2->next
+ | bne CARG4, AT, >4
+ |. lw CARG3, offsetof(Node, val)+HI(NODE:TMP2)
+ | beq TMP0, STR:RC, >5
+ |. lw TMP1, offsetof(Node, val)+LO(NODE:TMP2)
+ |4:
+ | beqz NODE:TMP3, ->fff_restv // Not found, keep default result.
+ |. move NODE:TMP2, NODE:TMP3
+ | b <3
+ |. nop
+ |5:
+ | beq CARG3, TISNIL, ->fff_restv // Ditto for nil value.
+ |. nop
+ | move SFARG1HI, CARG3 // Return value of mt.__metatable.
+ | b ->fff_restv
+ |. move SFARG1LO, TMP1
+ |
+ |6:
+ | beq SFARG1HI, AT, <1
+ |. sltu AT, TISNUM, SFARG1HI
+ | movz SFARG1HI, TISNUM, AT
+ | not TMP1, SFARG1HI
+ | sll TMP1, TMP1, 2
+ | addu TMP1, DISPATCH, TMP1
+ | b <2
+ |. lw TAB:SFARG1LO, DISPATCH_GL(gcroot[GCROOT_BASEMT])(TMP1)
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | li AT, LJ_TTAB
+ | bne SFARG1HI, AT, ->fff_fallback
+ |. addiu SFARG2HI, SFARG2HI, -LJ_TTAB
+ | lw TAB:TMP1, TAB:SFARG1LO->metatable
+ | lbu TMP3, TAB:SFARG1LO->marked
+ | or AT, SFARG2HI, TAB:TMP1
+ | bnez AT, ->fff_fallback
+ |. andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | beqz AT, ->fff_restv
+ |. sw TAB:SFARG2LO, TAB:SFARG1LO->metatable
+ | barrierback TAB:SFARG1LO, TMP3, TMP0, ->fff_restv
+ |
+ |.ffunc rawget
+ | lw CARG4, HI(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | lw TAB:CARG2, LO(BASE)
+ | load_got lj_tab_get
+ | addiu CARG4, CARG4, -LJ_TTAB
+ | or AT, AT, CARG4
+ | bnez AT, ->fff_fallback
+ |. addiu CARG3, BASE, 8
+ | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ |. move CARG1, L
+ | // Returns cTValue *.
+ | lw SFARG1HI, HI(CRET1)
+ | b ->fff_restv
+ |. lw SFARG1LO, LO(CRET1)
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | lw CARG1, HI(BASE)
+ | xori AT, NARGS8:RC, 8 // Exactly one number argument.
+ | sltu TMP0, TISNUM, CARG1
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. lw SFARG1HI, HI(BASE)
+ | b ->fff_restv
+ |. lw SFARG1LO, LO(BASE)
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | li AT, LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beq SFARG1HI, AT, ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ |. lw TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | sltu TMP0, TISNUM, SFARG1HI
+ | or TMP0, TMP0, TMP1
+ | bnez TMP0, ->fff_fallback
+ |. sw BASE, L->base // Add frame since C call can throw.
+ | ffgccheck
+ |. sw PC, SAVE_PC // Redundant (but a defined value).
+ | load_got lj_strfmt_number
+ | move CARG1, L
+ | call_intern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |. move CARG2, BASE
+ | // Returns GCstr *.
+ | li SFARG1HI, LJ_TSTR
+ | b ->fff_restv
+ |. move SFARG1LO, CRET1
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc next
+ | lw CARG2, HI(BASE)
+ | lw TAB:CARG1, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. addu TMP2, BASE, NARGS8:RC
+ | li AT, LJ_TTAB
+ | sw TISNIL, HI(TMP2) // Set missing 2nd arg to nil.
+ | bne CARG2, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | load_got lj_tab_next
+ | addiu CARG2, BASE, 8
+ | call_intern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ |. addiu CARG3, BASE, -8
+ | // Returns 1=found, 0=end, -1=error.
+ | addiu RA, BASE, -8
+ | bgtz CRET1, ->fff_res // Found key/value.
+ |. li RD, (2+1)*8
+ | beqz CRET1, ->fff_restv // End of traversal: return nil.
+ |. li SFARG1HI, LJ_TNIL
+ | lw CFUNC:RB, FRAME_FUNC(BASE)
+ | b ->fff_fallback // Invalid key.
+ |. li RC, 2*8
+ |
+ |.ffunc_1 pairs
+ | li AT, LJ_TTAB
+ | bne SFARG1HI, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#if LJ_52
+ | lw TAB:TMP2, TAB:SFARG1LO->metatable
+ | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
+#endif
+ |. addiu RA, BASE, -8
+ | sw TISNIL, 8+HI(BASE)
+ | sw TMP0, HI(RA)
+ | sw TMP1, LO(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |.ffunc ipairs_aux
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG3, HI(BASE)
+ | lw TAB:CARG1, LO(BASE)
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. addiu CARG3, CARG3, -LJ_TTAB
+ | xor CARG4, CARG4, TISNUM
+ | and AT, CARG3, CARG4
+ | bnez AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+ | lw TMP2, 8+LO(BASE)
+ | lw TMP0, TAB:CARG1->asize
+ | lw TMP1, TAB:CARG1->array
+ | addiu TMP2, TMP2, 1
+ | sw TISNUM, -8+HI(BASE)
+ | sltu AT, TMP2, TMP0
+ | sw TMP2, -8+LO(BASE)
+ | beqz AT, >2 // Not in array part?
+ |. addiu RA, BASE, -8
+ | sll TMP3, TMP2, 3
+ | addu TMP3, TMP1, TMP3
+ | lw TMP1, HI(TMP3)
+ | lw TMP2, LO(TMP3)
+ |1:
+ | beq TMP1, TISNIL, ->fff_res // End of iteration, return 0 results.
+ |. li RD, (0+1)*8
+ | sw TMP1, 8+HI(RA)
+ | sw TMP2, 8+LO(RA)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lw TMP0, TAB:CARG1->hmask
+ | load_got lj_tab_getinth
+ | beqz TMP0, ->fff_res
+ |. li RD, (0+1)*8
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. move CARG2, TMP2
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->fff_res
+ |. li RD, (0+1)*8
+ | lw TMP1, HI(CRET1)
+ | b <1
+ |. lw TMP2, LO(CRET1)
+ |
+ |.ffunc_1 ipairs
+ | li AT, LJ_TTAB
+ | bne SFARG1HI, AT, ->fff_fallback
+ |. lw PC, FRAME_PC(BASE)
+#if LJ_52
+ | lw TAB:TMP2, TAB:SFARG1LO->metatable
+ | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | lw TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lw TMP1, CFUNC:RB->upvalue[0].u32.lo
+#endif
+ |. addiu RA, BASE, -8
+ | sw TISNUM, 8+HI(BASE)
+ | sw r0, 8+LO(BASE)
+ | sw TMP0, HI(RA)
+ | sw TMP1, LO(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. move TMP2, BASE
+ | addiu BASE, BASE, 8
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | andi TMP3, TMP3, 1
+ | addiu PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |
+ |.ffunc xpcall
+ | sltiu AT, NARGS8:RC, 16
+ | lw CARG4, 8+HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG3, 8+LO(BASE)
+ | lw CARG1, LO(BASE)
+ | lw CARG2, HI(BASE)
+ | lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | li AT, LJ_TFUNC
+ | move TMP2, BASE
+ | bne CARG4, AT, ->fff_fallback // Traceback must be a function.
+ |. addiu BASE, BASE, 16
+ | // Remember active hook before pcall. TMP1 holds the hookmask loaded above.
+ | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT
+ | sw CARG3, LO(TMP2) // Swap function and traceback.
+ | sw CARG4, HI(TMP2)
+ | andi TMP3, TMP3, 1
+ | sw CARG1, 8+LO(TMP2)
+ | sw CARG2, 8+HI(TMP2)
+ | addiu PC, TMP3, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |. addiu NARGS8:RC, NARGS8:RC, -16
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc coroutine_resume
+ | lw CARG3, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. lw CARG1, LO(BASE)
+ | li AT, LJ_TTHREAD
+ | bne CARG3, AT, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lw L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbu TMP0, L:CARG1->status
+ | lw TMP1, L:CARG1->cframe
+ | lw CARG2, L:CARG1->top
+ | lw TMP2, L:CARG1->base
+ | addiu TMP3, TMP0, -LUA_YIELD
+ | bgtz TMP3, ->fff_fallback // st > LUA_YIELD?
+ |. xor TMP2, TMP2, CARG2
+ | bnez TMP1, ->fff_fallback // cframe != 0?
+ |. or AT, TMP2, TMP0
+ | lw TMP0, L:CARG1->maxstack
+ | beqz AT, ->fff_fallback // base == top && st == 0?
+ |. lw PC, FRAME_PC(BASE)
+ | addu TMP2, CARG2, NARGS8:RC
+ | sltu AT, TMP0, TMP2
+ | bnez AT, ->fff_fallback // Stack overflow?
+ |. sw PC, SAVE_PC
+ | sw BASE, L->base
+ |1:
+ |.if resume
+ | addiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | addiu TMP2, TMP2, -8
+ |.endif
+ | sw TMP2, L:CARG1->top
+ | addu TMP1, BASE, NARGS8:RC
+ | move CARG3, CARG2
+ | sw BASE, L->top
+ |2: // Move args to coroutine.
+ | lw SFRETHI, HI(BASE)
+ | lw SFRETLO, LO(BASE)
+ | sltu AT, BASE, TMP1
+ | beqz AT, >3
+ |. addiu BASE, BASE, 8
+ | sw SFRETHI, HI(CARG3)
+ | sw SFRETLO, LO(CARG3)
+ | b <2
+ |. addiu CARG3, CARG3, 8
+ |3:
+ | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |. move L:RA, L:CARG1
+ | // Returns thread status.
+ |4:
+ | lw TMP2, L:RA->base
+ | sltiu AT, CRET1, LUA_YIELD+1
+ | lw TMP3, L:RA->top
+ | li_vmstate INTERP
+ | lw BASE, L->base
+ | sw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | st_vmstate
+ | beqz AT, >8
+ |. subu RD, TMP3, TMP2
+ | lw TMP0, L->maxstack
+ | beqz RD, >6 // No results?
+ |. addu TMP1, BASE, RD
+ | sltu AT, TMP0, TMP1
+ | bnez AT, >9 // Need to grow stack?
+ |. addu TMP3, TMP2, RD
+ | sw TMP2, L:RA->top // Clear coroutine stack.
+ | move TMP1, BASE
+ |5: // Move results from coroutine.
+ | lw SFRETHI, HI(TMP2)
+ | lw SFRETLO, LO(TMP2)
+ | addiu TMP2, TMP2, 8
+ | sltu AT, TMP2, TMP3
+ | sw SFRETHI, HI(TMP1)
+ | sw SFRETLO, LO(TMP1)
+ | bnez AT, <5
+ |. addiu TMP1, TMP1, 8
+ |6:
+ | andi TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | addiu RA, BASE, -8
+ | sw TMP1, -8+HI(BASE) // Prepend true to results.
+ | addiu RD, RD, 16
+ |.else
+ | move RA, BASE
+ | addiu RD, RD, 8
+ |.endif
+ |7:
+ | sw PC, SAVE_PC
+ | beqz TMP0, ->BC_RET_Z
+ |. move MULTRES, RD
+ | b ->vm_return
+ |. nop
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | addiu TMP3, TMP3, -8
+ | li TMP1, LJ_TFALSE
+ | lw SFRETHI, HI(TMP3)
+ | lw SFRETLO, LO(TMP3)
+ | sw TMP3, L:RA->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | sw TMP1, -8+HI(BASE) // Prepend false to results.
+ | addiu RA, BASE, -8
+ | sw SFRETHI, HI(BASE) // Copy error message.
+ | sw SFRETLO, LO(BASE)
+ | b <7
+ |. andi TMP0, PC, FRAME_TYPE
+ |.else
+ | load_got lj_ffh_coroutine_wrap_err
+ | move CARG2, L:RA
+ | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |. move CARG1, L
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | load_got lj_state_growstack
+ | srl CARG2, RD, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | b <4
+ |. li CRET1, 0
+ |.endmacro
+ |
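+ |// resume prepends true/false plus the results or the error to the caller's
+ |// stack; wrap_aux instead rethrows errors via lj_ffh_coroutine_wrap_err.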
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lw TMP0, L->cframe
+ | addu TMP1, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | andi TMP0, TMP0, CFRAME_RESUME
+ | sw TMP1, L->top
+ | beqz TMP0, ->fff_fallback
+ |. li CRET1, LUA_YIELD
+ | sw r0, L->cframe
+ | b ->vm_leave_unw
+ |. sb CRET1, L->status
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | bne SFARG1HI, TISNUM, >1
+ |. sra TMP0, SFARG1LO, 31
+ | xor TMP1, SFARG1LO, TMP0
+ | subu SFARG1LO, TMP1, TMP0
+ | bgez SFARG1LO, ->fff_restv
+ |. nop
+ | lui SFARG1HI, 0x41e0 // 2^31 as a double.
+ | b ->fff_restv
+ |. li SFARG1LO, 0
+ |1:
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. sll SFARG1HI, SFARG1HI, 1
+ | srl SFARG1HI, SFARG1HI, 1
+ |// fallthrough
+ |
+ |->fff_restv:
+ | // SFARG1LO/SFARG1HI = TValue result.
+ | lw PC, FRAME_PC(BASE)
+ | sw SFARG1HI, -8+HI(BASE)
+ | addiu RA, BASE, -8
+ | sw SFARG1LO, -8+LO(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->vm_return
+ |. move MULTRES, RD
+ | lw INS, -4(PC)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6 // More results expected?
+ |. decode_RA8a TMP0, INS
+ | decode_RA8b TMP0
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | subu BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addu TMP1, RA, RD
+ | addiu RD, RD, 8
+ | b <5
+ |. sw TISNIL, -8+HI(TMP1)
+ |
+ |.macro math_extern, func
+ | .ffunc math_ .. func
+ | lw SFARG1HI, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. load_got func
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ |.else
+ |. lw SFARG1LO, LO(BASE)
+ |.endif
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ |. load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |// TODO: Return integer type if result is integer (own sf implementation).
+ |.macro math_round, func
+ |->ff_math_ .. func:
+ | lw SFARG1HI, HI(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. lw SFARG1LO, LO(BASE)
+ | beq SFARG1HI, TISNUM, ->fff_restv
+ |. sltu AT, SFARG1HI, TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | bal ->vm_ .. func
+ |.else
+ |. load_got func
+ | call_extern
+ |.endif
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. lw SFARG1HI, HI(BASE)
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. load_got log
+ |.if FPU
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ |.else
+ | call_extern
+ |. lw SFARG1LO, LO(BASE)
+ |.endif
+ | b ->fff_resn
+ |. nop
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.if FPU
+ |.ffunc_n math_sqrt
+ |. sqrt.d FRET1, FARG1
+ |// fallthrough to ->fff_resn
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |->fff_resn:
+ | lw PC, FRAME_PC(BASE)
+ | addiu RA, BASE, -8
+ |.if FPU
+ | b ->fff_res1
+ |. sdc1 FRET1, -8(BASE)
+ |.else
+ | sw SFRETHI, -8+HI(BASE)
+ | b ->fff_res1
+ |. sw SFRETLO, -8+LO(BASE)
+ |.endif
+ |
+ |
+ |.ffunc math_ldexp
+ | sltiu AT, NARGS8:RC, 16
+ | lw SFARG1HI, HI(BASE)
+ | bnez AT, ->fff_fallback
+ |. lw CARG4, 8+HI(BASE)
+ | bne CARG4, TISNUM, ->fff_fallback
+ |. load_got ldexp
+ | sltu AT, SFARG1HI, TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ |.else
+ |. lw SFARG1LO, LO(BASE)
+ |.endif
+ | call_extern
+ |. lw CARG3, 8+LO(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_n math_frexp
+ | load_got frexp
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | addiu RA, BASE, -8
+ |.if FPU
+ | mtc1 TMP1, FARG2
+ | sdc1 FRET1, 0(RA)
+ | cvt.d.w FARG2, FARG2
+ | sdc1 FARG2, 8(RA)
+ |.else
+ | sw SFRETLO, LO(RA)
+ | sw SFRETHI, HI(RA)
+ | sw TMP1, 8+LO(RA)
+ | sw TISNUM, 8+HI(RA)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.ffunc_n math_modf
+ | load_got modf
+ | lw PC, FRAME_PC(BASE)
+ | call_extern
+ |. addiu CARG3, BASE, -8
+ | addiu RA, BASE, -8
+ |.if FPU
+ | sdc1 FRET1, 0(BASE)
+ |.else
+ | sw SFRETLO, LO(BASE)
+ | sw SFRETHI, HI(BASE)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.macro math_minmax, name, intins, ismax
+ | .ffunc_1 name
+ | addu TMP3, BASE, NARGS8:RC
+ | bne SFARG1HI, TISNUM, >5
+ |. addiu TMP2, BASE, 8
+ |1: // Handle integers.
+ |. lw SFARG2HI, HI(TMP2)
+ | beq TMP2, TMP3, ->fff_restv
+ |. lw SFARG2LO, LO(TMP2)
+ | bne SFARG2HI, TISNUM, >3
+ |. slt AT, SFARG1LO, SFARG2LO
+ | intins SFARG1LO, SFARG2LO, AT
+ | b <1
+ |. addiu TMP2, TMP2, 8
+ |
+ |3: // Convert intermediate result to number and continue with number loop.
+ | sltiu AT, SFARG2HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. mtc1 SFARG1LO, FRET1
+ | cvt.d.w FRET1, FRET1
+ | b >7
+ |. ldc1 FARG1, 0(TMP2)
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b >7
+ |. nop
+ |.endif
+ |
+ |5:
+ |. sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FRET1, 0(BASE)
+ |.endif
+ |
+ |6: // Handle numbers.
+ |. lw SFARG2HI, HI(TMP2)
+ |.if FPU
+ | beq TMP2, TMP3, ->fff_resn
+ |.else
+ | beq TMP2, TMP3, ->fff_restv
+ |.endif
+ |. sltiu AT, SFARG2HI, LJ_TISNUM
+ | beqz AT, >8
+ |.if FPU
+ |. ldc1 FARG1, 0(TMP2)
+ |.else
+ |. lw SFARG2LO, LO(TMP2)
+ |.endif
+ |7:
+ |.if FPU
+ |.if ismax
+ | c.olt.d FARG1, FRET1
+ |.else
+ | c.olt.d FRET1, FARG1
+ |.endif
+ | movf.d FRET1, FARG1
+ |.else
+ |.if ismax
+ | bal ->vm_sfcmpogt
+ |.else
+ | bal ->vm_sfcmpolt
+ |.endif
+ |. nop
+ | movz SFARG1LO, SFARG2LO, CRET1
+ | movz SFARG1HI, SFARG2HI, CRET1
+ |.endif
+ | b <6
+ |. addiu TMP2, TMP2, 8
+ |
+ |8: // Convert integer to number and continue with number loop.
+ | bne SFARG2HI, TISNUM, ->fff_fallback
+ |.if FPU
+ |. lwc1 FARG1, LO(TMP2)
+ | b <7
+ |. cvt.d.w FARG1, FARG1
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <7
+ |. nop
+ |.endif
+ |
+ |.endmacro
+ |
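+ |// min keeps the smaller operand with movz, max the larger with movn; the
+ |// integer loop feeds the slt result into the chosen conditional move.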
+ | math_minmax math_min, movz, 0
+ | math_minmax math_max, movn, 1
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | xori AT, NARGS8:RC, 8
+ | addiu CARG3, CARG3, -LJ_TSTR
+ | or AT, AT, CARG3
+ | bnez AT, ->fff_fallback // Need exactly 1 string argument.
+ |. nop
+ | lw TMP0, STR:CARG1->len
+ | addiu RA, BASE, -8
+ | lw PC, FRAME_PC(BASE)
+ | sltu RD, r0, TMP0
+ | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addiu RD, RD, 1
+ | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
+ | sw TISNUM, HI(RA)
+ | b ->fff_res
+ |. sw TMP1, LO(RA)
+ |
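+ |// string.char fast path: the xori/xor/sltu results are OR-ed into one
+ |// register, so a single bnez rejects anything but exactly one integer
+ |// argument in the range 0..255.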
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ |. nop
+ | lw CARG3, HI(BASE)
+ | lw CARG1, LO(BASE)
+ | li TMP1, 255
+ | xori AT, NARGS8:RC, 8 // Exactly 1 argument.
+ | xor TMP0, CARG3, TISNUM // Integer.
+ | sltu TMP1, TMP1, CARG1 // !(255 < n).
+ | or AT, AT, TMP0
+ | or AT, AT, TMP1
+ | bnez AT, ->fff_fallback
+ |. li CARG3, 1
+ | addiu CARG2, sp, ARG5_OFS
+ | sb CARG1, ARG5
+ |->fff_newstr:
+ | load_got lj_str_new
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
+ |. move CARG1, L
+ | // Returns GCstr *.
+ | lw BASE, L->base
+ |->fff_resstr:
+ | move SFARG1LO, CRET1
+ | b ->fff_restv
+ |. li SFARG1HI, LJ_TSTR
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ |. nop
+ | addiu AT, NARGS8:RC, -16
+ | lw CARG3, 16+HI(BASE)
+ | lw TMP0, HI(BASE)
+ | lw STR:CARG1, LO(BASE)
+ | bltz AT, ->fff_fallback
+ |. lw CARG2, 8+HI(BASE)
+ | beqz AT, >1
+ |. li CARG4, -1
+ | bne CARG3, TISNUM, ->fff_fallback
+ |. lw CARG4, 16+LO(BASE)
+ |1:
+ | bne CARG2, TISNUM, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne TMP0, AT, ->fff_fallback
+ |. lw CARG3, 8+LO(BASE)
+ | lw CARG2, STR:CARG1->len
+ | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
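+ | // Clamp per string.sub semantics: negative indices count from the end
+ | // (add len+1), start is raised to 1, end is capped at len; start > end
+ | // produces the empty string.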
+ | slt AT, CARG4, r0
+ | addiu TMP0, CARG2, 1
+ | addu TMP1, CARG4, TMP0
+ | slt TMP3, CARG3, r0
+ | movn CARG4, TMP1, AT // if (end < 0) end += len+1
+ | addu TMP1, CARG3, TMP0
+ | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | movn CARG4, r0, AT // if (end < 0) end = 0
+ | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | movn CARG4, CARG2, AT // if (end > len) end = len
+ | addu CARG2, STR:CARG1, CARG3
+ | subu CARG3, CARG4, CARG3 // len = end - start
+ | addiu CARG2, CARG2, sizeof(GCstr)-1
+ | bgez CARG3, ->fff_newstr
+ |. addiu CARG3, CARG3, 1 // len++
+ |->fff_emptystr: // Return empty string.
+ | addiu STR:SFARG1LO, DISPATCH, DISPATCH_GL(strempty)
+ | b ->fff_restv
+ |. li SFARG1HI, LJ_TSTR
+ |
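+ |// string.reverse/lower/upper: append the transformed string to the global
+ |// temporary buffer with lj_buf_putstr_*, then convert the buffer contents
+ |// to a GCstr via lj_buf_tostr and return it.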
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ |. nop
+ | lw CARG3, HI(BASE)
+ | lw STR:CARG2, LO(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne CARG3, AT, ->fff_fallback
+ |. addiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf)
+ | load_got lj_buf_putstr_ .. name
+ | lw TMP0, SBUF:CARG1->b
+ | sw L, SBUF:CARG1->L
+ | sw BASE, L->base
+ | sw TMP0, SBUF:CARG1->w
+ | call_intern extern lj_buf_putstr_ .. name
+ |. sw PC, SAVE_PC
+ | load_got lj_buf_tostr
+ | call_intern lj_buf_tostr
+ |. move SBUF:CARG1, SBUF:CRET1
+ | b ->fff_resstr
+ |. lw BASE, L->base
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
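+ |// Argument coercion for bit.*: on FPU builds, adding TOBIT (2^52 + 2^51)
+ |// forces the integer part of the double into the low mantissa word, so
+ |// mfc1 of the low word yields the wrapped int32. The soft-float variant
+ |// below decodes the IEEE-754 fields by hand.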
+ |->vm_tobit_fb:
+ | beqz TMP1, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | add.d FARG1, FARG1, TOBIT
+ | jr ra
+ |. mfc1 CRET1, FARG1
+ |.else
+ |// FP number to bit conversion for soft-float.
+ |->vm_tobit:
+ | sll TMP0, SFARG1HI, 1
+ | lui AT, 0x0020
+ | addu TMP0, TMP0, AT
+ | slt AT, TMP0, r0
+ | movz SFARG1LO, r0, AT
+ | beqz AT, >2
+ |. li TMP1, 0x3e0
+ | not TMP1, TMP1
+ | sra TMP0, TMP0, 21
+ | subu TMP0, TMP1, TMP0
+ | slt AT, TMP0, r0
+ | bnez AT, >1
+ |. sll TMP1, SFARG1HI, 11
+ | lui AT, 0x8000
+ | or TMP1, TMP1, AT
+ | srl AT, SFARG1LO, 21
+ | or TMP1, TMP1, AT
+ | slt AT, SFARG1HI, r0
+ | beqz AT, >2
+ |. srlv SFARG1LO, TMP1, TMP0
+ | subu SFARG1LO, r0, SFARG1LO
+ |2:
+ | jr ra
+ |. move CRET1, SFARG1LO
+ |1:
+ | addiu TMP0, TMP0, 21
+ | srlv TMP1, SFARG1LO, TMP0
+ | li AT, 20
+ | subu TMP0, AT, TMP0
+ | sll SFARG1LO, SFARG1HI, 12
+ | sllv AT, SFARG1LO, TMP0
+ | or SFARG1LO, TMP1, AT
+ | slt AT, SFARG1HI, r0
+ | beqz AT, <2
+ |. nop
+ | jr ra
+ |. subu CRET1, r0, SFARG1LO
+ |.endif
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | beq SFARG1HI, TISNUM, >6
+ |. move CRET1, SFARG1LO
+ | bal ->vm_tobit_fb
+ |. sltu TMP1, SFARG1HI, TISNUM
+ |6:
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addiu TMP2, BASE, 8
+ | addu TMP3, BASE, NARGS8:RC
+ |1:
+ | lw SFARG1HI, HI(TMP2)
+ | beq TMP2, TMP3, ->fff_resi
+ |. lw SFARG1LO, LO(TMP2)
+ |.if FPU
+ | bne SFARG1HI, TISNUM, >2
+ |. addiu TMP2, TMP2, 8
+ | b <1
+ |. ins CRET1, CRET1, SFARG1LO
+ |2:
+ | ldc1 FARG1, -8(TMP2)
+ | sltu TMP1, SFARG1HI, TISNUM
+ | beqz TMP1, ->fff_fallback
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 SFARG1LO, FARG1
+ | b <1
+ |. ins CRET1, CRET1, SFARG1LO
+ |.else
+ | beq SFARG1HI, TISNUM, >2
+ |. move CRET2, CRET1
+ | bal ->vm_tobit_fb
+ |. sltu TMP1, SFARG1HI, TISNUM
+ | move SFARG1LO, CRET2
+ |2:
+ | ins CRET1, CRET1, SFARG1LO
+ | b <1
+ |. addiu TMP2, TMP2, 8
+ |.endif
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | srl TMP0, CRET1, 24
+ | srl TMP2, CRET1, 8
+ | sll TMP1, CRET1, 24
+ | andi TMP2, TMP2, 0xff00
+ | or TMP0, TMP0, TMP1
+ | andi CRET1, CRET1, 0xff00
+ | or TMP0, TMP0, TMP2
+ | sll CRET1, CRET1, 8
+ | b ->fff_resi
+ |. or CRET1, TMP0, CRET1
+ |
+ |.ffunc_bit bnot
+ | b ->fff_resi
+ |. not CRET1, CRET1
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ | .ffunc_2 bit_..name
+ | beq SFARG1HI, TISNUM, >1
+ |. nop
+ | bal ->vm_tobit_fb
+ |. sltu TMP1, SFARG1HI, TISNUM
+ | move SFARG1LO, CRET1
+ |1:
+ | bne SFARG2HI, TISNUM, ->fff_fallback
+ |. nop
+ |.if shmod == 1
+ | li AT, 32
+ | subu TMP0, AT, SFARG2LO
+ | sllv SFARG2LO, SFARG1LO, SFARG2LO
+ | srlv SFARG1LO, SFARG1LO, TMP0
+ |.elif shmod == 2
+ | li AT, 32
+ | subu TMP0, AT, SFARG2LO
+ | srlv SFARG2LO, SFARG1LO, SFARG2LO
+ | sllv SFARG1LO, SFARG1LO, TMP0
+ |.endif
+ | b ->fff_resi
+ |. ins CRET1, SFARG1LO, SFARG2LO
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, sllv, 0
+ |.ffunc_bit_sh rshift, srlv, 0
+ |.ffunc_bit_sh arshift, srav, 0
+ |// Can't use rotrv, since it's only in MIPS32R2.
+ |.ffunc_bit_sh rol, or, 1
+ |.ffunc_bit_sh ror, or, 2
+ |
+ |.ffunc_bit tobit
+ |->fff_resi:
+ | lw PC, FRAME_PC(BASE)
+ | addiu RA, BASE, -8
+ | sw TISNUM, -8+HI(BASE)
+ | b ->fff_res1
+ |. sw CRET1, -8+LO(BASE)
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lw TMP3, CFUNC:RB->f
+ | addu TMP1, BASE, NARGS8:RC
+ | lw PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addiu TMP0, TMP1, 8*LUA_MINSTACK
+ | lw TMP2, L->maxstack
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sltu AT, TMP2, TMP0
+ | sw BASE, L->base
+ | sw TMP1, L->top
+ | bnez AT, >5 // Need to grow stack.
+ |. move CFUNCADDR, TMP3
+ | jalr TMP3 // (lua_State *L)
+ |. move CARG1, L
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | bgtz CRET1, ->fff_res // Returned nresults+1?
+ |. addiu RA, BASE, -8
+ |1: // Returned 0 or -1: retry fast path.
+ | lw TMP0, L->top
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | bnez CRET1, ->vm_call_tail // Returned -1?
+ |. subu NARGS8:RC, TMP0, BASE
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi TMP0, PC, FRAME_TYPE
+ | li AT, -4
+ | bnez TMP0, >3
+ |. and TMP1, PC, AT
+ | lbu TMP1, OFS_RA(PC)
+ | sll TMP1, TMP1, 3
+ | addiu TMP1, TMP1, 8
+ |3:
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |. subu TMP2, BASE, TMP1
+ |
+ |5: // Grow stack for fallback handler.
+ | load_got lj_state_growstack
+ | li CARG2, LUA_MINSTACK
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | b <1
+ |. li CRET1, 0 // Force retry.
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | move MULTRES, ra
+ | load_got lj_gc_step
+ | sw BASE, L->base
+ | addu TMP0, BASE, NARGS8:RC
+ | sw PC, SAVE_PC // Redundant (but a defined value).
+ | sw TMP0, L->top
+ | call_intern lj_gc_step // (lua_State *L)
+ |. move CARG1, L
+ | lw BASE, L->base
+ | move ra, MULTRES
+ | lw TMP0, L->top
+ | lw CFUNC:RB, FRAME_FUNC(BASE)
+ | jr ra
+ |. subu NARGS8:RC, TMP0, BASE
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bnez AT, >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE
+ | bnez AT, >1
+ |. addiu TMP2, TMP2, -1
+ | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, >1
+ |. nop
+ | b >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | beqz AT, >1
+ |5: // Re-dispatch to static ins.
+ |. lw AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
+ | jr AT
+ |. nop
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | bnez AT, <5
+ |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, <5
+ |. addiu TMP2, TMP2, -1
+ | beqz TMP2, >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, LUA_MASKLINE
+ | beqz AT, <5
+ |1:
+ |. load_got lj_dispatch_ins
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sw BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ |3:
+ | lw BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lw INS, -4(PC)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ |. decode_RA8b RA
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addiu PC, PC, 4
+ | b <4
+ |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw PC, SAVE_PC
+ | lw TMP1, LFUNC:TMP1->pc
+ | move CARG2, PC
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | lbu TMP1, PC2PROTO(framesize)(TMP1)
+ | load_got lj_trace_hot
+ | sw BASE, L->base
+ | sll TMP1, TMP1, 3
+ | addu TMP1, BASE, TMP1
+ | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ |. sw TMP1, L->top
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ |.if JIT
+ | b >1
+ |.endif
+ |. move CARG2, PC
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | load_got lj_dispatch_call
+ | addu TMP0, BASE, RC
+ | sw PC, SAVE_PC
+ | sw BASE, L->base
+ | subu RA, RA, BASE
+ | sw TMP0, L->top
+ | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // Returns ASMFunction.
+ | lw BASE, L->base
+ | lw TMP0, L->top
+ | sw r0, SAVE_PC // Invalidate for subsequent line hook.
+ | subu NARGS8:RC, TMP0, BASE
+ | addu RA, BASE, RA
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | jr CRET1
+ |. lw INS, -4(PC)
+ |
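+ |// Trace stitching continuation: copy results down to the call base, then
+ |// either jump to an already-stitched trace, return to the interpreter if
+ |// the link is blacklisted, or call lj_dispatch_stitch to record a new one.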
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | lw TMP2, -24+LO(RB) // Save previous trace.
+ | decode_RA8a RC, INS
+ | addiu AT, MULTRES, -8
+ | decode_RA8b RC
+ | beqz AT, >2
+ |. addu RC, BASE, RC // Call base.
+ |1: // Move results down.
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | addiu AT, AT, -8
+ | addiu RA, RA, 8
+ | sw SFRETHI, HI(RC)
+ | sw SFRETLO, LO(RC)
+ | bnez AT, <1
+ |. addiu RC, RC, 8
+ |2:
+ | decode_RA8a RA, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b RA
+ | decode_RB8b RB
+ | addu RA, RA, RB
+ | addu RA, BASE, RA
+ |3:
+ | sltu AT, RC, RA
+ | bnez AT, >9 // More results wanted?
+ |. nop
+ |
+ | lhu TMP3, TRACE:TMP2->traceno
+ | lhu RD, TRACE:TMP2->link
+ | beq RD, TMP3, ->cont_nop // Blacklisted.
+ |. load_got lj_dispatch_stitch
+ | bnez RD, =>BC_JLOOP // Jump to stitched trace.
+ |. sll RD, RD, 3
+ |
+ | // Stitch a new trace to the previous trace.
+ | sw TMP3, DISPATCH_J(exitno)(DISPATCH)
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | sw BASE, L->base
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ |. move CARG2, PC
+ | b ->cont_nop
+ |. lw BASE, L->base
+ |
+ |9:
+ | sw TISNIL, HI(RC)
+ | b <3
+ |. addiu RC, RC, 8
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | load_got lj_dispatch_profile
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sw BASE, L->base
+ | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | addiu PC, PC, -4
+ | b ->cont_nop
+ |. lw BASE, L->base
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
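+ |// Save a pair of GPRs (and the matching double FPR on FPU builds) into
+ |// the exit state frame built on the C stack by ->vm_exit_handler below.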
+ |.macro savex_, a, b
+ |.if FPU
+ | sdc1 f..a, 16+a*8(sp)
+ | sw r..a, 16+32*8+a*4(sp)
+ | sw r..b, 16+32*8+b*4(sp)
+ |.else
+ | sw r..a, 16+a*4(sp)
+ | sw r..b, 16+b*4(sp)
+ |.endif
+ |.endmacro
+ |
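+ |// All registers are captured into an ExitState on the C stack; the exit
+ |// number sits at the incoming stack pointer (pushed by the exit stub) and
+ |// ra identifies the parent trace. lj_trace_exit then restores the Lua
+ |// stack from snapshot data.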
+ |->vm_exit_handler:
+ |.if JIT
+ |.if FPU
+ | addiu sp, sp, -(16+32*8+32*4)
+ |.else
+ | addiu sp, sp, -(16+32*4)
+ |.endif
+ | savex_ 0, 1
+ | savex_ 2, 3
+ | savex_ 4, 5
+ | savex_ 6, 7
+ | savex_ 8, 9
+ | savex_ 10, 11
+ | savex_ 12, 13
+ | savex_ 14, 15
+ | savex_ 16, 17
+ | savex_ 18, 19
+ | savex_ 20, 21
+ | savex_ 22, 23
+ | savex_ 24, 25
+ | savex_ 26, 27
+ |.if FPU
+ | sdc1 f28, 16+28*8(sp)
+ | sdc1 f30, 16+30*8(sp)
+ | sw r28, 16+32*8+28*4(sp)
+ | sw r30, 16+32*8+30*4(sp)
+ | sw r0, 16+32*8+31*4(sp) // Clear RID_TMP.
+ | addiu TMP2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | sw TMP2, 16+32*8+29*4(sp) // Store sp in RID_SP
+ |.else
+ | sw r28, 16+28*4(sp)
+ | sw r30, 16+30*4(sp)
+ | sw r0, 16+31*4(sp) // Clear RID_TMP.
+ | addiu TMP2, sp, 16+32*4 // Recompute original value of sp.
+ | sw TMP2, 16+29*4(sp) // Store sp in RID_SP
+ |.endif
+ | li_vmstate EXIT
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ | lw TMP1, 0(TMP2) // Load exit number.
+ | st_vmstate
+ | lw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | lw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | load_got lj_trace_exit
+ | sw L, DISPATCH_J(L)(DISPATCH)
+ | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
+ | sw BASE, L->base
+ | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
+ | addiu CARG1, DISPATCH, GG_DISP2J
+ | sw r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
+ |. addiu CARG2, sp, 16
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lw TMP1, L->cframe
+ | li AT, -4
+ | lw BASE, L->base
+ | and sp, TMP1, AT
+ | lw PC, SAVE_PC // Get SAVE_PC.
+ | b >1
+ |. sw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lw L, SAVE_L
+ | addiu DISPATCH, JGL, -GG_DISP2G-32768
+ | sw BASE, L->base
+ |1:
+ | bltz CRET1, >9 // Check for error from exit.
+ |. lw LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | sll MULTRES, CRET1, 3
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | sw MULTRES, SAVE_MULTRES
+ | .FPU mtc1 TMP3, TOBIT
+ | lw TMP1, LFUNC:RB->pc
+ | sw r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lw INS, 0(PC)
+ | addiu PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
+ | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OP4a TMP1, INS
+ | decode_OP4b TMP1
+ | sltiu TMP2, TMP1, BC_FUNCF*4
+ | addu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | lw AT, 0(TMP0)
+ | decode_RA8a RA, INS
+ | beqz TMP2, >2
+ |. decode_RA8b RA
+ | jr AT
+ |. decode_RD8b RD
+ |2:
+ | sltiu TMP2, TMP1, (BC_FUNCC+2)*4 // Fast function?
+ | bnez TMP2, >3
+ |. lw TMP1, FRAME_PC(BASE)
+ | // Check frame below fast function.
+ | andi TMP0, TMP1, FRAME_TYPE
+ | bnez TMP0, >3 // Trace stitching continuation?
+ |. nop
+ | // Otherwise set KBASE for Lua function below fast function.
+ | lw TMP2, -4(TMP1)
+ | decode_RA8a TMP0, TMP2
+ | decode_RA8b TMP0
+ | subu TMP1, BASE, TMP0
+ | lw LFUNC:TMP2, -8+FRAME_FUNC(TMP1)
+ | lw TMP1, LFUNC:TMP2->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ |3:
+ | addiu RC, MULTRES, -8
+ | jr AT
+ |. addu RA, RA, BASE
+ |
+ |9: // Rethrow error from the right C frame.
+ | load_got lj_err_trace
+ | sub CARG2, r0, CRET1
+ | call_intern lj_err_trace // (lua_State *L, int errcode)
+ |. move CARG1, L
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Hard-float round to integer.
+ |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
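+ |// Core trick: for |x| < 2^52, (|x| + 2^52) - 2^52 discards all fraction
+ |// bits (round-to-nearest), and a conditional +-1 correction plus the
+ |// re-merged sign turns that into floor, ceil or trunc.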
+ |.macro vm_round_hf, func
+ | lui TMP0, 0x4330 // Hiword of 2^52 (double).
+ | mtc1 r0, f4
+ | mtc1 TMP0, f5
+ | abs.d FRET2, FARG1 // |x|
+ | mfc1 AT, f13
+ | c.olt.d 0, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1f 0, >1 // Truncate only if |x| < 2^52.
+ |. sub.d FRET1, FRET1, f4
+ | slt AT, AT, r0
+ |.if "func" == "ceil"
+ | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
+ |.else
+ | lui TMP0, 0x3ff0 // Hiword of +1 (double).
+ |.endif
+ |.if "func" == "trunc"
+ | mtc1 TMP0, f5
+ | c.olt.d 0, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
+ | neg.d FRET2, FRET1
+ | jr ra
+ |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.else
+ | neg.d FRET2, FRET1
+ | mtc1 TMP0, f5
+ | movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.if "func" == "ceil"
+ | c.olt.d 0, FRET1, FARG1 // x > result?
+ |.else
+ | c.olt.d 0, FARG1, FRET1 // x < result?
+ |.endif
+ | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. movt.d FRET1, FRET2, 0
+ |.endif
+ |1:
+ | jr ra
+ |. mov.d FRET1, FARG1
+ |.endmacro
+ |
+ |.macro vm_round, func
+ |.if FPU
+ | vm_round_hf func
+ |.endif
+ |.endmacro
+ |
+ |->vm_floor:
+ | vm_round floor
+ |->vm_ceil:
+ | vm_round ceil
+ |->vm_trunc:
+ |.if JIT
+ | vm_round trunc
+ |.endif
+ |
+ |// Soft-float integer to number conversion.
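+ |// Builds the IEEE-754 double by hand: normalize |x| with clz, derive the
+ |// biased exponent from the shift amount (minus one, since adding the
+ |// mantissa's leading 1 bumps it back up), then merge sign, exponent and
+ |// mantissa into the hi/lo word pair.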
+ |.macro sfi2d, AHI, ALO
+ |.if not FPU
+ | beqz ALO, >9 // Handle zero first.
+ |. sra TMP0, ALO, 31
+ | xor TMP1, ALO, TMP0
+ | subu TMP1, TMP1, TMP0 // Absolute value in TMP1.
+ | clz AHI, TMP1
+ | andi TMP0, TMP0, 0x800 // Mask sign bit.
+ | li AT, 0x3ff+31-1
+ | sllv TMP1, TMP1, AHI // Align mantissa left with leading 1.
+ | subu AHI, AT, AHI // Exponent - 1 in AHI.
+ | sll ALO, TMP1, 21
+ | or AHI, AHI, TMP0 // Sign | Exponent.
+ | srl TMP1, TMP1, 11
+ | sll AHI, AHI, 20 // Align left.
+ | jr ra
+ |. addu AHI, AHI, TMP1 // Add mantissa, increment exponent.
+ |9:
+ | jr ra
+ |. li AHI, 0
+ |.endif
+ |.endmacro
+ |
+ |// Input SFARG1LO. Output: SFARG1*. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_1:
+ | sfi2d SFARG1HI, SFARG1LO
+ |
+ |// Input SFARG2LO. Output: SFARG2*. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_2:
+ | sfi2d SFARG2HI, SFARG2LO
+ |
+ |// Soft-float comparison. Equivalent to c.eq.d.
+ |// Input: SFARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1.
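+ |// NaN test used below: x is NaN iff (hi << 1) + (lo != 0) exceeds
+ |// 0xffe00000 as an unsigned value, i.e. all-ones exponent and a nonzero
+ |// mantissa.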
+ |->vm_sfcmpeq:
+ |.if not FPU
+ | sll AT, SFARG1HI, 1
+ | sll TMP0, SFARG2HI, 1
+ | or CRET1, SFARG1LO, SFARG2LO
+ | or TMP1, AT, TMP0
+ | or TMP1, TMP1, CRET1
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. sltu CRET1, r0, SFARG1LO
+ | lui TMP1, 0xffe0
+ | addu AT, AT, CRET1
+ | sltu CRET1, r0, SFARG2LO
+ | sltu AT, TMP1, AT
+ | addu TMP0, TMP0, CRET1
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. xor TMP0, SFARG1HI, SFARG2HI
+ | xor TMP1, SFARG1LO, SFARG2LO
+ | or AT, TMP0, TMP1
+ | jr ra
+ |. sltiu CRET1, AT, 1 // Same values: return 1.
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |// Soft-float comparison. Equivalent to c.ult.d and c.olt.d.
+ |// Input: SFARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1, CRET2.
+ |->vm_sfcmpult:
+ |.if not FPU
+ | b >1
+ |. li CRET2, 1
+ |.endif
+ |
+ |->vm_sfcmpolt:
+ |.if not FPU
+ | li CRET2, 0
+ |1:
+ | sll AT, SFARG1HI, 1
+ | sll TMP0, SFARG2HI, 1
+ | or CRET1, SFARG1LO, SFARG2LO
+ | or TMP1, AT, TMP0
+ | or TMP1, TMP1, CRET1
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. sltu CRET1, r0, SFARG1LO
+ | lui TMP1, 0xffe0
+ | addu AT, AT, CRET1
+ | sltu CRET1, r0, SFARG2LO
+ | sltu AT, TMP1, AT
+ | addu TMP0, TMP0, CRET1
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, SFARG1HI, SFARG2HI
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | beq SFARG1HI, SFARG2HI, >8
+ |. sltu CRET1, SFARG1LO, SFARG2LO
+ | jr ra
+ |. slt CRET1, SFARG1HI, SFARG2HI
+ |5: // Swap conditions if both operands are negative.
+ | beq SFARG1HI, SFARG2HI, >8
+ |. sltu CRET1, SFARG2LO, SFARG1LO
+ | jr ra
+ |. slt CRET1, SFARG2HI, SFARG1HI
+ |8:
+ | jr ra
+ |. nop
+ |9:
+ | jr ra
+ |. move CRET1, CRET2
+ |.endif
+ |
+ |->vm_sfcmpogt:
+ |.if not FPU
+ | sll AT, SFARG2HI, 1
+ | sll TMP0, SFARG1HI, 1
+ | or CRET1, SFARG2LO, SFARG1LO
+ | or TMP1, AT, TMP0
+ | or TMP1, TMP1, CRET1
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. sltu CRET1, r0, SFARG2LO
+ | lui TMP1, 0xffe0
+ | addu AT, AT, CRET1
+ | sltu CRET1, r0, SFARG1LO
+ | sltu AT, TMP1, AT
+ | addu TMP0, TMP0, CRET1
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, SFARG2HI, SFARG1HI
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | beq SFARG2HI, SFARG1HI, >8
+ |. sltu CRET1, SFARG2LO, SFARG1LO
+ | jr ra
+ |. slt CRET1, SFARG2HI, SFARG1HI
+ |5: // Swap conditions if both operands are negative.
+ | beq SFARG2HI, SFARG1HI, >8
+ |. sltu CRET1, SFARG1LO, SFARG2LO
+ | jr ra
+ |. slt CRET1, SFARG1HI, SFARG2HI
+ |8:
+ | jr ra
+ |. nop
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |// Soft-float comparison. Equivalent to c.ole.d a, b or c.ole.d b, a.
+ |// Input: SFARG*, TMP3. Output: CRET1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfcmpolex:
+ |.if not FPU
+ | sll AT, SFARG1HI, 1
+ | sll TMP0, SFARG2HI, 1
+ | or CRET1, SFARG1LO, SFARG2LO
+ | or TMP1, AT, TMP0
+ | or TMP1, TMP1, CRET1
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. sltu CRET1, r0, SFARG1LO
+ | lui TMP1, 0xffe0
+ | addu AT, AT, CRET1
+ | sltu CRET1, r0, SFARG2LO
+ | sltu AT, TMP1, AT
+ | addu TMP0, TMP0, CRET1
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. and AT, SFARG1HI, SFARG2HI
+ | xor AT, AT, TMP3
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | beq SFARG1HI, SFARG2HI, >6
+ |. sltu CRET1, SFARG2LO, SFARG1LO
+ | jr ra
+ |. slt CRET1, SFARG2HI, SFARG1HI
+ |5: // Swap conditions if both operands are negative.
+ | beq SFARG1HI, SFARG2HI, >6
+ |. sltu CRET1, SFARG1LO, SFARG2LO
+ | slt CRET1, SFARG1HI, SFARG2HI
+ |6:
+ | jr ra
+ |. nop
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |.macro sfmin_max, name, fpcall
+ |->vm_sf .. name:
+ |.if JIT and not FPU
+ | move TMP2, ra
+ | bal ->fpcall
+ |. nop
+ | move TMP0, CRET1
+ | move SFRETHI, SFARG1HI
+ | move SFRETLO, SFARG1LO
+ | move ra, TMP2
+ | movz SFRETHI, SFARG2HI, TMP0
+ | jr ra
+ |. movz SFRETLO, SFARG2LO, TMP0
+ |.endif
+ |.endmacro
+ |
+ | sfmin_max min, vm_sfcmpolt
+ | sfmin_max max, vm_sfcmpogt
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_IDX, CARG2
+ |.define NEXT_ASIZE, CARG3
+ |.define NEXT_NIL, CARG4
+ |.define NEXT_TMP0, r12
+ |.define NEXT_TMP1, r13
+ |.define NEXT_TMP2, r14
+ |.define NEXT_RES_VK, CRET1
+ |.define NEXT_RES_IDX, CRET2
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL_I, 0(sp)
+ |.define NEXT_RES_VAL_IT, 4(sp)
+ |.define NEXT_RES_KEY_I, 8(sp)
+ |.define NEXT_RES_KEY_IT, 12(sp)
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2.
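+ |// The value/key TValue pair is assembled in scratch space at sp; a nil
+ |// key (not value) signals the end of the traversal.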
+ |->vm_next:
+ |.if JIT and ENDIAN_LE
+ | lw NEXT_ASIZE, NEXT_TAB->asize
+ | lw NEXT_TMP0, NEXT_TAB->array
+ | li NEXT_NIL, LJ_TNIL
+ |1: // Traverse array part.
+ | sltu AT, NEXT_IDX, NEXT_ASIZE
+ | sll NEXT_TMP1, NEXT_IDX, 3
+ | beqz AT, >5
+ |. addu NEXT_TMP1, NEXT_TMP0, NEXT_TMP1
+ | lw NEXT_TMP2, 4(NEXT_TMP1)
+ | sw NEXT_IDX, NEXT_RES_KEY_I
+ | beq NEXT_TMP2, NEXT_NIL, <1
+ |. addiu NEXT_IDX, NEXT_IDX, 1
+ | lw NEXT_TMP0, 0(NEXT_TMP1)
+ | li AT, LJ_TISNUM
+ | sw NEXT_TMP2, NEXT_RES_VAL_IT
+ | sw AT, NEXT_RES_KEY_IT
+ | sw NEXT_TMP0, NEXT_RES_VAL_I
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ | jr ra
+ |. move NEXT_RES_IDX, NEXT_IDX
+ |
+ |5: // Traverse hash part.
+ | subu NEXT_RES_IDX, NEXT_IDX, NEXT_ASIZE
+ | lw NODE:NEXT_RES_VK, NEXT_TAB->node
+ | sll NEXT_TMP2, NEXT_RES_IDX, 5
+ | lw NEXT_TMP0, NEXT_TAB->hmask
+ | sll AT, NEXT_RES_IDX, 3
+ | subu AT, NEXT_TMP2, AT
+ | addu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, AT
+ |6:
+ | sltu AT, NEXT_TMP0, NEXT_RES_IDX
+ | bnez AT, >8
+ |. nop
+ | lw NEXT_TMP2, NODE:NEXT_RES_VK->val.it
+ | bne NEXT_TMP2, NEXT_NIL, >9
+ |. addiu NEXT_RES_IDX, NEXT_RES_IDX, 1
+ | // Skip holes in hash part.
+ | b <6
+ |. addiu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, sizeof(Node)
+ |
+ |8: // End of iteration. Set the key to nil (not the value).
+ | sw NEXT_NIL, NEXT_RES_KEY_IT
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ |9:
+ | jr ra
+ |. addu NEXT_RES_IDX, NEXT_RES_IDX, NEXT_ASIZE
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r1, g in r2.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lw CTSTATE, GL:r2->ctype_state
+ | addiu DISPATCH, r2, GG_G2DISP
+ | load_got lj_ccallback_enter
+ | sw r1, CTSTATE->cb.slot
+ | sw CARG1, CTSTATE->cb.gpr[0]
+ | sw CARG2, CTSTATE->cb.gpr[1]
+ | .FPU sdc1 FARG1, CTSTATE->cb.fpr[0]
+ | sw CARG3, CTSTATE->cb.gpr[2]
+ | sw CARG4, CTSTATE->cb.gpr[3]
+ | .FPU sdc1 FARG2, CTSTATE->cb.fpr[1]
+ | addiu TMP0, sp, CFRAME_SPACE+16
+ | sw TMP0, CTSTATE->cb.stack
+ | sw r0, SAVE_PC // Any value outside of bytecode is ok.
+ | move CARG2, sp
+ | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
+ |. move CARG1, CTSTATE
+ | // Returns lua_State *.
+ | lw BASE, L:CRET1->base
+ | lw RC, L:CRET1->top
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | move L, CRET1
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU mtc1 TMP3, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | subu RC, RC, BASE
+ | st_vmstate
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | load_got lj_ccallback_leave
+ | lw CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | sw BASE, L->base
+ | sw RB, L->top
+ | sw L, CTSTATE->L
+ | move CARG2, RA
+ | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
+ |. move CARG1, CTSTATE
+ | .FPU ldc1 FRET1, CTSTATE->cb.fpr[0]
+ | lw CRET1, CTSTATE->cb.gpr[0]
+ | .FPU ldc1 FRET2, CTSTATE->cb.fpr[1]
+ | b ->vm_leave_unw
+ |. lw CRET2, CTSTATE->cb.gpr[1]
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
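+ | // A variable-size frame is carved from the C stack: spadj bytes are
+ | // reserved, nsp argument words are copied over, and CARG1-CARG4 plus
+ | // FARG1/FARG2 are loaded from the CCallState before the call.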
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lw TMP1, CCSTATE->spadj
+ | lbu CARG2, CCSTATE->nsp
+ | move TMP2, sp
+ | subu sp, sp, TMP1
+ | sw ra, -4(TMP2)
+ | sll CARG2, CARG2, 2
+ | sw r16, -8(TMP2)
+ | sw CCSTATE, -12(TMP2)
+ | move r16, TMP2
+ | addiu TMP1, CCSTATE, offsetof(CCallState, stack)
+ | addiu TMP2, sp, 16
+ | beqz CARG2, >2
+ |. addu TMP3, TMP1, CARG2
+ |1:
+ | lw TMP0, 0(TMP1)
+ | addiu TMP1, TMP1, 4
+ | sltu AT, TMP1, TMP3
+ | sw TMP0, 0(TMP2)
+ | bnez AT, <1
+ |. addiu TMP2, TMP2, 4
+ |2:
+ | lw CFUNCADDR, CCSTATE->func
+ | lw CARG2, CCSTATE->gpr[1]
+ | lw CARG3, CCSTATE->gpr[2]
+ | lw CARG4, CCSTATE->gpr[3]
+ | .FPU ldc1 FARG1, CCSTATE->fpr[0]
+ | .FPU ldc1 FARG2, CCSTATE->fpr[1]
+ | jalr CFUNCADDR
+ |. lw CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | lw CCSTATE:TMP1, -12(r16)
+ | lw TMP2, -8(r16)
+ | lw ra, -4(r16)
+ | sw CRET1, CCSTATE:TMP1->gpr[0]
+ | sw CRET2, CCSTATE:TMP1->gpr[1]
+ |.if FPU
+ | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
+ | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
+ |.else
+ | sw CARG1, CCSTATE:TMP1->gpr[2] // Soft-float: complex double .im part.
+ | sw CARG2, CCSTATE:TMP1->gpr[3]
+ |.endif
+ | move sp, r16
+ | jr ra
+ |. move r16, TMP2
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.macro bc_comp, FRA, FRD, RAHI, RALO, RDHI, RDLO, movop, fmovop, fcomp, sfcomp
+ | addu RA, BASE, RA
+ | addu RD, BASE, RD
+ | lw RAHI, HI(RA)
+ | lw RDHI, HI(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | addiu PC, PC, 4
+ | bne RAHI, TISNUM, >2
+ |. lw RALO, LO(RA)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | lw RDLO, LO(RD)
+ | bne RDHI, TISNUM, >5
+ |. decode_RD4b TMP2
+ | slt AT, SFARG1LO, SFARG2LO
+ | addu TMP2, TMP2, TMP3
+ | movop TMP2, r0, AT
+ |1:
+ | addu PC, PC, TMP2
+ | ins_next
+ |
+ |2: // RA is not an integer.
+ | sltiu AT, RAHI, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, RDHI, LJ_TISNUM
+ |.if FPU
+ | ldc1 FRA, 0(RA)
+ | ldc1 FRD, 0(RD)
+ |.else
+ | lw RDLO, LO(RD)
+ |.endif
+ | beqz AT, >4
+ |. decode_RD4b TMP2
+ |3: // RA and RD are both numbers.
+ |.if FPU
+ | fcomp f20, f22
+ | addu TMP2, TMP2, TMP3
+ | b <1
+ |. fmovop TMP2, r0
+ |.else
+ | bal sfcomp
+ |. addu TMP2, TMP2, TMP3
+ | b <1
+ |. movop TMP2, r0, CRET1
+ |.endif
+ |
+ |4: // RA is a number, RD is not a number.
+ | bne RDHI, TISNUM, ->vmeta_comp
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 FRD, LO(RD)
+ | b <3
+ |. cvt.d.w FRD, FRD
+ |.else
+ |. nop
+ |.if "RDHI" == "SFARG1HI"
+ | bal ->vm_sfi2d_1
+ |.else
+ | bal ->vm_sfi2d_2
+ |.endif
+ |. nop
+ | b <3
+ |. nop
+ |.endif
+ |
+ |5: // RA is an integer, RD is not an integer
+ | sltiu AT, RDHI, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ |. mtc1 RALO, FRA
+ | ldc1 FRD, 0(RD)
+ | b <3
+ |. cvt.d.w FRA, FRA
+ |.else
+ |. nop
+ |.if "RAHI" == "SFARG1HI"
+ | bal ->vm_sfi2d_1
+ |.else
+ | bal ->vm_sfi2d_2
+ |.endif
+ |. nop
+ | b <3
+ |. nop
+ |.endif
+ |.endmacro
+ |
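+ /* One macro covers all four comparisons: operand order, the choice of
+ ** conditional move (movz/movn, movf/movt) and ordered vs. unordered FP
+ ** compare distinguish ISLT/ISGE/ISLE/ISGT, including NaN behavior. */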
+ if (op == BC_ISLT) {
+ | bc_comp f20, f22, SFARG1HI, SFARG1LO, SFARG2HI, SFARG2LO, movz, movf, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISGE) {
+ | bc_comp f20, f22, SFARG1HI, SFARG1LO, SFARG2HI, SFARG2LO, movn, movt, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISLE) {
+ | bc_comp f22, f20, SFARG2HI, SFARG2LO, SFARG1HI, SFARG1LO, movn, movt, c.ult.d, ->vm_sfcmpult
+ } else {
+ | bc_comp f22, f20, SFARG2HI, SFARG2LO, SFARG1HI, SFARG1LO, movz, movf, c.ult.d, ->vm_sfcmpult
+ }
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | addu RD, BASE, RD
+ | lw SFARG1HI, HI(RA)
+ | lhu TMP2, -4+OFS_RD(PC)
+ | lw SFARG2HI, HI(RD)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltu AT, TISNUM, SFARG1HI
+ | sltu TMP0, TISNUM, SFARG2HI
+ | or AT, AT, TMP0
+ if (vk) {
+ | beqz AT, ->BC_ISEQN_Z
+ } else {
+ | beqz AT, ->BC_ISNEN_Z
+ }
+ |. decode_RD4b TMP2
+ | // Either or both types are not numbers.
+ | lw SFARG1LO, LO(RA)
+ | lw SFARG2LO, LO(RD)
+ | addu TMP2, TMP2, TMP3
+ |.if FFI
+ | li TMP3, LJ_TCDATA
+ | beq SFARG1HI, TMP3, ->vmeta_equal_cd
+ |.endif
+ |. sltiu AT, SFARG1HI, LJ_TISPRI // Not a primitive?
+ |.if FFI
+ | beq SFARG2HI, TMP3, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP3, SFARG1LO, SFARG2LO // Same tv?
+ | xor SFARG2HI, SFARG2HI, SFARG1HI // Same type?
+ | sltiu TMP0, SFARG1HI, LJ_TISTABUD+1 // Table or userdata?
+ | movz TMP3, r0, AT // Ignore tv if primitive.
+ | movn TMP0, r0, SFARG2HI // Tab/ud and same type?
+ | or AT, SFARG2HI, TMP3 // Same type && (pri||same tv).
+ | movz TMP0, r0, AT
+ | beqz TMP0, >1 // Done if not tab/ud or not same type or same tv.
+ if (vk) {
+ |. movn TMP2, r0, AT
+ } else {
+ |. movz TMP2, r0, AT
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | lw TAB:TMP1, TAB:SFARG1LO->metatable
+ | beqz TAB:TMP1, >1 // No metatable?
+ |. nop
+ | lbu TMP1, TAB:TMP1->nomm
+ | andi TMP1, TMP1, 1<<MM_eq
+ | bnez TMP1, >1 // Or 'no __eq' flag set?
+ |. nop
+ | b ->vmeta_equal // Handle __eq metamethod.
+ |. li TMP0, 1-vk // ne = 0 or 1.
+ |1:
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | addiu PC, PC, 4
+ | lw TMP0, HI(RA)
+ | srl RD, RD, 1
+ | lw STR:TMP3, LO(RA)
+ | subu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. lw STR:TMP1, -4(RD) // KBASE-4-str_const*4
+ | addiu TMP0, TMP0, -LJ_TSTR
+ | decode_RD4b TMP2
+ | xor TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | addu RA, BASE, RA
+ | addu RD, KBASE, RD
+ | lw SFARG1HI, HI(RA)
+ | lw SFARG2HI, HI(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | addiu PC, PC, 4
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b TMP2
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne SFARG1HI, TISNUM, >3
+ |. lw SFARG1LO, LO(RA)
+ | lw SFARG2LO, LO(RD)
+ | addu TMP2, TMP2, TMP3
+ | bne SFARG2HI, TISNUM, >6
+ |. xor AT, SFARG1LO, SFARG2LO
+ if (vk) {
+ | movn TMP2, r0, AT
+ |1:
+ | addu PC, PC, TMP2
+ |2:
+ } else {
+ | movz TMP2, r0, AT
+ |1:
+ |2:
+ | addu PC, PC, TMP2
+ }
+ | ins_next
+ |
+ |3: // RA is not an integer.
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ |.if FFI
+ | beqz AT, >8
+ |.else
+ | beqz AT, <2
+ |.endif
+ |. addu TMP2, TMP2, TMP3
+ | sltiu AT, SFARG2HI, LJ_TISNUM
+ |.if FPU
+ | ldc1 f20, 0(RA)
+ | ldc1 f22, 0(RD)
+ |.endif
+ | beqz AT, >5
+ |. lw SFARG2LO, LO(RD)
+ |4: // RA and RD are both numbers.
+ |.if FPU
+ | c.eq.d f20, f22
+ | b <1
+ if (vk) {
+ |. movf TMP2, r0
+ } else {
+ |. movt TMP2, r0
+ }
+ |.else
+ | bal ->vm_sfcmpeq
+ |. nop
+ | b <1
+ if (vk) {
+ |. movz TMP2, r0, CRET1
+ } else {
+ |. movn TMP2, r0, CRET1
+ }
+ |.endif
+ |
+ |5: // RA is a number, RD is not a number.
+ |.if FFI
+ | bne SFARG2HI, TISNUM, >9
+ |.else
+ | bne SFARG2HI, TISNUM, <2
+ |.endif
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 f22, LO(RD)
+ | b <4
+ |. cvt.d.w f22, f22
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |6: // RA is an integer, RD is not an integer
+ | sltiu AT, SFARG2HI, LJ_TISNUM
+ |.if FFI
+ | beqz AT, >9
+ |.else
+ | beqz AT, <2
+ |.endif
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ |. mtc1 SFARG1LO, f20
+ | ldc1 f22, 0(RD)
+ | b <4
+ |. cvt.d.w f20, f20
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |.if FFI
+ |8:
+ | li AT, LJ_TCDATA
+ | bne SFARG1HI, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |9:
+ | li AT, LJ_TCDATA
+ | bne SFARG2HI, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | addu RA, BASE, RA
+ | srl TMP1, RD, 3
+ | lw TMP0, HI(RA)
+ | lhu TMP2, OFS_RD(PC)
+ | not TMP1, TMP1
+ | addiu PC, PC, 4
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP0, TMP0, TMP1
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | addu RD, BASE, RD
+ | lhu TMP2, OFS_RD(PC)
+ | lw TMP0, HI(RD)
+ | addiu PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ if (op == BC_IST) {
+ | movz TMP2, r0, TMP0
+ } else {
+ | movn TMP2, r0, TMP0
+ }
+ | addu PC, PC, TMP2
+ } else {
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | lw SFRETHI, HI(RD)
+ | lw SFRETLO, LO(RD)
+ if (op == BC_ISTC) {
+ | beqz TMP0, >1
+ } else {
+ | bnez TMP0, >1
+ }
+ |. addu RA, BASE, RA
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | addu PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src*8, RD = -type*8
+ | addu TMP2, BASE, RA
+ | srl TMP1, RD, 3
+ | lw TMP0, HI(TMP2)
+ | ins_next1
+ | addu AT, TMP0, TMP1
+ | bnez AT, ->vmeta_istype
+ |. ins_next2
+ break;
+ case BC_ISNUM:
+ | // RA = src*8, RD = -(TISNUM-1)*8
+ | addu TMP2, BASE, RA
+ | lw TMP0, HI(TMP2)
+ | ins_next1
+ | sltiu AT, TMP0, LJ_TISNUM
+ | beqz AT, ->vmeta_istype
+ |. ins_next2
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | lw SFRETHI, HI(RD)
+ | lw SFRETLO, LO(RD)
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | addu RD, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(RD)
+ | li TMP1, LJ_TFALSE
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ | addiu TMP1, TMP0, LJ_TTRUE
+ | ins_next1
+ | sw TMP1, HI(RA)
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | addu RB, BASE, RD
+ | lw SFARG1HI, HI(RB)
+ | addu RA, BASE, RA
+ | bne SFARG1HI, TISNUM, >2
+ |. lw SFARG1LO, LO(RB)
+ | lui TMP1, 0x8000
+ | beq SFARG1LO, TMP1, ->vmeta_unm // Meta handler deals with -2^31.
+ |. negu SFARG1LO, SFARG1LO
+ |1:
+ | ins_next1
+ | sw SFARG1HI, HI(RA)
+ | sw SFARG1LO, LO(RA)
+ | ins_next2
+ |2:
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | beqz AT, ->vmeta_unm
+ |. lui TMP1, 0x8000
+ | b <1
+ |. xor SFARG1HI, SFARG1HI, TMP1
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | addu CARG2, BASE, RD
+ | addu RA, BASE, RA
+ | lw TMP0, HI(CARG2)
+ | lw CARG1, LO(CARG2)
+ | li AT, LJ_TSTR
+ | bne TMP0, AT, >2
+ |. li AT, LJ_TTAB
+ | lw CRET1, STR:CARG1->len
+ |1:
+ | ins_next1
+ | sw TISNUM, HI(RA)
+ | sw CRET1, LO(RA)
+ | ins_next2
+ |2:
+ | bne TMP0, AT, ->vmeta_len
+ |. nop
+#if LJ_52
+ | lw TAB:TMP2, TAB:CARG1->metatable
+ | bnez TAB:TMP2, >9
+ |. nop
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |. nop
+#if LJ_52
+ |9:
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_len
+ | bnez TMP0, <3 // 'no __len' flag set: done.
+ |. nop
+ | b ->vmeta_len
+ |. nop
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro fpmod, a, b, c
+ | bal ->vm_floor // floor(b/c)
+ |. div.d FARG1, b, c
+ | mul.d a, FRET1, c
+ | sub.d a, b, a // b - floor(b/c)*c
+ |.endmacro
+
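+ |// Soft-float fpmod: the same b - floor(b/c)*c computation, done with
+ |// libgcc soft-fp calls (__divdf3/__muldf3/__subdf3) and floor(), with
+ |// the operands spilled to a small stack frame across the calls.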
+ |.macro sfpmod
+ | addiu sp, sp, -16
+ |
+ | load_got __divdf3
+ | sw SFARG1HI, HI(sp)
+ | sw SFARG1LO, LO(sp)
+ | sw SFARG2HI, 8+HI(sp)
+ | call_extern
+ |. sw SFARG2LO, 8+LO(sp)
+ |
+ | load_got floor
+ | move SFARG1HI, SFRETHI
+ | call_extern
+ |. move SFARG1LO, SFRETLO
+ |
+ | load_got __muldf3
+ | move SFARG1HI, SFRETHI
+ | move SFARG1LO, SFRETLO
+ | lw SFARG2HI, 8+HI(sp)
+ | call_extern
+ |. lw SFARG2LO, 8+LO(sp)
+ |
+ | load_got __subdf3
+ | lw SFARG1HI, HI(sp)
+ | lw SFARG1LO, LO(sp)
+ | move SFARG2HI, SFRETHI
+ | call_extern
+ |. move SFARG2LO, SFRETLO
+ |
+ | addiu sp, sp, 16
+ |.endmacro
+
+ |.macro ins_arithpre, label
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||switch (vk) {
+ ||case 0:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = num_const*8
+ | addu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. addu RC, KBASE, RC
+ || break;
+ ||case 1:
+ | decode_RB8a RC, INS
+ | decode_RB8b RC
+ | decode_RDtoRC8 RB, RD
+ | // RA = dst*8, RB = num_const*8, RC = src1*8
+ | addu RC, BASE, RC
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. addu RB, KBASE, RB
+ || break;
+ ||default:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = src2*8
+ | addu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. addu RC, BASE, RC
+ || break;
+ ||}
+ |.endmacro
+ |
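+ |// Integer fast path first: addu/subu never trap, so overflow is checked
+ |// explicitly and punted to ->vmeta_arith; otherwise fall through to the
+ |// number path at 5: (FPU instruction or soft-float call).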
+ |.macro ins_arith, intins, fpins, fpcall, label
+ | ins_arithpre none
+ |
+ |.if "label" ~= "none"
+ |label:
+ |.endif
+ |
+ | lw SFARG1HI, HI(RB)
+ | lw SFARG2HI, HI(RC)
+ |
+ |.if "intins" ~= "div"
+ |
+ | // Check for two integers.
+ | lw SFARG1LO, LO(RB)
+ | bne SFARG1HI, TISNUM, >5
+ |. lw SFARG2LO, LO(RC)
+ | bne SFARG2HI, TISNUM, >5
+ |
+ |.if "intins" == "addu"
+ |. intins CRET1, SFARG1LO, SFARG2LO
+ | xor TMP1, CRET1, SFARG1LO // ((y^a) & (y^b)) < 0: overflow.
+ | xor TMP2, CRET1, SFARG2LO
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.elif "intins" == "subu"
+ |. intins CRET1, SFARG1LO, SFARG2LO
+ | xor TMP1, CRET1, SFARG1LO // ((y^a) & (a^b)) < 0: overflow.
+ | xor TMP2, SFARG1LO, SFARG2LO
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.elif "intins" == "mult"
+ |. intins SFARG1LO, SFARG2LO
+ | mflo CRET1
+ | mfhi TMP2
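+ | // The 64-bit product fits in int32 iff HI equals the sign-extension of LO.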
+ | sra TMP1, CRET1, 31
+ | bne TMP1, TMP2, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.else
+ |. load_got lj_vm_modi
+ | beqz SFARG2LO, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.if ENDIAN_BE
+ | move CARG1, SFARG1LO
+ |.endif
+ | call_extern
+ |. move CARG2, SFARG2LO
+ |.endif
+ |
+ | ins_next1
+ | sw TISNUM, HI(RA)
+ | sw CRET1, LO(RA)
+ |3:
+ | ins_next2
+ |
+ |.elif not FPU
+ |
+ | lw SFARG1LO, LO(RB)
+ | lw SFARG2LO, LO(RC)
+ |
+ |.endif
+ |
+ |5: // Check for two numbers.
+ | .FPU ldc1 f20, 0(RB)
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | sltiu TMP0, SFARG2HI, LJ_TISNUM
+ | .FPU ldc1 f22, 0(RC)
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |
+ |.if FPU
+ | fpins FRET1, f20, f22
+ |.elif "fpcall" == "sfpmod"
+ | sfpmod
+ |.else
+ | load_got fpcall
+ | call_extern
+ |. nop
+ |.endif
+ |
+ | ins_next1
+ |.if not FPU
+ | sw SFRETHI, HI(RA)
+ |.endif
+ |.if "intins" ~= "div"
+ | b <3
+ |.endif
+ |.if FPU
+ |. sdc1 FRET1, 0(RA)
+ |.else
+ |. sw SFRETLO, LO(RA)
+ |.endif
+ |.if "intins" == "div"
+ | ins_next2
+ |.endif
+ |
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith addu, add.d, __adddf3, none
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith subu, sub.d, __subdf3, none
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mult, mul.d, __muldf3, none
+ break;
+ case BC_DIVVN:
+ | ins_arith div, div.d, __divdf3, ->BC_DIVVN_Z
+ break;
+ case BC_DIVNV: case BC_DIVVV:
+ | ins_arithpre ->BC_DIVVN_Z
+ break;
+ case BC_MODVN:
+ | ins_arith modi, fpmod, sfpmod, ->BC_MODVN_Z
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre ->BC_MODVN_Z
+ break;
+ case BC_POW:
+ | ins_arithpre none
+ | lw SFARG1HI, HI(RB)
+ | lw SFARG2HI, HI(RC)
+ | sltiu AT, SFARG1HI, LJ_TISNUM
+ | sltiu TMP0, SFARG2HI, LJ_TISNUM
+ | and AT, AT, TMP0
+ | load_got pow
+ | beqz AT, ->vmeta_arith
+ |. addu RA, BASE, RA
+ |.if FPU
+ | ldc1 FARG1, 0(RB)
+ | ldc1 FARG2, 0(RC)
+ |.else
+ | lw SFARG1LO, LO(RB)
+ | lw SFARG2LO, LO(RC)
+ |.endif
+ | call_extern
+ |. nop
+ | ins_next1
+ |.if FPU
+ | sdc1 FRET1, 0(RA)
+ |.else
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | subu CARG3, RC, RB
+ | sw BASE, L->base
+ | addu CARG2, BASE, RC
+ | move MULTRES, RB
+ |->BC_CAT_Z:
+ | load_got lj_meta_cat
+ | srl CARG3, CARG3, 3
+ | sw PC, SAVE_PC
+ | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | bnez CRET1, ->vmeta_binop
+ |. lw BASE, L->base
+ | addu RB, BASE, MULTRES
+ | lw SFRETHI, HI(RB)
+ | lw SFRETLO, LO(RB)
+ | addu RA, BASE, RA
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-str_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TSTR
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | ins_next1
+ | lw TMP0, -4(TMP1) // KBASE-4-cdata_const*4
+ | addu RA, BASE, RA
+ | li TMP2, LJ_TCDATA
+ | sw TMP0, LO(RA)
+ | sw TMP2, HI(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | sra RD, INS, 16
+ | addu RA, BASE, RA
+ | ins_next1
+ | sw TISNUM, HI(RA)
+ | sw RD, LO(RA)
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | addu RD, KBASE, RD
+ | addu RA, BASE, RA
+ | lw SFRETHI, HI(RD)
+ | lw SFRETLO, LO(RD)
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srl TMP1, RD, 3
+ | addu RA, BASE, RA
+ | not TMP0, TMP1
+ | ins_next1
+ | sw TMP0, HI(RA)
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | addu RA, BASE, RA
+ | sw TISNIL, HI(RA)
+ | addiu RA, RA, 8
+ | addu RD, BASE, RD
+ |1:
+ | sw TISNIL, HI(RA)
+ | slt AT, RA, RD
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RD, RD, 1
+ | addu RD, RD, LFUNC:RB
+ | lw UPVAL:RB, LFUNC:RD->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | lw SFRETHI, HI(TMP1)
+ | lw SFRETLO, LO(TMP1)
+ | addu RA, BASE, RA
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, BASE, RD
+ | addu RA, RA, LFUNC:RB
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lw SFRETHI, HI(RD)
+ | lw SFRETLO, LO(RD)
+ | lbu TMP3, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP0, UPVAL:RB->closed
+ | sw SFRETHI, HI(CARG2)
+ | sw SFRETLO, LO(CARG2)
+ | li AT, LJ_GC_BLACK|1
+ | or TMP3, TMP3, TMP0
+ | beq TMP3, AT, >2 // Upvalue is closed and black?
+ |. addiu TMP2, SFRETHI, -(LJ_TNUMX+1)
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
+ | beqz AT, <1 // tvisgcv(v)
+ |. nop
+ | lbu TMP3, GCOBJ:SFRETLO->gch.marked
+ | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | beqz TMP3, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP1, RD, 1
+ | addu RA, RA, LFUNC:RB
+ | subu TMP1, KBASE, TMP1
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lw STR:TMP1, -4(TMP1) // KBASE-4-str_const*4
+ | lbu TMP2, UPVAL:RB->marked
+ | lw CARG2, UPVAL:RB->v
+ | lbu TMP3, STR:TMP1->marked
+ | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | sw STR:TMP1, LO(CARG2)
+ | bnez AT, >2
+ |. sw TMP0, HI(CARG2)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | beqz TMP2, <1
+ |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
+ | beqz AT, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. addiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | addu RD, KBASE, RD
+ | addu RA, RA, LFUNC:RB
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | lw SFRETHI, HI(RD)
+ | lw SFRETLO, LO(RD)
+ | lw TMP1, UPVAL:RB->v
+ | ins_next1
+ | sw SFRETHI, HI(TMP1)
+ | sw SFRETLO, LO(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lw LFUNC:RB, FRAME_FUNC(BASE)
+ | srl RA, RA, 1
+ | srl TMP0, RD, 3
+ | addu RA, RA, LFUNC:RB
+ | not TMP0, TMP0
+ | lw UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | lw TMP1, UPVAL:RB->v
+ | sw TMP0, HI(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lw TMP2, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | load_got lj_func_closeuv
+ | sw BASE, L->base
+ | beqz TMP2, >1
+ |. move CARG1, L
+ | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
+ |. addu CARG2, BASE, RA
+ | lw BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srl TMP1, RD, 1
+ | load_got lj_func_newL_gc
+ | subu TMP1, KBASE, TMP1
+ | lw CARG3, FRAME_FUNC(BASE)
+ | lw CARG2, -4(TMP1) // KBASE-4-tab_const*4
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call_intern lj_func_newL_gc
+ |. move CARG1, L
+ | // Returns GCfuncL *.
+ | lw BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | ins_next1
+ | addu RA, BASE, RA
+ | sw LFUNC:CRET1, LO(RA)
+ | sw TMP0, HI(RA)
+ | ins_next2
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
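+ | // Run a GC step first if gc.total has reached gc.threshold, then
+ | // allocate with lj_tab_new (asize/hbits decoded from RD) or lj_tab_dup.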
+ | lw TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lw TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | sltu AT, TMP0, TMP1
+ | beqz AT, >5
+ |1:
+ if (op == BC_TNEW) {
+ | load_got lj_tab_new
+ | srl CARG2, RD, 3
+ | andi CARG2, CARG2, 0x7ff
+ | li TMP0, 0x801
+ | addiu AT, CARG2, -0x7ff
+ | srl CARG3, RD, 14
+ | movz CARG2, TMP0, AT
+ | // (lua_State *L, int32_t asize, uint32_t hbits)
+ | call_intern lj_tab_new
+ |. move CARG1, L
+ | // Returns Table *.
+ } else {
+ | load_got lj_tab_dup
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | move CARG1, L
+ | call_intern lj_tab_dup // (lua_State *L, Table *kt)
+ |. lw CARG2, -4(TMP1) // KBASE-4-tab_const*4
+ | // Returns Table *.
+ }
+ | lw BASE, L->base
+ | ins_next1
+ | addu RA, BASE, RA
+ | li TMP0, LJ_TTAB
+ | sw TAB:CRET1, LO(RA)
+ | sw TMP0, HI(RA)
+ | ins_next2
+ |5:
+ | load_got lj_gc_step_fixtop
+ | move MULTRES, RD
+ | call_intern lj_gc_step_fixtop // (lua_State *L)
+ |. move CARG1, L
+ | b <1
+ |. move RD, MULTRES
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lw LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srl TMP1, RD, 1
+ | subu TMP1, KBASE, TMP1
+ | lw TAB:RB, LFUNC:TMP2->env
+ | lw STR:RC, -4(TMP1) // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ |. addu RA, BASE, RA
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | bne TMP1, AT, ->vmeta_tgetv
+ |. addu RA, BASE, RA
+ | bne TMP2, TISNUM, >5
+ |. lw RC, LO(CARG3)
+ | lw TMP0, TAB:RB->asize
+ | lw TMP1, TAB:RB->array
+ | sltu AT, RC, TMP0
+ | sll TMP2, RC, 3
+ | beqz AT, ->vmeta_tgetv // Integer key and in array part?
+ |. addu TMP2, TMP1, TMP2
+ | lw SFRETHI, HI(TMP2)
+ | beq SFRETHI, TISNIL, >2
+ |. lw SFRETLO, LO(TMP2)
+ |1:
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <1 // 'no __index' flag set: done.
+ |. nop
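+ | // 'nomm' is a negative metamethod cache: a set bit means the metatable
+ | // is known to lack that metamethod, so the slow path can be skipped.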
+ | b ->vmeta_tgetv
+ |. nop
+ |
+ |5:
+ | li AT, LJ_TSTR
+ | bne TMP2, AT, ->vmeta_tgetv
+ |. nop
+ | b ->BC_TGETS_Z // String key?
+ |. nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*4 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | subu CARG3, KBASE, RC
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tgets1
+ |. addu RA, BASE, RA
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | lw NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
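+ | // A Node is 24 bytes, so idx*24 is computed as idx*32 - idx*8 with two
+ | // shifts and a subtract instead of a multiply.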
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | lw NODE:TMP1, NODE:TMP2->next
+ | lw SFRETHI, offsetof(Node, val)+HI(NODE:TMP2)
+ | addiu CARG1, CARG1, -LJ_TSTR
+ | xor TMP0, TMP0, STR:RC
+ | or AT, CARG1, TMP0
+ | bnez AT, >4
+ |. lw TAB:TMP3, TAB:RB->metatable
+ | beq SFRETHI, TISNIL, >5 // Key found, but nil value?
+ |. lw SFRETLO, offsetof(Node, val)+LO(NODE:TMP2)
+ |3:
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. li SFRETHI, LJ_TNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tgetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. addu RC, TMP2, RC
+ | lw SFRETHI, HI(RC)
+ | beq SFRETHI, TISNIL, >5
+ |. lw SFRETLO, LO(RC)
+ |1:
+ | ins_next1
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ break;
+ case BC_TGETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu RB, BASE, RB
+ | addu RC, BASE, RC
+ | lw TAB:CARG1, LO(RB)
+ | lw CARG2, LO(RC)
+ | addu RA, BASE, RA
+ | lw TMP0, TAB:CARG1->asize
+ | lw TMP1, TAB:CARG1->array
+ | sltu AT, CARG2, TMP0
+ | sll TMP2, CARG2, 3
+ | beqz AT, ->vmeta_tgetr // In array part?
+ |. addu CRET1, TMP1, TMP2
+ | lw SFARG2HI, HI(CRET1)
+ | lw SFARG2LO, LO(CRET1)
+ |->BC_TGETR_Z:
+ | ins_next1
+ | sw SFARG2HI, HI(RA)
+ | sw SFARG2LO, LO(RA)
+ | ins_next2
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG2, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TMP1, HI(CARG2)
+ | lw TMP2, HI(CARG3)
+ | lw TAB:RB, LO(CARG2)
+ | li AT, LJ_TTAB
+ | bne TMP1, AT, ->vmeta_tsetv
+ |. addu RA, BASE, RA
+ | bne TMP2, TISNUM, >5
+ |. lw RC, LO(CARG3)
+ | lw TMP0, TAB:RB->asize
+ | lw TMP1, TAB:RB->array
+ | sltu AT, RC, TMP0
+ | sll TMP2, RC, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. addu TMP1, TMP1, TMP2
+ | lw TMP0, HI(TMP1)
+ | lbu TMP3, TAB:RB->marked
+ | lw SFRETHI, HI(RA)
+ | beq TMP0, TISNIL, >3
+ |. lw SFRETLO, LO(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | sw SFRETHI, HI(TMP1)
+ | bnez AT, >7
+ |. sw SFRETLO, LO(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | li AT, LJ_TSTR
+ | bne TMP2, AT, ->vmeta_tsetv
+ |. nop
+ | b ->BC_TSETS_Z // String key?
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*4 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RC4a RC, INS
+ | lw TMP0, HI(CARG2)
+ | decode_RC4b RC
+ | li AT, LJ_TTAB
+ | subu CARG3, KBASE, RC
+ | lw TAB:RB, LO(CARG2)
+ | lw STR:RC, -4(CARG3) // KBASE-4-str_const*4
+ | bne TMP0, AT, ->vmeta_tsets1
+ |. addu RA, BASE, RA
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | lw NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
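+ | // The new key might be a metamethod name and the table might be in use
+ | // as a metatable, so the negative cache is conservatively reset.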
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | addu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |.if FPU
+ | ldc1 f20, 0(RA)
+ |.else
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ |.endif
+ |1:
+ | lw CARG1, offsetof(Node, key)+HI(NODE:TMP2)
+ | lw TMP0, offsetof(Node, key)+LO(NODE:TMP2)
+ | li AT, LJ_TSTR
+ | lw NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, AT, >5
+ |. lw CARG2, offsetof(Node, val)+HI(NODE:TMP2)
+ | bne TMP0, STR:RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. lw TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ |.if FPU
+ | bnez AT, >7
+ |. sdc1 f20, NODE:TMP2->val
+ |.else
+ | sw SFRETHI, NODE:TMP2->val.u32.hi
+ | bnez AT, >7
+ |. sw SFRETLO, NODE:TMP2->val.u32.lo
+ |.endif
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. addiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |. li AT, LJ_TSTR
+ |6:
+ | load_got lj_tab_newkey
+ | sw STR:RC, LO(CARG3)
+ | sw AT, HI(CARG3)
+ | sw BASE, L->base
+ | move CARG2, TAB:RB
+ | sw PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | lw BASE, L->base
+ |.if FPU
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 f20, 0(CRET1)
+ |.else
+ | lw SFARG1HI, HI(RA)
+ | lw SFARG1LO, LO(RA)
+ | sw SFARG1HI, HI(CRET1)
+ | b <3 // No 2nd write barrier needed.
+ |. sw SFARG1LO, LO(CRET1)
+ |.endif
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | addu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | lw CARG1, HI(CARG2)
+ | li AT, LJ_TTAB
+ | lw TAB:RB, LO(CARG2)
+ | addu RA, BASE, RA
+ | bne CARG1, AT, ->vmeta_tsetb
+ |. srl TMP0, RC, 3
+ | lw TMP1, TAB:RB->asize
+ | lw TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. addu RC, TMP2, RC
+ | lw TMP1, HI(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |1:
+ |. lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | sw SFRETHI, HI(RC)
+ | bnez AT, >7
+ |. sw SFRETLO, LO(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lw TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETR:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | addu CARG1, BASE, RB
+ | addu CARG3, BASE, RC
+ | lw TAB:CARG2, LO(CARG1)
+ | lw CARG3, LO(CARG3)
+ | lbu TMP3, TAB:CARG2->marked
+ | lw TMP0, TAB:CARG2->asize
+ | lw TMP1, TAB:CARG2->array
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. addu RA, BASE, RA
+ |2:
+ | sltu AT, CARG3, TMP0
+ | sll TMP2, CARG3, 3
+ | beqz AT, ->vmeta_tsetr // In array part?
+ |. addu CRET1, TMP1, TMP2
+ |->BC_TSETR_Z:
+ | lw SFARG1HI, HI(RA)
+ | lw SFARG1LO, LO(RA)
+ | ins_next1
+ | sw SFARG1HI, HI(CRET1)
+ | sw SFARG1LO, LO(CRET1)
+ | ins_next2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, CRET1, <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | addu RA, BASE, RA
+ |1:
+ | addu TMP3, KBASE, RD
+ | lw TAB:CARG2, -8+LO(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | lw CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. addu TMP2, RA, TMP0
+ | addu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | addiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sw SFRETHI, HI(TMP1)
+ | sw SFRETLO, LO(TMP1)
+ | bnez AT, <3
+ |. addiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sw BASE, L->base
+ | sw PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. lw BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |. addu NARGS8:RC, NARGS8:RC, MULTRES
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ |->BC_CALL_Z:
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(BASE)
+ | lw LFUNC:RB, LO(BASE)
+ | addiu BASE, BASE, 8
+ | bne TMP0, AT, ->vmeta_call
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | addu RA, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP0, HI(RA)
+ | lw LFUNC:RB, LO(RA)
+ | move NARGS8:RC, RD
+ | lw TMP1, FRAME_PC(BASE)
+ | addiu RA, RA, 8
+ | bne TMP0, AT, ->vmeta_callt
+ |. addiu NARGS8:RC, NARGS8:RC, -8
+ |->BC_CALLT_Z:
+ | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
+ | lbu TMP3, LFUNC:RB->ffid
+ | bnez TMP0, >7
+ |. xori TMP2, TMP1, FRAME_VARG
+ |1:
+ | sw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
+ | move TMP2, BASE
+ | beqz NARGS8:RC, >3
+ |. move TMP3, NARGS8:RC
+ |2:
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | addiu RA, RA, 8
+ | addiu TMP3, TMP3, -8
+ | sw SFRETHI, HI(TMP2)
+ | sw SFRETLO, LO(TMP2)
+ | bnez TMP3, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | or TMP0, TMP0, AT
+ | beqz TMP0, >5
+ |. nop
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lw INS, -4(TMP1)
+ | decode_RA8a RA, INS
+ | decode_RA8b RA
+ | subu TMP1, BASE, RA
+ | lw LFUNC:TMP1, -8+FRAME_FUNC(TMP1)
+ | lw TMP1, LFUNC:TMP1->pc
+ | b <4
+ |. lw KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ |
+ |7: // Tailcall from a vararg function.
+ | andi AT, TMP2, FRAME_TYPEP
+ | bnez AT, <1 // Vararg frame below?
+ |. subu TMP2, BASE, TMP2 // Relocate BASE down.
+ | move BASE, TMP2
+ | lw TMP1, FRAME_PC(TMP2)
+ | b <1
+ |. andi TMP0, TMP1, FRAME_TYPE
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | move TMP2, BASE
+ | addu BASE, BASE, RA
+ | li AT, LJ_TFUNC
+ | lw TMP1, -24+HI(BASE)
+ | lw LFUNC:RB, -24+LO(BASE)
+ | lw SFARG1HI, -16+HI(BASE)
+ | lw SFARG1LO, -16+LO(BASE)
+ | lw SFARG2HI, -8+HI(BASE)
+ | lw SFARG2LO, -8+LO(BASE)
+ | sw TMP1, HI(BASE) // Copy callable.
+ | sw LFUNC:RB, LO(BASE)
+ | sw SFARG1HI, 8+HI(BASE) // Copy state.
+ | sw SFARG1LO, 8+LO(BASE)
+ | sw SFARG2HI, 16+HI(BASE) // Copy control var.
+ | sw SFARG2LO, 16+LO(BASE)
+ | addiu BASE, BASE, 8
+ | bne TMP1, AT, ->vmeta_call
+ |. li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT and ENDIAN_LE
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 (2+1)*8)
+ | addu RA, BASE, RA
+ | lw TAB:RB, -16+LO(RA)
+ | lw RC, -8+LO(RA) // Get index from control var.
+ | lw TMP0, TAB:RB->asize
+ | lw TMP1, TAB:RB->array
+ | addiu PC, PC, 4
+ |1: // Traverse array part.
+ | sltu AT, RC, TMP0
+ | beqz AT, >5 // Index points after array part?
+ |. sll TMP3, RC, 3
+ | addu TMP3, TMP1, TMP3
+ | lw SFARG1HI, HI(TMP3)
+ | lw SFARG1LO, LO(TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | sw TISNUM, HI(RA)
+ | sw RC, LO(RA)
+ | beq SFARG1HI, TISNIL, <1 // Skip holes in array part.
+ |. addiu RC, RC, 1
+ | sw SFARG1HI, 8+HI(RA)
+ | sw SFARG1LO, 8+LO(RA)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sw RC, -8+LO(RA) // Update control var.
+ | addu PC, PC, RD
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | lw TMP1, TAB:RB->hmask
+ | subu RC, RC, TMP0
+ | lw TMP2, TAB:RB->node
+ |6:
+ | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
+ | bnez AT, <3
+ |. sll TMP3, RC, 5
+ | sll RB, RC, 3
+ | subu TMP3, TMP3, RB
+ | addu NODE:TMP3, TMP3, TMP2
+ | lw SFARG1HI, NODE:TMP3->val.u32.hi
+ | lw SFARG1LO, NODE:TMP3->val.u32.lo
+ | lhu RD, -4+OFS_RD(PC)
+ | beq SFARG1HI, TISNIL, <6 // Skip holes in hash part.
+ |. addiu RC, RC, 1
+ | lw SFARG2HI, NODE:TMP3->key.u32.hi
+ | lw SFARG2LO, NODE:TMP3->key.u32.lo
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sw SFARG1HI, 8+HI(RA)
+ | sw SFARG1LO, 8+LO(RA)
+ | addu RC, RC, TMP0
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sw SFARG2HI, HI(RA)
+ | sw SFARG2LO, LO(RA)
+ | addu PC, PC, RD
+ | b <3
+ |. sw RC, -8+LO(RA) // Update control var.
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | addu RA, BASE, RA
+ | srl TMP0, RD, 1
+ | lw CARG1, -24+HI(RA)
+ | lw CFUNC:CARG2, -24+LO(RA)
+ | addu TMP0, PC, TMP0
+ | lw CARG3, -16+HI(RA)
+ | lw CARG4, -8+HI(RA)
+ | li AT, LJ_TFUNC
+ | bne CARG1, AT, >5
+ |. lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | lbu CARG2, CFUNC:CARG2->ffid
+ | addiu CARG3, CARG3, -LJ_TTAB
+ | addiu CARG4, CARG4, -LJ_TNIL
+ | or CARG3, CARG3, CARG4
+ | addiu CARG2, CARG2, -FF_next_N
+ | or CARG2, CARG2, CARG3
+ | bnez CARG2, >5
+ |. lui TMP1, (LJ_KEYINDEX >> 16)
+ | addu PC, TMP0, TMP2
+ | ori TMP1, TMP1, (LJ_KEYINDEX & 0xffff)
+ | sw r0, -8+LO(RA) // Initialize control var.
+ | sw TMP1, -8+HI(RA)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP3, BC_JMP
+ | li TMP1, BC_ITERC
+ | sb TMP3, -4+OFS_OP(PC)
+ | addu PC, TMP0, TMP2
+ |.if JIT
+ | lb TMP0, OFS_OP(PC)
+ | li AT, BC_ITERN
+ | bne TMP0, AT, >6
+ |. lhu TMP2, OFS_RD(PC)
+ |.endif
+ | b <1
+ |. sb TMP1, OFS_OP(PC)
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | lw TMP0, DISPATCH_J(trace)(DISPATCH)
+ | sll TMP2, TMP2, 2
+ | addu TMP0, TMP0, TMP2
+ | lw TRACE:TMP2, 0(TMP0)
+ | lw TMP0, TRACE:TMP2->startins
+ | li AT, -256
+ | and TMP0, TMP0, AT
+ | or TMP0, TMP0, TMP1
+ | b <1
+ |. sw TMP0, 0(PC)
+ |.endif
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lw TMP0, FRAME_PC(BASE)
+ | decode_RDtoRC8 RC, RD
+ | decode_RB8a RB, INS
+ | addu RC, BASE, RC
+ | decode_RB8b RB
+ | addu RA, BASE, RA
+ | addiu RC, RC, FRAME_VARG
+ | addu TMP2, RA, RB
+ | addiu TMP3, BASE, -8 // TMP3 = vtop
+ | subu RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | beqz RB, >5 // Copy all varargs?
+ |. subu TMP1, TMP3, RC
+ | addiu TMP2, TMP2, -16
+ |1: // Copy vararg slots to destination slots.
+ | lw CARG1, HI(RC)
+ | sltu AT, RC, TMP3
+ | lw CARG2, LO(RC)
+ | addiu RC, RC, 8
+ | movz CARG1, TISNIL, AT
+ | sw CARG1, HI(RA)
+ | sw CARG2, LO(RA)
+ | sltu AT, RA, TMP2
+ | bnez AT, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lw TMP0, L->maxstack
+ | blez TMP1, <3 // No vararg slots?
+ |. li MULTRES, 8 // MULTRES = (0+1)*8
+ | addu TMP2, RA, TMP1
+ | sltu AT, TMP0, TMP2
+ | bnez AT, >7
+ |. addiu MULTRES, TMP1, 8
+ |6:
+ | lw SFRETHI, HI(RC)
+ | lw SFRETLO, LO(RC)
+ | addiu RC, RC, 8
+ | sw SFRETHI, HI(RA)
+ | sw SFRETLO, LO(RA)
+ | sltu AT, RC, TMP3
+ | bnez AT, <6 // More vararg slots?
+ |. addiu RA, RA, 8
+ | b <3
+ |. nop
+ |
+ |7: // Grow stack for varargs.
+ | load_got lj_state_growstack
+ | sw RA, L->top
+ | subu RA, RA, BASE
+ | sw BASE, L->base
+ | subu BASE, RC, BASE // Need delta, because BASE may change.
+ | sw PC, SAVE_PC
+ | srl CARG2, TMP1, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | move RC, BASE
+ | lw BASE, L->base
+ | addu RA, BASE, RA
+ | addu RC, BASE, RC
+ | b <6
+ |. addiu TMP3, BASE, -8
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ |1:
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ | addiu RC, RD, -8
+ | decode_RA8a TMP0, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b TMP0
+ | decode_RB8b RB
+ | addu TMP3, TMP2, RB
+ | beqz RC, >3
+ |. subu BASE, TMP2, TMP0
+ |2:
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ | addiu RA, RA, 8
+ | addiu RC, RC, -8
+ | sw SFRETHI, HI(TMP2)
+ | sw SFRETLO, LO(TMP2)
+ | bnez RC, <2
+ |. addiu TMP2, TMP2, 8
+ |3:
+ | addiu TMP3, TMP3, -8
+ |5:
+ | sltu AT, TMP2, TMP3
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | sw TISNIL, HI(TMP2)
+ | b <5
+ |. addiu TMP2, TMP2, 8
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi TMP2, TMP1, FRAME_TYPEP
+ | bnez TMP2, ->vm_return
+ |. nop
+ | // Return from vararg function: relocate BASE down.
+ | subu BASE, BASE, TMP1
+ | b <1
+ |. lw PC, FRAME_PC(BASE)
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lw PC, FRAME_PC(BASE)
+ | addu RA, BASE, RA
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ | lw INS, -4(PC)
+ | addiu TMP2, BASE, -8
+ if (op == BC_RET1) {
+ | lw SFRETHI, HI(RA)
+ | lw SFRETLO, LO(RA)
+ }
+ | decode_RB8a RB, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b RB
+ | decode_RA8b RA
+ if (op == BC_RET1) {
+ | sw SFRETHI, HI(TMP2)
+ | sw SFRETLO, LO(TMP2)
+ }
+ | subu BASE, TMP2, RA
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6
+ |. lw LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lw TMP1, LFUNC:TMP1->pc
+ | lw KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | addiu TMP2, TMP2, 8
+ | addiu RD, RD, 8
+ | b <5
+ if (op == BC_RET1) {
+ |. sw TISNIL, HI(TMP2)
+ } else {
+ |. sw TISNIL, -8+HI(TMP2)
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | addu RA, BASE, RA
+ | lw SFARG1HI, FORL_IDX*8+HI(RA)
+ | lw SFARG1LO, FORL_IDX*8+LO(RA)
+ if (op != BC_JFORL) {
+ | srl RD, RD, 1
+ | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, RD, TMP2
+ }
+ if (!vk) {
+ | lw SFARG2HI, FORL_STOP*8+HI(RA)
+ | lw SFARG2LO, FORL_STOP*8+LO(RA)
+ | bne SFARG1HI, TISNUM, >5
+ |. lw SFRETHI, FORL_STEP*8+HI(RA)
+ | xor AT, SFARG2HI, TISNUM
+ | lw SFRETLO, FORL_STEP*8+LO(RA)
+ | xor TMP0, SFRETHI, TISNUM
+ | or AT, AT, TMP0
+ | bnez AT, ->vmeta_for
+ |. slt AT, SFRETLO, r0
+ | slt CRET1, SFARG2LO, SFARG1LO
+ | slt TMP1, SFARG1LO, SFARG2LO
+ | movn CRET1, TMP1, AT
+ } else {
+ | bne SFARG1HI, TISNUM, >5
+ |. lw SFARG2LO, FORL_STEP*8+LO(RA)
+ | lw SFRETLO, FORL_STOP*8+LO(RA)
+ | move TMP3, SFARG1LO
+ | addu SFARG1LO, SFARG1LO, SFARG2LO
+ | xor TMP0, SFARG1LO, TMP3
+ | xor TMP1, SFARG1LO, SFARG2LO
+ | and TMP0, TMP0, TMP1
+ | slt TMP1, SFARG1LO, SFRETLO
+ | slt CRET1, SFRETLO, SFARG1LO
+ | slt AT, SFARG2LO, r0
+ | slt TMP0, TMP0, r0 // ((y^a) & (y^b)) < 0: overflow.
+ | movn CRET1, TMP1, AT
+ | or CRET1, CRET1, TMP0
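+ | // Signed overflow of y = a + b occurs iff y's sign differs from the
+ | // signs of both a and b, i.e. iff (y^a) & (y^b) has the sign bit set.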
+ }
+ |1:
+ if (op == BC_FORI) {
+ | movz TMP2, r0, CRET1
+ | addu PC, PC, TMP2
+ } else if (op == BC_JFORI) {
+ | addu PC, PC, TMP2
+ | lhu RD, -4+OFS_RD(PC)
+ } else if (op == BC_IFORL) {
+ | movn TMP2, r0, CRET1
+ | addu PC, PC, TMP2
+ }
+ if (vk) {
+ | sw SFARG1HI, FORL_IDX*8+HI(RA)
+ | sw SFARG1LO, FORL_IDX*8+LO(RA)
+ }
+ | ins_next1
+ | sw SFARG1HI, FORL_EXT*8+HI(RA)
+ | sw SFARG1LO, FORL_EXT*8+LO(RA)
+ |2:
+ if (op == BC_JFORI) {
+ | beqz CRET1, =>BC_JLOOP
+ |. decode_RD8b RD
+ } else if (op == BC_JFORL) {
+ | beqz CRET1, =>BC_JLOOP
+ }
+ | ins_next2
+ |
+ |5: // FP loop.
+ |.if FPU
+ if (!vk) {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | sltiu TMP0, SFARG1HI, LJ_TISNUM
+ | sltiu TMP1, SFARG2HI, LJ_TISNUM
+ | sltiu AT, SFRETHI, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. slt TMP3, SFRETHI, r0
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | li CRET1, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | b <1
+ |. movn CRET1, AT, TMP3
+ } else {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f4, FORL_STEP*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | lw SFARG2HI, FORL_STEP*8+HI(RA)
+ | add.d f0, f0, f4
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | slt TMP3, SFARG2HI, r0
+ | li CRET1, 1
+ | li AT, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | movn CRET1, AT, TMP3
+ if (op == BC_IFORL) {
+ | movn TMP2, r0, CRET1
+ | addu PC, PC, TMP2
+ }
+ | sdc1 f0, FORL_IDX*8(RA)
+ | ins_next1
+ | b <2
+ |. sdc1 f0, FORL_EXT*8(RA)
+ }
+ |.else
+ if (!vk) {
+ | sltiu TMP0, SFARG1HI, LJ_TISNUM
+ | sltiu TMP1, SFARG2HI, LJ_TISNUM
+ | sltiu AT, SFRETHI, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. nop
+ | bal ->vm_sfcmpolex
+ |. move TMP3, SFRETHI
+ | b <1
+ |. nop
+ } else {
+ | lw SFARG2HI, FORL_STEP*8+HI(RA)
+ | load_got __adddf3
+ | call_extern
+ |. sw TMP2, ARG5
+ | lw SFARG2HI, FORL_STOP*8+HI(RA)
+ | lw SFARG2LO, FORL_STOP*8+LO(RA)
+ | move SFARG1HI, SFRETHI
+ | move SFARG1LO, SFRETLO
+ | bal ->vm_sfcmpolex
+ |. lw TMP3, FORL_STEP*8+HI(RA)
+ if (op == BC_JFORL) {
+ | lhu RD, -4+OFS_RD(PC)
+ | lw TMP2, ARG5
+ | b <1
+ |. decode_RD8b RD
+ } else {
+ | b <1
+ |. lw TMP2, ARG5
+ }
+ }
+ |.endif
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | addu RA, BASE, RA
+ | lw TMP1, HI(RA)
+ | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
+ |. lw TMP2, LO(RA)
+ if (op == BC_JITERL) {
+ | sw TMP1, -8+HI(RA)
+ | b =>BC_JLOOP
+ |. sw TMP2, -8+LO(RA)
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | sw TMP1, -8+HI(RA)
+ | sw TMP2, -8+LO(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lw TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srl RD, RD, 1
+ | li AT, 0
+ | addu TMP1, TMP1, RD
+ | // Traces on MIPS don't store the trace number, so use 0.
+ | sw AT, DISPATCH_GL(vmstate)(DISPATCH)
+ | lw TRACE:TMP2, 0(TMP1)
+ | sw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | lw TMP2, TRACE:TMP2->mcode
+ | sw L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
+ | jr TMP2
+ |. addiu JGL, DISPATCH, GG_DISP2G+32768
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lw TMP2, L->maxstack
+ | lbu TMP1, -4+PC2PROTO(numparams)(PC)
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | sltu AT, TMP2, RA
+ | bnez AT, ->vm_growstack_l
+ |. sll TMP1, TMP1, 3
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
+ | bnez AT, >3
+ |. addu AT, BASE, NARGS8:RC
+ if (op == BC_JFUNCF) {
+ | decode_RD8a RD, INS
+ | b =>BC_JLOOP
+ |. decode_RD8b RD
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | sw TISNIL, HI(AT)
+ | b <2
+ |. addiu NARGS8:RC, NARGS8:RC, 8
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | addu TMP1, BASE, RC
+ | lw TMP2, L->maxstack
+ | addu TMP0, RA, RC
+ | sw LFUNC:RB, LO(TMP1) // Store copy of LFUNC.
+ | addiu TMP3, RC, 8+FRAME_VARG
+ | sltu AT, TMP0, TMP2
+ | lw KBASE, -4+PC2PROTO(k)(PC)
+ | beqz AT, ->vm_growstack_l
+ |. sw TMP3, HI(TMP1) // Store delta + FRAME_VARG.
+ | lbu TMP2, -4+PC2PROTO(numparams)(PC)
+ | move RA, BASE
+ | move RC, TMP1
+ | ins_next1
+ | beqz TMP2, >3
+ |. addiu BASE, TMP1, 8
+ |1:
+ | lw TMP0, HI(RA)
+ | lw TMP3, LO(RA)
+ | sltu AT, RA, RC // Less args than parameters?
+ | move CARG1, TMP0
+ | movz TMP0, TISNIL, AT // Clear missing parameters.
+ | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
+ | sw TMP3, 8+LO(TMP1)
+ | addiu TMP2, TMP2, -1
+ | sw TMP0, 8+HI(TMP1)
+ | addiu TMP1, TMP1, 8
+ | sw CARG1, HI(RA)
+ | bnez TMP2, <1
+ |. addiu RA, RA, 8
+ |3:
+ | ins_next2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lw CFUNCADDR, CFUNC:RB->f
+ } else {
+ | lw CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | addu TMP1, RA, NARGS8:RC
+ | lw TMP2, L->maxstack
+ | addu RC, BASE, NARGS8:RC
+ | sw BASE, L->base
+ | sltu AT, TMP2, TMP1
+ | sw RC, L->top
+ | li_vmstate C
+ if (op == BC_FUNCCW) {
+ | lw CARG2, CFUNC:RB->f
+ }
+ | bnez AT, ->vm_growstack_c // Need to grow stack.
+ |. move CARG1, L
+ | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
+ |. st_vmstate
+ | // Returns nresults.
+ | lw BASE, L->base
+ | sll RD, CRET1, 3
+ | lw TMP1, L->top
+ | li_vmstate INTERP
+ | lw PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | subu RA, TMP1, RD // RA = L->top - nresults*8
+ | sw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | b ->vm_returnc
+ |. st_vmstate
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
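+ /* The loops emit DW_CFA_offset entries: 0x80+reg for the callee-saved
+ GPRs r16-r23 and, unless soft-float, 0x80+32+reg for the even FPRs
+ f20-f30 (DWARF numbers FPRs as 32+n). */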
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+#if !LJ_SOFTFP
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+#endif
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+ fprintf(ctx->fp,
+ "\t.globl lj_err_unwind_dwarf\n"
+ ".Lframe1:\n"
+ "\t.4byte .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.4byte lj_err_unwind_dwarf\n"
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.4byte .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.4byte .LASFDE2-.Lframe1\n"
+ "\t.4byte .Lbegin\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 1\n"
+ "\t.byte 0x9e\n\t.sleb128 2\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 26-i);
+#if !LJ_SOFTFP
+ for (i = 30; i >= 20; i -= 2)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 42-i);
+#endif
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.4byte .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.4byte 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.4byte .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.4byte .LASFDE3-.Lframe2\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x9f\n\t.uleb128 1\n"
+ "\t.byte 0x90\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_mips64.dasc b/libs/luajit-cmake/luajit/src/vm_mips64.dasc
new file mode 100644
index 0000000..651bc42
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_mips64.dasc
@@ -0,0 +1,5538 @@
+|// Low-level VM code for MIPS64 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|//
+|// Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
+|// Sponsored by Cisco Systems, Inc.
+|
+|.arch mips64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r0 = 0, r26/r27 = reserved, r28 = gp, r29 = sp, r31 = ra
+|
+|.macro .FPU, a, b
+|.if FPU
+| a, b
+|.endif
+|.endmacro
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r16 // Base of current Lua stack frame.
+|.define KBASE, r17 // Constants of current Lua function.
+|.define PC, r18 // Next PC.
+|.define DISPATCH, r19 // Opcode dispatch table.
+|.define LREG, r20 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r21 // Size of multi-result: (nresults+1)*8.
+|
+|.define JGL, r30 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNIL, r30
+|.define TISNUM, r22
+|.if FPU
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.endif
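+|// Adding TOBIT to a double in int32 range leaves the integer value in the
+|// low 32 bits of the mantissa: the usual bias trick for fast conversion.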
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r23 // Callee-save.
+|.define RB, r8
+|.define RC, r9
+|.define RD, r10
+|.define INS, r11
+|
+|.define AT, r1 // Assembler temporary.
+|.define TMP0, r12
+|.define TMP1, r13
+|.define TMP2, r14
+|.define TMP3, r15
+|
+|// MIPS n64 calling convention.
+|.define CFUNCADDR, r25
+|.define CARG1, r4
+|.define CARG2, r5
+|.define CARG3, r6
+|.define CARG4, r7
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG7, r10
+|.define CARG8, r11
+|
+|.define CRET1, r2
+|.define CRET2, r3
+|
+|.if FPU
+|.define FARG1, f12
+|.define FARG2, f13
+|.define FARG3, f14
+|.define FARG4, f15
+|.define FARG5, f16
+|.define FARG6, f17
+|.define FARG7, f18
+|.define FARG8, f19
+|
+|.define FRET1, f0
+|.define FRET2, f2
+|
+|.define FTMP0, f20
+|.define FTMP1, f21
+|.define FTMP2, f22
+|.endif
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.if FPU // MIPS64 hard-float.
+|
+|.define CFRAME_SPACE, 192 // Delta for sp.
+|
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_ERRF, 188(sp) // 32 bit values.
+|.define SAVE_NRES, 184(sp)
+|.define SAVE_CFRAME, 176(sp) // 64 bit values.
+|.define SAVE_L, 168(sp)
+|.define SAVE_PC, 160(sp)
+|//----- 16 byte aligned
+|.define SAVE_GPR_, 80 // .. 80+10*8: 64 bit GPR saves.
+|.define SAVE_FPR_, 16 // .. 16+8*8: 64 bit FPR saves.
+|
+|.else // MIPS64 soft-float
+|
+|.define CFRAME_SPACE, 128 // Delta for sp.
+|
+|//----- 16 byte aligned, <-- sp entering interpreter
+|.define SAVE_ERRF, 124(sp) // 32 bit values.
+|.define SAVE_NRES, 120(sp)
+|.define SAVE_CFRAME, 112(sp) // 64 bit values.
+|.define SAVE_L, 104(sp)
+|.define SAVE_PC, 96(sp)
+|//----- 16 byte aligned
+|.define SAVE_GPR_, 16 // .. 16+10*8: 64 bit GPR saves.
+|
+|.endif
+|
+|.define TMPX, 8(sp) // Unused by interpreter, temp for JIT code.
+|.define TMPD, 0(sp)
+|//----- 16 byte aligned
+|
+|.define TMPD_OFS, 0
+|
+|.define SAVE_MULTRES, TMPD
+|
+|//-----------------------------------------------------------------------
+|
+|.macro saveregs
+| daddiu sp, sp, -CFRAME_SPACE
+| sd ra, SAVE_GPR_+9*8(sp)
+| sd r30, SAVE_GPR_+8*8(sp)
+| .FPU sdc1 f31, SAVE_FPR_+7*8(sp)
+| sd r23, SAVE_GPR_+7*8(sp)
+| .FPU sdc1 f30, SAVE_FPR_+6*8(sp)
+| sd r22, SAVE_GPR_+6*8(sp)
+| .FPU sdc1 f29, SAVE_FPR_+5*8(sp)
+| sd r21, SAVE_GPR_+5*8(sp)
+| .FPU sdc1 f28, SAVE_FPR_+4*8(sp)
+| sd r20, SAVE_GPR_+4*8(sp)
+| .FPU sdc1 f27, SAVE_FPR_+3*8(sp)
+| sd r19, SAVE_GPR_+3*8(sp)
+| .FPU sdc1 f26, SAVE_FPR_+2*8(sp)
+| sd r18, SAVE_GPR_+2*8(sp)
+| .FPU sdc1 f25, SAVE_FPR_+1*8(sp)
+| sd r17, SAVE_GPR_+1*8(sp)
+| .FPU sdc1 f24, SAVE_FPR_+0*8(sp)
+| sd r16, SAVE_GPR_+0*8(sp)
+|.endmacro
+|
+|.macro restoreregs_ret
+| ld ra, SAVE_GPR_+9*8(sp)
+| ld r30, SAVE_GPR_+8*8(sp)
+| ld r23, SAVE_GPR_+7*8(sp)
+| .FPU ldc1 f31, SAVE_FPR_+7*8(sp)
+| ld r22, SAVE_GPR_+6*8(sp)
+| .FPU ldc1 f30, SAVE_FPR_+6*8(sp)
+| ld r21, SAVE_GPR_+5*8(sp)
+| .FPU ldc1 f29, SAVE_FPR_+5*8(sp)
+| ld r20, SAVE_GPR_+4*8(sp)
+| .FPU ldc1 f28, SAVE_FPR_+4*8(sp)
+| ld r19, SAVE_GPR_+3*8(sp)
+| .FPU ldc1 f27, SAVE_FPR_+3*8(sp)
+| ld r18, SAVE_GPR_+2*8(sp)
+| .FPU ldc1 f26, SAVE_FPR_+2*8(sp)
+| ld r17, SAVE_GPR_+1*8(sp)
+| .FPU ldc1 f25, SAVE_FPR_+1*8(sp)
+| ld r16, SAVE_GPR_+0*8(sp)
+| .FPU ldc1 f24, SAVE_FPR_+0*8(sp)
+| jr ra
+| daddiu sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; .long 0xec1cf0f0; .endmacro
+|
+|// Macros to mark delay slots.
+|.macro ., a; a; .endmacro
+|.macro ., a,b; a,b; .endmacro
+|.macro ., a,b,c; a,b,c; .endmacro
+|.macro ., a,b,c,d; a,b,c,d; .endmacro
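+|// An instruction written as '|. insn' occupies the delay slot of the
+|// preceding branch and executes whether or not the branch is taken.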
+|
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -16
+|
+|//-----------------------------------------------------------------------
+|
+|// Endian-specific defines.
+|.if ENDIAN_LE
+|.define HI, 4
+|.define LO, 0
+|.define OFS_RD, 2
+|.define OFS_RA, 1
+|.define OFS_OP, 0
+|.else
+|.define HI, 0
+|.define LO, 4
+|.define OFS_RD, 0
+|.define OFS_RA, 2
+|.define OFS_OP, 3
+|.endif
+|
+|// Instruction decode.
+|.macro decode_OP1, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP8a, dst, ins; andi dst, ins, 0xff; .endmacro
+|.macro decode_OP8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RC8a, dst, ins; srl dst, ins, 13; .endmacro
+|.macro decode_RC8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD4b, dst; sll dst, dst, 2; .endmacro
+|.macro decode_RA8a, dst, ins; srl dst, ins, 5; .endmacro
+|.macro decode_RA8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RB8a, dst, ins; srl dst, ins, 21; .endmacro
+|.macro decode_RB8b, dst; andi dst, dst, 0x7f8; .endmacro
+|.macro decode_RD8a, dst, ins; srl dst, ins, 16; .endmacro
+|.macro decode_RD8b, dst; sll dst, dst, 3; .endmacro
+|.macro decode_RDtoRC8, dst, src; andi dst, src, 0x7f8; .endmacro
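+|// A bytecode instruction is 32 bits: OP in the lowest byte, then A, then
+|// either C and B bytes or a 16 bit D field. Operands are decoded
+|// pre-scaled by 8 (the size of a stack slot) or by 4 to save shifts later.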
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lw INS, 0(PC)
+| daddiu PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch.
+|.macro ins_NEXT2
+| decode_OP8a TMP1, INS
+| decode_OP8b TMP1
+| daddu TMP0, DISPATCH, TMP1
+| decode_RD8a RD, INS
+| ld AT, 0(TMP0)
+| decode_RA8a RA, INS
+| decode_RD8b RD
+| jr AT
+| decode_RA8b RA
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| ld PC, LFUNC:RB->pc
+| lw INS, 0(PC)
+| daddiu PC, PC, 4
+| decode_OP8a TMP1, INS
+| decode_RA8a RA, INS
+| decode_OP8b TMP1
+| decode_RA8b RA
+| daddu TMP0, DISPATCH, TMP1
+| ld TMP0, 0(TMP0)
+| jr TMP0
+| daddu RA, RA, BASE
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| sd PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|.macro branch_RD
+| srl TMP0, RD, 1
+| lui AT, (-(BCBIAS_J*4 >> 16) & 65535)
+| addu TMP0, TMP0, AT
+| daddu PC, PC, TMP0
+|.endmacro
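+|// Jump targets are biased by BCBIAS_J (0x8000), so with RD = (bias+ofs)*8
+|// this adds ofs*4 to PC: a D field of BCBIAS_J+1 advances PC by one
+|// instruction.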
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+#define GG_DISP2GOT (GG_OFS(got) - GG_OFS(dispatch))
+#define DISPATCH_GOT(name) (GG_DISP2GOT + sizeof(void*)*LJ_GOT_##name)
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
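+|// A prototype's bytecode directly follows its GCproto header, so with PC
+|// pointing at an instruction the proto fields sit at negative offsets.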
+|
+|.macro load_got, func
+| ld CFUNCADDR, DISPATCH_GOT(func)(DISPATCH)
+|.endmacro
+|// The 'bal' variant below would be much faster, but sadly there's no easy
+|// way to force the required code layout.
+|// .macro call_intern, func; bal extern func; .endmacro
+|.macro call_intern, func; jalr CFUNCADDR; .endmacro
+|.macro call_extern; jalr CFUNCADDR; .endmacro
+|.macro jmp_extern; jr CFUNCADDR; .endmacro
+|
+|.macro hotcheck, delta, target
+| dsrl TMP1, PC, 1
+| andi TMP1, TMP1, 126
+| daddu TMP1, TMP1, DISPATCH
+| lhu TMP2, GG_DISP2HOT(TMP1)
+| addiu TMP2, TMP2, -delta
+| bltz TMP2, target
+|. sh TMP2, GG_DISP2HOT(TMP1)
+|.endmacro
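+|// hotcheck maps the PC to one of 64 16-bit counters in the GG area,
+|// subtracts 'delta' and branches to 'target' on underflow, i.e. when the
+|// loop or call has become hot enough to start recording a trace.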
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; sw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp, target
+| ld tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| andi mark, mark, ~LJ_GC_BLACK & 255 // black2gray(tab)
+| sd tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| sb mark, tab->marked
+| b target
+|. sd tmp, tab->gclist
+|.endmacro
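+|// This is a backward barrier: instead of marking the stored value, the
+|// black table is turned gray again and pushed onto gc.grayagain, so the
+|// GC re-traverses it later.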
+|
+|// Clear type tag. Isolate lowest 14+32+1=47 bits of reg.
+|.macro cleartp, reg; dextm reg, reg, 0, 14; .endmacro
+|.macro cleartp, dst, reg; dextm dst, reg, 0, 14; .endmacro
+|
+|// Set type tag: Merge 17 type bits into bits [15+32=47, 31+32+1=64) of dst.
+|.macro settp, dst, tp; dinsu dst, tp, 15, 31; .endmacro
+|
+|// Extract (negative) type tag.
+|.macro gettp, dst, src; dsra dst, src, 47; .endmacro
+|
+|// Macros to check the TValue type and extract the GCobj. Branch on failure.
+|.macro checktp, reg, tp, target
+| gettp AT, reg
+| daddiu AT, AT, tp
+| bnez AT, target
+|. cleartp reg
+|.endmacro
+|.macro checktp, dst, reg, tp, target
+| gettp AT, reg
+| daddiu AT, AT, tp
+| bnez AT, target
+|. cleartp dst, reg
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, -LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, -LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, -LJ_TFUNC, target; .endmacro
+|.macro checkint, reg, target // Caveat: has delay slot!
+| gettp AT, reg
+| bne AT, TISNUM, target
+|.endmacro
+|.macro checknum, reg, target // Caveat: has delay slot!
+| gettp AT, reg
+| sltiu AT, AT, LJ_TISNUM
+| beqz AT, target
+|.endmacro
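+|// Example: for a string TValue gettp yields the sign-extended itype ~4,
+|// i.e. -5, so checkstr's 'daddiu AT, AT, -LJ_TSTR' gives zero exactly on
+|// a type match.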
+|
+|.macro mov_false, reg
+| lu reg, 0x8000
+| dsll reg, reg, 32
+| not reg, reg
+|.endmacro
+|.macro mov_true, reg
+| li reg, 0x0001
+| dsll reg, reg, 48
+| not reg, reg
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andi AT, PC, FRAME_P
+ | beqz AT, ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ |. mov_true TMP1
+ | ld PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | move BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | sd TMP1, -8(RA) // Prepend true to results.
+ | daddiu RA, RA, -8
+ |
+ |->vm_returnc:
+ | addiu RD, RD, 8 // RD = (nresults+1)*8.
+ | andi TMP0, PC, FRAME_TYPE
+ | beqz RD, ->vm_unwind_c_eh
+ |. li CRET1, LUA_YIELD
+ | beqz TMP0, ->BC_RET_Z // Handle regular return to Lua.
+ |. move MULTRES, RD
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | li TMP2, -8
+ | xori AT, TMP0, FRAME_C
+ | and TMP2, PC, TMP2
+ | bnez AT, ->vm_returnp
+ |. dsubu TMP2, BASE, TMP2 // TMP2 = previous base.
+ |
+ | addiu TMP1, RD, -8
+ | sd TMP2, L->base
+ | li_vmstate C
+ | lw TMP2, SAVE_NRES
+ | daddiu BASE, BASE, -16
+ | st_vmstate
+ | beqz TMP1, >2
+ |. sll TMP2, TMP2, 3
+ |1:
+ | addiu TMP1, TMP1, -8
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | sd CRET1, 0(BASE)
+ | bnez TMP1, <1
+ |. daddiu BASE, BASE, 8
+ |
+ |2:
+ | bne TMP2, RD, >6
+ |3:
+ |. sd BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | ld TMP0, SAVE_CFRAME // Restore previous C frame.
+ | move CRET1, r0 // Ok return status for vm_pcall.
+ | sd TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs_ret
+ |
+ |6:
+ | ld TMP1, L->maxstack
+ | slt AT, TMP2, RD
+ | bnez AT, >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ |. slt AT, BASE, TMP1
+ | beqz AT, >8
+ |. nop
+ | sd TISNIL, 0(BASE)
+ | addiu RD, RD, 8
+ | b <2
+ |. daddiu BASE, BASE, 8
+ |
+ |7: // Less results wanted.
+ | subu TMP0, RD, TMP2
+ | dsubu TMP0, BASE, TMP0 // Either keep top or shrink it.
+ |.if MIPSR6
+ | selnez TMP0, TMP0, TMP2 // LUA_MULTRET+1 case?
+ | seleqz BASE, BASE, TMP2
+ | b <3
+ |. or BASE, BASE, TMP0
+ |.else
+ | b <3
+ |. movn BASE, TMP0, TMP2 // LUA_MULTRET+1 case?
+ |.endif
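+ | // A zero nresults+1 count means LUA_MULTRET: keep the current top
+ | // instead of shrinking BASE to the requested number of results.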
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return back from a lua_call() with (high) nresults adjustment.
+ | load_got lj_state_growstack
+ | move MULTRES, RD
+ | srl CARG2, TMP2, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | lw TMP2, SAVE_NRES
+ | ld BASE, L->top // Need the (realloced) L->top in BASE.
+ | move RD, MULTRES
+ | b <2
+ |. sll TMP2, TMP2, 3
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | move sp, CARG1
+ | move CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | ld L, SAVE_L
+ | li TMP0, ~LJ_VMST_C
+ | ld GL:TMP1, L->glref
+ | b ->vm_leave_unw
+ |. sw TMP0, GL:TMP1->vmstate
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | li AT, -4
+ | and sp, CARG1, AT
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | ld L, SAVE_L
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM
+ | ld BASE, L->base
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | .FPU mtc1 TMP3, TOBIT
+ | mov_false TMP1
+ | li_vmstate INTERP
+ | ld PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | daddiu RA, BASE, -8 // Results start at BASE-8.
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sd TMP1, 0(RA) // Prepend false to error message.
+ | st_vmstate
+ | b ->vm_returnc
+ |. li RD, 16 // 2 results: false + error message.
+ |
+ |->vm_unwind_stub: // Jump to exit stub from unwinder.
+ | jr CARG1
+ |. move ra, CARG2
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | b >2
+ |. li CARG2, LUA_MINSTACK
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | daddu RC, BASE, RC
+ | dsubu RA, RA, BASE
+ | sd BASE, L->base
+ | daddiu PC, PC, 4 // Must point after first instruction.
+ | sd RC, L->top
+ | srl CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | load_got lj_state_growstack
+ | sd PC, SAVE_PC
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | ld RC, L->top
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsubu RC, RC, BASE
+ | cleartp LFUNC:RB
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | move L, CARG1
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | move BASE, CARG2
+ | lbu TMP1, L->status
+ | sd L, SAVE_L
+ | li PC, FRAME_CP
+ | daddiu TMP0, sp, CFRAME_RESUME
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw r0, SAVE_NRES
+ | sw r0, SAVE_ERRF
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sd r0, SAVE_CFRAME
+ | beqz TMP1, >3
+ |. sd TMP0, L->cframe
+ |
+ | // Resume after yield (like a return).
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | move RA, BASE
+ | ld BASE, L->base
+ | ld TMP1, L->top
+ | ld PC, FRAME_PC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | dsubu RD, TMP1, BASE
+ | .FPU mtc1 TMP3, TOBIT
+ | sb r0, L->status
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | daddiu RD, RD, 8
+ | st_vmstate
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | li TISNIL, LJ_TNIL
+ | beqz TMP0, ->BC_RET_Z
+ |. li TISNUM, LJ_TISNUM
+ | b ->vm_return
+ |. nop
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | sw CARG4, SAVE_ERRF
+ | b >1
+ |. li PC, FRAME_CP
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | ld TMP1, L:CARG1->cframe
+ | move L, CARG1
+ | sw CARG3, SAVE_NRES
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sd CARG1, SAVE_L
+ | move BASE, CARG2
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sd TMP1, SAVE_CFRAME
+ | sd sp, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | ld TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | ld TMP1, L->top
+ | .FPU mtc1 TMP3, TOBIT
+ | daddu PC, PC, BASE
+ | dsubu NARGS8:RC, TMP1, BASE
+ | li TISNUM, LJ_TISNUM
+ | dsubu PC, PC, TMP2 // PC = frame delta + frame type
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | li_vmstate INTERP
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | checkfunc LFUNC:RB, ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | move L, CARG1
+ | ld TMP0, L:CARG1->stack
+ | sd CARG1, SAVE_L
+ | ld TMP1, L->top
+ | ld DISPATCH, L->glref // Setup pointer to dispatch table.
+ | sd CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | dsubu TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | ld TMP1, L->cframe
+ | daddiu DISPATCH, DISPATCH, GG_G2DISP
+ | sw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | sw r0, SAVE_ERRF // No error function.
+ | sd TMP1, SAVE_CFRAME
+ | sd sp, L->cframe // Add our C frame to cframe chain.
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | jalr CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |. move CFUNCADDR, CARG4
+ | move BASE, CRET1
+ | bnez CRET1, <3 // Else continue with the call.
+ |. li PC, FRAME_CP
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |. nop
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | ld TMP0, -32(BASE) // Continuation.
+ | move RB, BASE
+ | move BASE, TMP2 // Restore caller BASE.
+ | ld LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | sltiu AT, TMP0, 2
+ |.endif
+ | ld PC, -24(RB) // Restore PC from [cont|PC].
+ | cleartp LFUNC:TMP1
+ | daddu TMP2, RA, RD
+ |.if FFI
+ | bnez AT, >1
+ |.endif
+ |. sd TISNIL, -8(TMP2) // Ensure one valid arg.
+ | ld TMP1, LFUNC:TMP1->pc
+ | // BASE = base, RA = resultptr, RB = meta base
+ | jr TMP0 // Jump to continuation.
+ |. ld KBASE, PC2PROTO(k)(TMP1)
+ |
+ |.if FFI
+ |1:
+ | bnez TMP0, ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ |. daddiu TMP1, RB, -32
+ | b ->vm_call_tail
+ |. dsubu RC, TMP1, BASE
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | daddiu CARG2, RB, -32
+ | ld CRET1, 0(RA)
+ | decode_RB8a MULTRES, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b MULTRES
+ | decode_RA8b RA
+ | daddu TMP1, BASE, MULTRES
+ | sd BASE, L->base
+ | dsubu CARG3, CARG2, TMP1
+ | bne TMP1, CARG2, ->BC_CAT_Z
+ |. sd CRET1, 0(CARG2)
+ | daddu RA, BASE, RA
+ | b ->cont_nop
+ |. sd CRET1, 0(RA)
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | settp STR:RC, TMP0
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tgets:
+ | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | li TMP1, LJ_TSTR
+ | settp TAB:RB, TMP0
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sd TAB:RB, 0(CARG2)
+ | settp STR:RC, TMP1
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | settp TMP0, TISNUM
+ | sd TMP0, 0(CARG3)
+ |
+ |->vmeta_tgetv:
+ |1:
+ | load_got lj_meta_tget
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. daddiu TMP1, BASE, -FRAME_CONT
+ | ld CARG1, 0(CRET1)
+ | ins_next1
+ | sd CARG1, 0(RA)
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | ld BASE, L->top
+ | sd PC, -24(BASE) // [cont|PC]
+ | dsubu PC, BASE, TMP1
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | cleartp LFUNC:RB
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 16 // 2 args for func(t, k).
+ |
+ |->vmeta_tgetr:
+ | load_got lj_tab_getinth
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. nop
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->BC_TGETR_Z
+ |. move CARG2, TISNIL
+ | b ->BC_TGETR_Z
+ |. ld CARG2, 0(CRET1)
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TSTR
+ | settp STR:RC, TMP0
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tsets:
+ | daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | li TMP0, LJ_TTAB
+ | li TMP1, LJ_TSTR
+ | settp TAB:RB, TMP0
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv2)
+ | sd TAB:RB, 0(CARG2)
+ | settp STR:RC, TMP1
+ | b >1
+ |. sd STR:RC, 0(CARG3)
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ | daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | settp TMP0, TISNUM
+ | sd TMP0, 0(CARG3)
+ |
+ |->vmeta_tsetv:
+ |1:
+ | load_got lj_meta_tset
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | beqz CRET1, >3
+ |. ld CARG1, 0(RA)
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ | sd CARG1, 0(CRET1)
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | daddiu TMP1, BASE, -FRAME_CONT
+ | ld BASE, L->top
+ | sd PC, -24(BASE) // [cont|PC]
+ | dsubu PC, BASE, TMP1
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | cleartp LFUNC:RB
+ | sd CARG1, 16(BASE) // Copy value to third argument.
+ | b ->vm_call_dispatch_f
+ |. li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |
+ |->vmeta_tsetr:
+ | load_got lj_tab_setinth
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | b ->BC_TSETR_Z
+ |. nop
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | // RA/RD point to o1/o2.
+ | move CARG2, RA
+ | move CARG3, RD
+ | load_got lj_meta_comp
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | decode_OP1 CARG4, INS
+ | call_intern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | sltiu AT, CRET1, 2
+ | beqz AT, ->vmeta_binop
+ |. negu TMP2, CRET1
+ |4:
+ | lhu RD, OFS_RD(PC)
+ | daddiu PC, PC, 4
+ | lui TMP1, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sll RD, RD, 2
+ | addu RD, RD, TMP1
+ | and RD, RD, TMP2
+ | daddu PC, PC, RD
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lbu TMP1, -4+OFS_RA(PC)
+ | ld CRET1, 0(RA)
+ | sll TMP1, TMP1, 3
+ | daddu TMP1, BASE, TMP1
+ | b ->cont_nop
+ |. sd CRET1, 0(TMP1)
+ |
+ |->cont_condt: // RA = resultptr
+ | ld TMP0, 0(RA)
+ | gettp TMP0, TMP0
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. negu TMP2, AT // Branch if result is true.
+ |
+ |->cont_condf: // RA = resultptr
+ | ld TMP0, 0(RA)
+ | gettp TMP0, TMP0
+ | sltiu AT, TMP0, LJ_TISTRUECOND
+ | b <4
+ |. addiu TMP2, AT, -1 // Branch if result is false.
+ |
+ |->vmeta_equal:
+ | // CARG1/CARG2 point to o1/o2. TMP0 is set to 0/1.
+ | load_got lj_meta_equal
+ | cleartp LFUNC:CARG3, CARG2
+ | cleartp LFUNC:CARG2, CARG1
+ | move CARG4, TMP0
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | load_got lj_meta_equal_cd
+ | move CARG2, INS
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ |. move CARG1, L
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vmeta_istype:
+ | load_got lj_meta_istype
+ | daddiu PC, PC, -4
+ | sd BASE, L->base
+ | srl CARG2, RA, 3
+ | srl CARG3, RD, 3
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ |. move CARG1, L
+ | b ->cont_nop
+ |. nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_unm:
+ | move RC, RB
+ |
+ |->vmeta_arith:
+ | load_got lj_meta_arith
+ | sd BASE, L->base
+ | move CARG2, RA
+ | sd PC, SAVE_PC
+ | move CARG3, RB
+ | move CARG4, RC
+ | decode_OP1 CARG5, INS // CARG5 == RB.
+ | call_intern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | beqz CRET1, ->cont_nop
+ |. nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | dsubu TMP1, CRET1, BASE
+ | sd PC, -24(CRET1) // [cont|PC]
+ | move TMP2, BASE
+ | daddiu PC, TMP1, FRAME_CONT
+ | move BASE, CRET1
+ | b ->vm_call_dispatch
+ |. li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ |
+ |->vmeta_len:
+ | // CARG2 already set by BC_LEN.
+#if LJ_52
+ | move MULTRES, CARG1
+#endif
+ | load_got lj_meta_len
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_len // (lua_State *L, TValue *o)
+ |. move CARG1, L
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | bnez CRET1, ->vmeta_binop // Binop call for compatibility.
+ |. nop
+ | b ->BC_LEN_Z
+ |. move CARG1, MULTRES
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+ |. nop
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sd TMP2, L->base // This is the caller's base!
+ | daddiu CARG2, BASE, -16
+ | sd PC, SAVE_PC
+ | daddu CARG3, BASE, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | ld LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | cleartp LFUNC:RB
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | load_got lj_meta_call
+ | sd BASE, L->base
+ | daddiu CARG2, RA, -16
+ | sd PC, SAVE_PC
+ | daddu CARG3, RA, RC
+ | move MULTRES, NARGS8:RC
+ | call_intern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ |. move CARG1, L
+ | ld RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | ld TMP1, FRAME_PC(BASE)
+ | daddiu NARGS8:RC, MULTRES, 8 // Got one more argument now.
+ | b ->BC_CALLT_Z
+ |. cleartp LFUNC:CARG3, RB
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | load_got lj_meta_for
+ | sd BASE, L->base
+ | move CARG2, RA
+ | sd PC, SAVE_PC
+ | move MULTRES, INS
+ | call_intern lj_meta_for // (lua_State *L, TValue *base)
+ |. move CARG1, L
+ |.if JIT
+ | decode_OP1 TMP0, MULTRES
+ | li AT, BC_JFORI
+ |.endif
+ | decode_RA8a RA, MULTRES
+ | decode_RD8a RD, MULTRES
+ | decode_RA8b RA
+ |.if JIT
+ | beq TMP0, AT, =>BC_JFORI
+ |. decode_RD8b RD
+ | b =>BC_FORI
+ |. nop
+ |.else
+ | b =>BC_FORI
+ |. decode_RD8b RD
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ld CARG1, 0(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | sltiu AT, NARGS8:RC, 16
+ | ld CARG1, 0(BASE)
+ | bnez AT, ->fff_fallback
+ |. ld CARG2, 8(BASE)
+ |.endmacro
+ |
+ |.macro .ffunc_n, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | ld CARG1, 0(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ | // Either ldc1 or the 1st instruction of checknum is in the delay slot.
+ | .FPU ldc1 FARG1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name // Caveat: has delay slot!
+ |->ff_ .. name:
+ | ld CARG1, 0(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | ld CARG2, 8(BASE)
+ | bnez AT, ->fff_fallback
+ |. gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | .FPU ldc1 FARG1, 0(BASE)
+ | and TMP0, TMP0, TMP1
+ | .FPU ldc1 FARG2, 8(BASE)
+ | beqz TMP0, ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1 and has delay slot!
+ |// MIPSR6: no delay slot, but a forbidden slot.
+ |.macro ffgccheck
+ | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | dsubu AT, TMP0, TMP1
+ |.if MIPSR6
+ | bgezalc AT, ->fff_gcstep
+ |.else
+ | bgezal AT, ->fff_gcstep
+ |.endif
+ |.endmacro
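+ |
+ |// In C terms the inlined check is roughly (sketch, not part of the build):
+ |//   if ((int64_t)(g->gc.total - g->gc.threshold) >= 0)
+ |//     lj_gc_step(L);  // Via ->fff_gcstep below, which preserves ra.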
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |.ffunc_1 assert
+ | gettp AT, CARG1
+ | sltiu AT, AT, LJ_TISTRUECOND
+ | beqz AT, ->fff_fallback
+ |. daddiu RA, BASE, -16
+ | ld PC, FRAME_PC(BASE)
+ | addiu RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | daddu TMP2, RA, RD
+ | daddiu TMP1, BASE, 8
+ | beq BASE, TMP2, ->fff_res // Done if exactly 1 argument.
+ |. sd CARG1, 0(RA)
+ |1:
+ | ld CRET1, 0(TMP1)
+ | sd CRET1, -16(TMP1)
+ | bne TMP1, TMP2, <1
+ |. daddiu TMP1, TMP1, 8
+ | b ->fff_res
+ |. nop
+ |
+ |.ffunc_1 type
+ | gettp TMP0, CARG1
+ | sltu TMP1, TISNUM, TMP0
+ | not TMP2, TMP0
+ | li TMP3, ~LJ_TISNUM
+ |.if MIPSR6
+ | selnez TMP2, TMP2, TMP1
+ | seleqz TMP3, TMP3, TMP1
+ | or TMP2, TMP2, TMP3
+ |.else
+ | movz TMP2, TMP3, TMP1
+ |.endif
+ | dsll TMP2, TMP2, 3
+ | daddu TMP2, CFUNC:RB, TMP2
+ | b ->fff_restv
+ |. ld CARG1, CFUNC:TMP2->upvalue
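+ |// Branch-free result lookup: the interned type-name strings live in the
+ |// closure's upvalues, so the result is roughly upvalue[~tag], with every
+ |// tag >= LJ_TISNUM collapsed to the number slot via ~LJ_TISNUM above.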
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | gettp TMP2, CARG1
+ | daddiu TMP0, TMP2, -LJ_TTAB
+ | daddiu TMP1, TMP2, -LJ_TUDATA
+ |.if MIPSR6
+ | selnez TMP0, TMP1, TMP0
+ |.else
+ | movn TMP0, TMP1, TMP0
+ |.endif
+ | bnez TMP0, >6
+ |. cleartp TAB:CARG1
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | ld TAB:RB, TAB:CARG1->metatable
+ |2:
+ | ld STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beqz TAB:RB, ->fff_restv
+ |. li CARG1, LJ_TNIL
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | dsll TMP0, TMP1, 5
+ | dsll TMP1, TMP1, 3
+ | dsubu TMP1, TMP0, TMP1
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | li CARG4, LJ_TSTR
+ | settp STR:RC, CARG4 // Tagged key to look for.
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | ld TMP0, NODE:TMP2->key
+ | ld CARG1, NODE:TMP2->val
+ | ld NODE:TMP2, NODE:TMP2->next
+ | beq RC, TMP0, >5
+ |. li AT, LJ_TTAB
+ | bnez NODE:TMP2, <3
+ |. nop
+ |4:
+ | move CARG1, RB
+ | b ->fff_restv // Not found, keep default result.
+ |. settp CARG1, AT
+ |5:
+ | bne CARG1, TISNIL, ->fff_restv
+ |. nop
+ | b <4 // Ditto for nil value.
+ |. nop
+ |
+ |6:
+ | sltiu AT, TMP2, LJ_TISNUM
+ |.if MIPSR6
+ | selnez TMP0, TISNUM, AT
+ | seleqz AT, TMP2, AT
+ | or TMP2, TMP0, AT
+ |.else
+ | movn TMP2, TISNUM, AT
+ |.endif
+ | dsll TMP2, TMP2, 3
+ | dsubu TMP0, DISPATCH, TMP2
+ | b <2
+ |. ld TAB:RB, DISPATCH_GL(gcroot[GCROOT_BASEMT])-8(TMP0)
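+ |// The dsll/dsubu pair above computes idx*32 - idx*8 = idx*24, i.e. a
+ |// multiply-free idx*sizeof(Node). C sketch, assuming the usual 24-byte
+ |// Node layout:
+ |//   Node *n = &tab->node[str->sid & tab->hmask];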
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktp TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | gettp TMP3, CARG2
+ | ld TAB:TMP0, TAB:TMP1->metatable
+ | lbu TMP2, TAB:TMP1->marked
+ | daddiu AT, TMP3, -LJ_TTAB
+ | cleartp TAB:CARG2
+ | or AT, AT, TAB:TMP0
+ | bnez AT, ->fff_fallback
+ |. andi AT, TMP2, LJ_GC_BLACK // isblack(table)
+ | beqz AT, ->fff_restv
+ |. sd TAB:CARG2, TAB:TMP1->metatable
+ | barrierback TAB:TMP1, TMP2, TMP0, ->fff_restv
+ |
+ |.ffunc rawget
+ | ld CARG2, 0(BASE)
+ | sltiu AT, NARGS8:RC, 16
+ | load_got lj_tab_get
+ | gettp TMP0, CARG2
+ | cleartp CARG2
+ | daddiu TMP0, TMP0, -LJ_TTAB
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. daddiu CARG3, BASE, 8
+ | call_intern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ |. move CARG1, L
+ | b ->fff_restv
+ |. ld CARG1, 0(CRET1)
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | ld CARG1, 0(BASE)
+ | xori AT, NARGS8:RC, 8 // Exactly one number argument.
+ | gettp TMP1, CARG1
+ | sltu TMP0, TISNUM, TMP1
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback
+ |. nop
+ | b ->fff_restv
+ |. nop
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | gettp TMP0, CARG1
+ | daddiu AT, TMP0, -LJ_TSTR
+ | // A __tostring method in the string base metatable is ignored.
+ | beqz AT, ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ |. ld TMP1, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | sltu TMP0, TISNUM, TMP0
+ | or TMP0, TMP0, TMP1
+ | bnez TMP0, ->fff_fallback
+ |. sd BASE, L->base // Add frame since C call can throw.
+ |.if MIPSR6
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | ffgccheck
+ |.else
+ | ffgccheck
+ |. sd PC, SAVE_PC // Redundant (but a defined value).
+ |.endif
+ | load_got lj_strfmt_number
+ | move CARG1, L
+ | call_intern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |. move CARG2, BASE
+ | // Returns GCstr *.
+ | li AT, LJ_TSTR
+ | settp CRET1, AT
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | checktp CARG1, -LJ_TTAB, ->fff_fallback
+ | daddu TMP2, BASE, NARGS8:RC
+ | sd TISNIL, 0(TMP2) // Set missing 2nd arg to nil.
+ | load_got lj_tab_next
+ | ld PC, FRAME_PC(BASE)
+ | daddiu CARG2, BASE, 8
+ | call_intern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ |. daddiu CARG3, BASE, -16
+ | // Returns 1=found, 0=end, -1=error.
+ | daddiu RA, BASE, -16
+ | bgtz CRET1, ->fff_res // Found key/value.
+ |. li RD, (2+1)*8
+ | beqz CRET1, ->fff_restv // End of traversal: return nil.
+ |. move CARG1, TISNIL
+ | ld CFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp CFUNC:RB
+ | b ->fff_fallback // Invalid key.
+ |. li RC, 2*8
+ |
+ |.ffunc_1 pairs
+ | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | ld PC, FRAME_PC(BASE)
+#if LJ_52
+ | ld TAB:TMP2, TAB:TMP1->metatable
+ | ld TMP0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ld TMP0, CFUNC:RB->upvalue[0]
+#endif
+ |. daddiu RA, BASE, -16
+ | sd TISNIL, 0(BASE)
+ | sd CARG1, -8(BASE)
+ | sd TMP0, 0(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |.ffunc_2 ipairs_aux
+ | checktab CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ |. lw TMP0, TAB:CARG1->asize
+ | ld TMP1, TAB:CARG1->array
+ | ld PC, FRAME_PC(BASE)
+ | sextw TMP2, CARG2
+ | addiu TMP2, TMP2, 1
+ | sltu AT, TMP2, TMP0
+ | daddiu RA, BASE, -16
+ | zextw TMP0, TMP2
+ | settp TMP0, TISNUM
+ | beqz AT, >2 // Not in array part?
+ |. sd TMP0, 0(RA)
+ | dsll TMP3, TMP2, 3
+ | daddu TMP3, TMP1, TMP3
+ | ld TMP1, 0(TMP3)
+ |1:
+ | beq TMP1, TISNIL, ->fff_res // End of iteration, return 0 results.
+ |. li RD, (0+1)*8
+ | sd TMP1, -8(BASE)
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lw TMP0, TAB:CARG1->hmask
+ | load_got lj_tab_getinth
+ | beqz TMP0, ->fff_res
+ |. li RD, (0+1)*8
+ | call_intern lj_tab_getinth // (GCtab *t, int32_t key)
+ |. move CARG2, TMP2
+ | // Returns cTValue * or NULL.
+ | beqz CRET1, ->fff_res
+ |. li RD, (0+1)*8
+ | b <1
+ |. ld TMP1, 0(CRET1)
+ |
+ |.ffunc_1 ipairs
+ | checktp TAB:TMP1, CARG1, -LJ_TTAB, ->fff_fallback
+ | ld PC, FRAME_PC(BASE)
+#if LJ_52
+ | ld TAB:TMP2, TAB:TMP1->metatable
+ | ld CFUNC:TMP0, CFUNC:RB->upvalue[0]
+ | bnez TAB:TMP2, ->fff_fallback
+#else
+ | ld TMP0, CFUNC:RB->upvalue[0]
+#endif
+ | daddiu RA, BASE, -16
+ | dsll AT, TISNUM, 47
+ | sd CARG1, -8(BASE)
+ | sd AT, 0(BASE)
+ | sd CFUNC:TMP0, 0(RA)
+ | b ->fff_res
+ |. li RD, (3+1)*8
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | daddiu NARGS8:RC, NARGS8:RC, -8
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | bltz NARGS8:RC, ->fff_fallback
+ |. move TMP2, BASE
+ | daddiu BASE, BASE, 16
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP3, HOOK_ACTIVE_SHIFT
+ | andi TMP3, TMP3, 1
+ | daddiu PC, TMP3, 16+FRAME_PCALL
+ | beqz NARGS8:RC, ->vm_call_dispatch
+ |1:
+ |. daddu TMP0, BASE, NARGS8:RC
+ |2:
+ | ld TMP1, -16(TMP0)
+ | sd TMP1, -8(TMP0)
+ | daddiu TMP0, TMP0, -8
+ | bne TMP0, BASE, <2
+ |. nop
+ | b ->vm_call_dispatch
+ |. nop
+ |
+ |.ffunc xpcall
+ | daddiu NARGS8:TMP0, NARGS8:RC, -16
+ | ld CARG1, 0(BASE)
+ | ld CARG2, 8(BASE)
+ | bltz NARGS8:TMP0, ->fff_fallback
+ |. lbu TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | gettp AT, CARG2
+ | daddiu AT, AT, -LJ_TFUNC
+ | bnez AT, ->fff_fallback // Traceback must be a function.
+ |. move TMP2, BASE
+ | move NARGS8:RC, NARGS8:TMP0
+ | daddiu BASE, BASE, 24
+ | // Remember active hook before pcall.
+ | srl TMP3, TMP1, HOOK_ACTIVE_SHIFT
+ | sd CARG2, 0(TMP2) // Swap function and traceback.
+ | andi TMP3, TMP3, 1
+ | sd CARG1, 8(TMP2)
+ | beqz NARGS8:RC, ->vm_call_dispatch
+ |. daddiu PC, TMP3, 24+FRAME_PCALL
+ | b <1
+ |. nop
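+ |// Both pcall and xpcall fold the active-hook bit into the frame type:
+ |// PC = frame delta + FRAME_PCALL + hookbit, which yields FRAME_PCALLH
+ |// when a hook was active, so unwinding can restore the hook state.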
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | checktp CARG1, CARG1, -LJ_TTHREAD, ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | ld L:CARG1, CFUNC:RB->upvalue[0].gcr
+ | cleartp L:CARG1
+ |.endif
+ | lbu TMP0, L:CARG1->status
+ | ld TMP1, L:CARG1->cframe
+ | ld CARG2, L:CARG1->top
+ | ld TMP2, L:CARG1->base
+ | addiu AT, TMP0, -LUA_YIELD
+ | daddu CARG3, CARG2, TMP0
+ | daddiu TMP3, CARG2, 8
+ |.if MIPSR6
+ | seleqz CARG2, CARG2, AT
+ | selnez TMP3, TMP3, AT
+ | bgtz AT, ->fff_fallback // st > LUA_YIELD?
+ |. or CARG2, TMP3, CARG2
+ |.else
+ | bgtz AT, ->fff_fallback // st > LUA_YIELD?
+ |. movn CARG2, TMP3, AT
+ |.endif
+ | xor TMP2, TMP2, CARG3
+ | bnez TMP1, ->fff_fallback // cframe != 0?
+ |. or AT, TMP2, TMP0
+ | ld TMP0, L:CARG1->maxstack
+ | beqz AT, ->fff_fallback // base == top && st == 0?
+ |. ld PC, FRAME_PC(BASE)
+ | daddu TMP2, CARG2, NARGS8:RC
+ | sltu AT, TMP0, TMP2
+ | bnez AT, ->fff_fallback // Stack overflow?
+ |. sd PC, SAVE_PC
+ | sd BASE, L->base
+ |1:
+ |.if resume
+ | daddiu BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | daddiu NARGS8:RC, NARGS8:RC, -8
+ | daddiu TMP2, TMP2, -8
+ |.endif
+ | sd TMP2, L:CARG1->top
+ | daddu TMP1, BASE, NARGS8:RC
+ | move CARG3, CARG2
+ | sd BASE, L->top
+ |2: // Move args to coroutine.
+ | ld CRET1, 0(BASE)
+ | sltu AT, BASE, TMP1
+ | beqz AT, >3
+ |. daddiu BASE, BASE, 8
+ | sd CRET1, 0(CARG3)
+ | b <2
+ |. daddiu CARG3, CARG3, 8
+ |3:
+ | bal ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |. move L:RA, L:CARG1
+ | // Returns thread status.
+ |4:
+ | ld TMP2, L:RA->base
+ | sltiu AT, CRET1, LUA_YIELD+1
+ | ld TMP3, L:RA->top
+ | li_vmstate INTERP
+ | ld BASE, L->base
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | st_vmstate
+ | beqz AT, >8
+ |. dsubu RD, TMP3, TMP2
+ | ld TMP0, L->maxstack
+ | beqz RD, >6 // No results?
+ |. daddu TMP1, BASE, RD
+ | sltu AT, TMP0, TMP1
+ | bnez AT, >9 // Need to grow stack?
+ |. daddu TMP3, TMP2, RD
+ | sd TMP2, L:RA->top // Clear coroutine stack.
+ | move TMP1, BASE
+ |5: // Move results from coroutine.
+ | ld CRET1, 0(TMP2)
+ | daddiu TMP2, TMP2, 8
+ | sltu AT, TMP2, TMP3
+ | sd CRET1, 0(TMP1)
+ | bnez AT, <5
+ |. daddiu TMP1, TMP1, 8
+ |6:
+ | andi TMP0, PC, FRAME_TYPE
+ |.if resume
+ | mov_true TMP1
+ | daddiu RA, BASE, -8
+ | sd TMP1, -8(BASE) // Prepend true to results.
+ | daddiu RD, RD, 16
+ |.else
+ | move RA, BASE
+ | daddiu RD, RD, 8
+ |.endif
+ |7:
+ | sd PC, SAVE_PC
+ | beqz TMP0, ->BC_RET_Z
+ |. move MULTRES, RD
+ | b ->vm_return
+ |. nop
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | daddiu TMP3, TMP3, -8
+ | mov_false TMP1
+ | ld CRET1, 0(TMP3)
+ | sd TMP3, L:RA->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | sd TMP1, -8(BASE) // Prepend false to results.
+ | daddiu RA, BASE, -8
+ | sd CRET1, 0(BASE) // Copy error message.
+ | b <7
+ |. andi TMP0, PC, FRAME_TYPE
+ |.else
+ | load_got lj_ffh_coroutine_wrap_err
+ | move CARG2, L:RA
+ | call_intern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |. move CARG1, L
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | load_got lj_state_growstack
+ | srl CARG2, RD, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | b <4
+ |. li CRET1, 0
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | ld TMP0, L->cframe
+ | daddu TMP1, BASE, NARGS8:RC
+ | sd BASE, L->base
+ | andi TMP0, TMP0, CFRAME_RESUME
+ | sd TMP1, L->top
+ | beqz TMP0, ->fff_fallback
+ |. li CRET1, LUA_YIELD
+ | sd r0, L->cframe
+ | b ->vm_leave_unw
+ |. sb CRET1, L->status
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | gettp CARG2, CARG1
+ | daddiu AT, CARG2, -LJ_TISNUM
+ | bnez AT, >1
+ |. sextw TMP1, CARG1
+ | sra TMP0, TMP1, 31 // Extract sign.
+ | xor TMP1, TMP1, TMP0
+ | dsubu CARG1, TMP1, TMP0
+ | dsll TMP3, CARG1, 32
+ | bgez TMP3, ->fff_restv
+ |. settp CARG1, TISNUM
+ | li CARG1, 0x41e0 // 2^31 as a double.
+ | b ->fff_restv
+ |. dsll CARG1, CARG1, 48
+ |1:
+ | sltiu AT, CARG2, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. dextm CARG1, CARG1, 0, 30
+ |// fallthrough
+ |
+ |->fff_restv:
+ | // CARG1 = TValue result.
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | sd CARG1, -16(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->vm_return
+ |. move MULTRES, RD
+ | lw INS, -4(PC)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6 // More results expected?
+ |. decode_RA8a TMP0, INS
+ | decode_RA8b TMP0
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | dsubu BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | daddu TMP1, RA, RD
+ | daddiu RD, RD, 8
+ | b <5
+ |. sd TISNIL, -8(TMP1)
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ |. load_got func
+ | call_extern
+ |. nop
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ |// TODO: Return integer type if result is integer (own sf implementation).
+ |.macro math_round, func
+ |->ff_math_ .. func:
+ | ld CARG1, 0(BASE)
+ | beqz NARGS8:RC, ->fff_fallback
+ |. gettp TMP0, CARG1
+ | beq TMP0, TISNUM, ->fff_restv
+ |. sltu AT, TMP0, TISNUM
+ | beqz AT, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | bal ->vm_ .. func
+ |. nop
+ |.else
+ |. load_got func
+ | call_extern
+ |. nop
+ |.endif
+ | b ->fff_resn
+ |. nop
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | li AT, 8
+ | bne NARGS8:RC, AT, ->fff_fallback // Exactly 1 argument.
+ |. ld CARG1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |. load_got log
+ |.if FPU
+ | call_extern
+ |. ldc1 FARG1, 0(BASE)
+ |.else
+ | call_extern
+ |. nop
+ |.endif
+ | b ->fff_resn
+ |. nop
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.if FPU
+ |.ffunc_n math_sqrt
+ |. sqrt.d FRET1, FARG1
+ |// fallthrough to ->fff_resn
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |->fff_resn:
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | b ->fff_res1
+ |.if FPU
+ |. sdc1 FRET1, 0(RA)
+ |.else
+ |. sd CRET1, 0(RA)
+ |.endif
+ |
+ |.ffunc_2 math_ldexp
+ | checknum CARG1, ->fff_fallback
+ | checkint CARG2, ->fff_fallback
+ |. load_got ldexp
+ | .FPU ldc1 FARG1, 0(BASE)
+ | call_extern
+ |. lw CARG2, 8+LO(BASE)
+ | b ->fff_resn
+ |. nop
+ |
+ |.ffunc_n math_frexp
+ | load_got frexp
+ | ld PC, FRAME_PC(BASE)
+ | call_extern
+ |. daddiu CARG2, DISPATCH, DISPATCH_GL(tmptv)
+ | lw TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | daddiu RA, BASE, -16
+ |.if FPU
+ | mtc1 TMP1, FARG2
+ | sdc1 FRET1, 0(RA)
+ | cvt.d.w FARG2, FARG2
+ | sdc1 FARG2, 8(RA)
+ |.else
+ | sd CRET1, 0(RA)
+ | zextw TMP1, TMP1
+ | settp TMP1, TISNUM
+ | sd TMP1, 8(RA)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.ffunc_n math_modf
+ | load_got modf
+ | ld PC, FRAME_PC(BASE)
+ | call_extern
+ |. daddiu CARG2, BASE, -16
+ | daddiu RA, BASE, -16
+ |.if FPU
+ | sdc1 FRET1, -8(BASE)
+ |.else
+ | sd CRET1, -8(BASE)
+ |.endif
+ | b ->fff_res
+ |. li RD, (2+1)*8
+ |
+ |.macro math_minmax, name, intins, intinsc, fpins
+ | .ffunc_1 name
+ | daddu TMP3, BASE, NARGS8:RC
+ | checkint CARG1, >5
+ |. daddiu TMP2, BASE, 8
+ |1: // Handle integers.
+ | beq TMP2, TMP3, ->fff_restv
+ |. ld CARG2, 0(TMP2)
+ | checkint CARG2, >3
+ |. sextw CARG1, CARG1
+ | lw CARG2, LO(TMP2)
+ | slt AT, CARG1, CARG2
+ |.if MIPSR6
+ | intins TMP1, CARG2, AT
+ | intinsc CARG1, CARG1, AT
+ | or CARG1, CARG1, TMP1
+ |.else
+ | intins CARG1, CARG2, AT
+ |.endif
+ | daddiu TMP2, TMP2, 8
+ | zextw CARG1, CARG1
+ | b <1
+ |. settp CARG1, TISNUM
+ |
+ |3: // Convert intermediate result to number and continue with number loop.
+ | checknum CARG2, ->fff_fallback
+ |.if FPU
+ |. mtc1 CARG1, FRET1
+ | cvt.d.w FRET1, FRET1
+ | b >7
+ |. ldc1 FARG1, 0(TMP2)
+ |.else
+ |. nop
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b >7
+ |. nop
+ |.endif
+ |
+ |5:
+ | .FPU ldc1 FRET1, 0(BASE)
+ | checknum CARG1, ->fff_fallback
+ |6: // Handle numbers.
+ |. ld CARG2, 0(TMP2)
+ | beq TMP2, TMP3, ->fff_resn
+ |.if FPU
+ | ldc1 FARG1, 0(TMP2)
+ |.else
+ | move CRET1, CARG1
+ |.endif
+ | checknum CARG2, >8
+ |. nop
+ |7:
+ |.if FPU
+ |.if MIPSR6
+ | fpins FRET1, FRET1, FARG1
+ |.else
+ |.if fpins // ismax
+ | c.olt.d FARG1, FRET1
+ |.else
+ | c.olt.d FRET1, FARG1
+ |.endif
+ | movf.d FRET1, FARG1
+ |.endif
+ |.else
+ |.if fpins // ismax
+ | bal ->vm_sfcmpogt
+ |.else
+ | bal ->vm_sfcmpolt
+ |.endif
+ |. nop
+ |.if MIPSR6
+ | seleqz AT, CARG2, CRET1
+ | selnez CARG1, CARG1, CRET1
+ | or CARG1, CARG1, AT
+ |.else
+ | movz CARG1, CARG2, CRET1
+ |.endif
+ |.endif
+ | b <6
+ |. daddiu TMP2, TMP2, 8
+ |
+ |8: // Convert integer to number and continue with number loop.
+ | checkint CARG2, ->fff_fallback
+ |.if FPU
+ |. lwc1 FARG1, LO(TMP2)
+ | b <7
+ |. cvt.d.w FARG1, FARG1
+ |.else
+ |. lw CARG2, LO(TMP2)
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <7
+ |. nop
+ |.endif
+ |
+ |.endmacro
+ |
+ |.if MIPSR6
+ | math_minmax math_min, seleqz, selnez, min.d
+ | math_minmax math_max, selnez, seleqz, max.d
+ |.else
+ | math_minmax math_min, movz, _, 0
+ | math_minmax math_max, movn, _, 1
+ |.endif
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | ld CARG1, 0(BASE)
+ | gettp TMP0, CARG1
+ | xori AT, NARGS8:RC, 8
+ | daddiu TMP0, TMP0, -LJ_TSTR
+ | or AT, AT, TMP0
+ | bnez AT, ->fff_fallback // Need exactly 1 string argument.
+ |. cleartp STR:CARG1
+ | lw TMP0, STR:CARG1->len
+ | daddiu RA, BASE, -16
+ | ld PC, FRAME_PC(BASE)
+ | sltu RD, r0, TMP0
+ | lbu TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addiu RD, RD, 1
+ | sll RD, RD, 3 // RD = ((str->len != 0)+1)*8
+ | settp TMP1, TISNUM
+ | b ->fff_res
+ |. sd TMP1, 0(RA)
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ |.if not MIPSR6
+ |. nop
+ |.endif
+ | ld CARG1, 0(BASE)
+ | gettp TMP0, CARG1
+ | xori AT, NARGS8:RC, 8 // Exactly 1 argument.
+ | daddiu TMP0, TMP0, -LJ_TISNUM // Integer.
+ | li TMP1, 255
+ | sextw CARG1, CARG1
+ | or AT, AT, TMP0
+ | sltu TMP1, TMP1, CARG1 // !(255 < n).
+ | or AT, AT, TMP1
+ | bnez AT, ->fff_fallback
+ |. li CARG3, 1
+ | daddiu CARG2, sp, TMPD_OFS
+ | sb CARG1, TMPD
+ |->fff_newstr:
+ | load_got lj_str_new
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | call_intern lj_str_new // (lua_State *L, char *str, size_t l)
+ |. move CARG1, L
+ | // Returns GCstr *.
+ | ld BASE, L->base
+ |->fff_resstr:
+ | li AT, LJ_TSTR
+ | settp CRET1, AT
+ | b ->fff_restv
+ |. move CARG1, CRET1
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ |.if not MIPSR6
+ |. nop
+ |.endif
+ | addiu AT, NARGS8:RC, -16
+ | ld TMP0, 0(BASE)
+ | bltz AT, ->fff_fallback
+ |. gettp TMP3, TMP0
+ | cleartp STR:CARG1, TMP0
+ | ld CARG2, 8(BASE)
+ | beqz AT, >1
+ |. li CARG4, -1
+ | ld CARG3, 16(BASE)
+ | checkint CARG3, ->fff_fallback
+ |. sextw CARG4, CARG3
+ |1:
+ | checkint CARG2, ->fff_fallback
+ |. li AT, LJ_TSTR
+ | bne TMP3, AT, ->fff_fallback
+ |. sextw CARG3, CARG2
+ | lw CARG2, STR:CARG1->len
+ | // STR:CARG1 = str, CARG2 = str->len, CARG3 = start, CARG4 = end
+ | slt AT, CARG4, r0
+ | addiu TMP0, CARG2, 1
+ | addu TMP1, CARG4, TMP0
+ | slt TMP3, CARG3, r0
+ |.if MIPSR6
+ | seleqz CARG4, CARG4, AT
+ | selnez TMP1, TMP1, AT
+ | or CARG4, TMP1, CARG4 // if (end < 0) end += len+1
+ |.else
+ | movn CARG4, TMP1, AT // if (end < 0) end += len+1
+ |.endif
+ | addu TMP1, CARG3, TMP0
+ |.if MIPSR6
+ | selnez TMP1, TMP1, TMP3
+ | seleqz CARG3, CARG3, TMP3
+ | or CARG3, TMP1, CARG3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | seleqz CARG4, CARG4, AT // if (end < 0) end = 0
+ | selnez CARG3, CARG3, TMP3
+ | seleqz TMP2, TMP2, TMP3
+ | or CARG3, TMP2, CARG3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | seleqz CARG4, CARG4, AT
+ | selnez CARG2, CARG2, AT
+ | or CARG4, CARG2, CARG4 // if (end > len) end = len
+ |.else
+ | movn CARG3, TMP1, TMP3 // if (start < 0) start += len+1
+ | li TMP2, 1
+ | slt AT, CARG4, r0
+ | slt TMP3, r0, CARG3
+ | movn CARG4, r0, AT // if (end < 0) end = 0
+ | movz CARG3, TMP2, TMP3 // if (start < 1) start = 1
+ | slt AT, CARG2, CARG4
+ | movn CARG4, CARG2, AT // if (end > len) end = len
+ |.endif
+ | daddu CARG2, STR:CARG1, CARG3
+ | subu CARG3, CARG4, CARG3 // len = end - start
+ | daddiu CARG2, CARG2, sizeof(GCstr)-1
+ | bgez CARG3, ->fff_newstr
+ |. addiu CARG3, CARG3, 1 // len++
+ |->fff_emptystr: // Return empty string.
+ | li AT, LJ_TSTR
+ | daddiu STR:CARG1, DISPATCH, DISPATCH_GL(strempty)
+ | b ->fff_restv
+ |. settp CARG1, AT
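+ |// The clamping above, in C terms (sketch):
+ |//   if (start < 0) start += len+1;  if (start < 1) start = 1;
+ |//   if (end < 0) { end += len+1; if (end < 0) end = 0; }
+ |//   if (end > len) end = len;
+ |//   n = end - start + 1;  // n <= 0 yields the empty string.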
+ |
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ |. nop
+ | beqz NARGS8:RC, ->fff_fallback
+ |. ld CARG2, 0(BASE)
+ | checkstr STR:CARG2, ->fff_fallback
+ | daddiu SBUF:CARG1, DISPATCH, DISPATCH_GL(tmpbuf)
+ | load_got lj_buf_putstr_ .. name
+ | ld TMP0, SBUF:CARG1->b
+ | sd L, SBUF:CARG1->L
+ | sd BASE, L->base
+ | sd TMP0, SBUF:CARG1->w
+ | call_intern extern lj_buf_putstr_ .. name
+ |. sd PC, SAVE_PC
+ | load_got lj_buf_tostr
+ | call_intern lj_buf_tostr
+ |. move SBUF:CARG1, SBUF:CRET1
+ | b ->fff_resstr
+ |. ld BASE, L->base
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
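+ |// Hard-float path: adding TOBIT = 2^52 + 2^51 pushes the integer part
+ |// into the low mantissa bits, so for inputs in +-2^31 the two's-complement
+ |// result can be read straight back. C sketch (illustrative only):
+ |//   union { double d; uint64_t u; } v = { .d = x + 6755399441055744.0 };
+ |//   uint32_t bits = (uint32_t)v.u;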
+ |->vm_tobit_fb:
+ | beqz TMP1, ->fff_fallback
+ |.if FPU
+ |. ldc1 FARG1, 0(BASE)
+ | add.d FARG1, FARG1, TOBIT
+ | mfc1 CRET1, FARG1
+ | jr ra
+ |. zextw CRET1, CRET1
+ |.else
+ |// FP number to bit conversion for soft-float.
+ |->vm_tobit:
+ | dsll TMP0, CARG1, 1
+ | li CARG3, 1076
+ | dsrl AT, TMP0, 53
+ | dsubu CARG3, CARG3, AT
+ | sltiu AT, CARG3, 54
+ | beqz AT, >1
+ |. dextm TMP0, TMP0, 0, 20
+ | dinsu TMP0, AT, 21, 21
+ | slt AT, CARG1, r0
+ | dsrlv CRET1, TMP0, CARG3
+ | dsubu TMP0, r0, CRET1
+ |.if MIPSR6
+ | selnez TMP0, TMP0, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP0
+ |.else
+ | movn CRET1, TMP0, AT
+ |.endif
+ | jr ra
+ |. zextw CRET1, CRET1
+ |1:
+ | jr ra
+ |. move CRET1, r0
+ |
+ |// FP number to int conversion with a check for soft-float.
+ |// Modifies CARG1, CRET1, CRET2, TMP0, AT.
+ |->vm_tointg:
+ |.if JIT
+ | dsll CRET2, CARG1, 1
+ | beqz CRET2, >2
+ |. li TMP0, 1076
+ | dsrl AT, CRET2, 53
+ | dsubu TMP0, TMP0, AT
+ | sltiu AT, TMP0, 54
+ | beqz AT, >1
+ |. dextm CRET2, CRET2, 0, 20
+ | dinsu CRET2, AT, 21, 21
+ | slt AT, CARG1, r0
+ | dsrlv CRET1, CRET2, TMP0
+ | dsubu CARG1, r0, CRET1
+ |.if MIPSR6
+ | seleqz CRET1, CRET1, AT
+ | selnez CARG1, CARG1, AT
+ | or CRET1, CRET1, CARG1
+ |.else
+ | movn CRET1, CARG1, AT
+ |.endif
+ | li CARG1, 64
+ | subu TMP0, CARG1, TMP0
+ | dsllv CRET2, CRET2, TMP0 // Integer check.
+ | sextw AT, CRET1
+ | xor AT, CRET1, AT // Range check.
+ |.if MIPSR6
+ | seleqz AT, AT, CRET2
+ | selnez CRET2, CRET2, CRET2
+ | jr ra
+ |. or CRET2, AT, CRET2
+ |.else
+ | jr ra
+ |. movz CRET2, AT, CRET2
+ |.endif
+ |1:
+ | jr ra
+ |. li CRET2, 1
+ |2:
+ | jr ra
+ |. move CRET1, r0
+ |.endif
+ |.endif
+ |
+ |.macro .ffunc_bit, name
+ | .ffunc_1 bit_..name
+ | gettp TMP0, CARG1
+ | beq TMP0, TISNUM, >6
+ |. zextw CRET1, CARG1
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ |6:
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, bins
+ | .ffunc_bit name
+ | daddiu TMP2, BASE, 8
+ | daddu TMP3, BASE, NARGS8:RC
+ |1:
+ | beq TMP2, TMP3, ->fff_resi
+ |. ld CARG1, 0(TMP2)
+ | gettp TMP0, CARG1
+ |.if FPU
+ | bne TMP0, TISNUM, >2
+ |. daddiu TMP2, TMP2, 8
+ | zextw CARG1, CARG1
+ | b <1
+ |. bins CRET1, CRET1, CARG1
+ |2:
+ | ldc1 FARG1, -8(TMP2)
+ | sltiu AT, TMP0, LJ_TISNUM
+ | beqz AT, ->fff_fallback
+ |. add.d FARG1, FARG1, TOBIT
+ | mfc1 CARG1, FARG1
+ | zextw CARG1, CARG1
+ | b <1
+ |. bins CRET1, CRET1, CARG1
+ |.else
+ | beq TMP0, TISNUM, >2
+ |. move CRET2, CRET1
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ | move CARG1, CRET2
+ |2:
+ | zextw CARG1, CARG1
+ | bins CRET1, CRET1, CARG1
+ | b <1
+ |. daddiu TMP2, TMP2, 8
+ |.endif
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | dsrl TMP0, CRET1, 8
+ | dsrl TMP1, CRET1, 24
+ | andi TMP2, TMP0, 0xff00
+ | dins TMP1, CRET1, 24, 31
+ | dins TMP2, TMP0, 16, 23
+ | b ->fff_resi
+ |. or CRET1, TMP1, TMP2
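+ |// The two dins above merge the bytes without masking; within 32 bits the
+ |// net effect is the plain swap:
+ |//   (x >> 24) | ((x >> 8) & 0xff00) | ((x & 0xff00) << 8) | (x << 24)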
+ |
+ |.ffunc_bit bnot
+ | not CRET1, CRET1
+ | b ->fff_resi
+ |. zextw CRET1, CRET1
+ |
+ |.macro .ffunc_bit_sh, name, shins, shmod
+ | .ffunc_2 bit_..name
+ | gettp TMP0, CARG1
+ | beq TMP0, TISNUM, >1
+ |. nop
+ | bal ->vm_tobit_fb
+ |. sltiu TMP1, TMP0, LJ_TISNUM
+ | move CARG1, CRET1
+ |1:
+ | gettp TMP0, CARG2
+ | bne TMP0, TISNUM, ->fff_fallback
+ |. zextw CARG2, CARG2
+ | sextw CARG1, CARG1
+ |.if shmod == 1
+ | negu CARG2, CARG2
+ |.endif
+ | shins CRET1, CARG1, CARG2
+ | b ->fff_resi
+ |. zextw CRET1, CRET1
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, sllv, 0
+ |.ffunc_bit_sh rshift, srlv, 0
+ |.ffunc_bit_sh arshift, srav, 0
+ |.ffunc_bit_sh rol, rotrv, 1
+ |.ffunc_bit_sh ror, rotrv, 0
+ |
+ |.ffunc_bit tobit
+ |->fff_resi:
+ | ld PC, FRAME_PC(BASE)
+ | daddiu RA, BASE, -16
+ | settp CRET1, TISNUM
+ | b ->fff_res1
+ |. sd CRET1, -16(BASE)
+ |
+ |//-----------------------------------------------------------------------
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | ld TMP3, CFUNC:RB->f
+ | daddu TMP1, BASE, NARGS8:RC
+ | ld PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | daddiu TMP0, TMP1, 8*LUA_MINSTACK
+ | ld TMP2, L->maxstack
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | sltu AT, TMP2, TMP0
+ | sd BASE, L->base
+ | sd TMP1, L->top
+ | bnez AT, >5 // Need to grow stack.
+ |. move CFUNCADDR, TMP3
+ | jalr TMP3 // (lua_State *L)
+ |. move CARG1, L
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | ld BASE, L->base
+ | sll RD, CRET1, 3
+ | bgtz CRET1, ->fff_res // Returned nresults+1?
+ |. daddiu RA, BASE, -16
+ |1: // Returned 0 or -1: retry fast path.
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | ld TMP0, L->top
+ | cleartp LFUNC:RB
+ | bnez CRET1, ->vm_call_tail // Returned -1?
+ |. dsubu NARGS8:RC, TMP0, BASE
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andi TMP0, PC, FRAME_TYPE
+ | li AT, -4
+ | bnez TMP0, >3
+ |. and TMP1, PC, AT
+ | lbu TMP1, OFS_RA(PC)
+ | sll TMP1, TMP1, 3
+ | addiu TMP1, TMP1, 16
+ |3:
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |. dsubu TMP2, BASE, TMP1
+ |
+ |5: // Grow stack for fallback handler.
+ | load_got lj_state_growstack
+ | li CARG2, LUA_MINSTACK
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | b <1
+ |. li CRET1, 0 // Force retry.
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | move MULTRES, ra
+ | load_got lj_gc_step
+ | sd BASE, L->base
+ | daddu TMP0, BASE, NARGS8:RC
+ | sd PC, SAVE_PC // Redundant (but a defined value).
+ | sd TMP0, L->top
+ | call_intern lj_gc_step // (lua_State *L)
+ |. move CARG1, L
+ | ld BASE, L->base
+ | move ra, MULTRES
+ | ld TMP0, L->top
+ | ld CFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp CFUNC:RB
+ | jr ra
+ |. dsubu NARGS8:RC, TMP0, BASE
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bnez AT, >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ |. lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE
+ | bnez AT, >1
+ |. addiu TMP2, TMP2, -1
+ | andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, >1
+ |. nop
+ | b >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | beqz AT, >1
+ |5: // Re-dispatch to static ins.
+ |. ld AT, GG_DISP2STATIC(TMP0) // Assumes TMP0 holds DISPATCH+OP*4.
+ | jr AT
+ |. nop
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbu TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, HOOK_ACTIVE // Hook already active?
+ | bnez AT, <5
+ |. andi AT, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqz AT, <5
+ |. addiu TMP2, TMP2, -1
+ | beqz TMP2, >1
+ |. sw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andi AT, TMP3, LUA_MASKLINE
+ | beqz AT, <5
+ |1:
+ |. load_got lj_dispatch_ins
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sd BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call_intern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ |3:
+ | ld BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lw INS, -4(PC)
+ | decode_OP8a TMP1, INS
+ | decode_OP8b TMP1
+ | daddu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | ld AT, GG_DISP2STATIC(TMP0)
+ | decode_RA8a RA, INS
+ | decode_RD8b RD
+ | jr AT
+ |. decode_RA8b RA
+ |
+ |->cont_hook: // Continue from hook yield.
+ | daddiu PC, PC, 4
+ | b <4
+ |. lw MULTRES, -24+LO(RB) // Restore MULTRES for *M ins.
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | ld LFUNC:TMP1, FRAME_FUNC(BASE)
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | cleartp LFUNC:TMP1
+ | sd PC, SAVE_PC
+ | ld TMP1, LFUNC:TMP1->pc
+ | move CARG2, PC
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | lbu TMP1, PC2PROTO(framesize)(TMP1)
+ | load_got lj_trace_hot
+ | sd BASE, L->base
+ | dsll TMP1, TMP1, 3
+ | daddu TMP1, BASE, TMP1
+ | call_intern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ |. sd TMP1, L->top
+ | b <3
+ |. nop
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ |.if JIT
+ | b >1
+ |.endif
+ |. move CARG2, PC
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | load_got lj_dispatch_call
+ | daddu TMP0, BASE, RC
+ | sd PC, SAVE_PC
+ | sd BASE, L->base
+ | dsubu RA, RA, BASE
+ | sd TMP0, L->top
+ | call_intern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // Returns ASMFunction.
+ | ld BASE, L->base
+ | ld TMP0, L->top
+ | sd r0, SAVE_PC // Invalidate for subsequent line hook.
+ | dsubu NARGS8:RC, TMP0, BASE
+ | daddu RA, BASE, RA
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | cleartp LFUNC:RB
+ | jr CRET1
+ |. lw INS, -4(PC)
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, RB = meta base
+ | lw INS, -4(PC)
+ | ld TRACE:TMP2, -40(RB) // Save previous trace.
+ | decode_RA8a RC, INS
+ | daddiu AT, MULTRES, -8
+ | cleartp TRACE:TMP2
+ | decode_RA8b RC
+ | beqz AT, >2
+ |. daddu RC, BASE, RC // Call base.
+ |1: // Move results down.
+ | ld CARG1, 0(RA)
+ | daddiu AT, AT, -8
+ | daddiu RA, RA, 8
+ | sd CARG1, 0(RC)
+ | bnez AT, <1
+ |. daddiu RC, RC, 8
+ |2:
+ | decode_RA8a RA, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b RA
+ | decode_RB8b RB
+ | daddu RA, RA, RB
+ | daddu RA, BASE, RA
+ |3:
+ | sltu AT, RC, RA
+ | bnez AT, >9 // More results wanted?
+ |. nop
+ |
+ | lhu TMP3, TRACE:TMP2->traceno
+ | lhu RD, TRACE:TMP2->link
+ | beq RD, TMP3, ->cont_nop // Blacklisted.
+ |. load_got lj_dispatch_stitch
+ | bnez RD, =>BC_JLOOP // Jump to stitched trace.
+ |. sll RD, RD, 3
+ |
+ | // Stitch a new trace to the previous trace.
+ | sw TMP3, DISPATCH_J(exitno)(DISPATCH)
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | sd BASE, L->base
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | call_intern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ |. move CARG2, PC
+ | b ->cont_nop
+ |. ld BASE, L->base
+ |
+ |9:
+ | sd TISNIL, 0(RC)
+ | b <3
+ |. daddiu RC, RC, 8
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | load_got lj_dispatch_profile
+ | sw MULTRES, SAVE_MULTRES
+ | move CARG2, PC
+ | sd BASE, L->base
+ | call_intern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ |. move CARG1, L
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | daddiu PC, PC, -4
+ | b ->cont_nop
+ |. ld BASE, L->base
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro savex_, a, b
+ |.if FPU
+ | sdc1 f..a, a*8(sp)
+ | sdc1 f..b, b*8(sp)
+ | sd r..a, 32*8+a*8(sp)
+ | sd r..b, 32*8+b*8(sp)
+ |.else
+ | sd r..a, a*8(sp)
+ | sd r..b, b*8(sp)
+ |.endif
+ |.endmacro
+ |
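+ |// The stores below build the ExitState handed to lj_trace_exit: with FPU
+ |// all 32 FPRs first, then all 32 GPRs; without FPU only the GPRs. Sketch,
+ |// mirroring the layout expected by lj_target_mips.h:
+ |//   typedef struct { double fpr[32]; intptr_t gpr[32]; /* spills */ } ExitState;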
+ |->vm_exit_handler:
+ |.if JIT
+ |.if FPU
+ | daddiu sp, sp, -(32*8+32*8)
+ |.else
+ | daddiu sp, sp, -(32*8)
+ |.endif
+ | savex_ 0, 1
+ | savex_ 2, 3
+ | savex_ 4, 5
+ | savex_ 6, 7
+ | savex_ 8, 9
+ | savex_ 10, 11
+ | savex_ 12, 13
+ | savex_ 14, 15
+ | savex_ 16, 17
+ | savex_ 18, 19
+ | savex_ 20, 21
+ | savex_ 22, 23
+ | savex_ 24, 25
+ | savex_ 26, 27
+ | savex_ 28, 30
+ |.if FPU
+ | sdc1 f29, 29*8(sp)
+ | sdc1 f31, 31*8(sp)
+ | sd r0, 32*8+31*8(sp) // Clear RID_TMP.
+ | daddiu TMP2, sp, 32*8+32*8 // Recompute original value of sp.
+ | sd TMP2, 32*8+29*8(sp) // Store sp in RID_SP
+ |.else
+ | sd r0, 31*8(sp) // Clear RID_TMP.
+ | daddiu TMP2, sp, 32*8 // Recompute original value of sp.
+ | sd TMP2, 29*8(sp) // Store sp in RID_SP
+ |.endif
+ | li_vmstate EXIT
+ | daddiu DISPATCH, JGL, -GG_DISP2G-32768
+ | lw TMP1, 0(TMP2) // Load exit number.
+ | st_vmstate
+ | ld L, DISPATCH_GL(cur_L)(DISPATCH)
+ | ld BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | load_got lj_trace_exit
+ | sd L, DISPATCH_J(L)(DISPATCH)
+ | sw ra, DISPATCH_J(parent)(DISPATCH) // Store trace number.
+ | sd BASE, L->base
+ | sw TMP1, DISPATCH_J(exitno)(DISPATCH) // Store exit number.
+ | daddiu CARG1, DISPATCH, GG_DISP2J
+ | sd r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | call_intern lj_trace_exit // (jit_State *J, ExitState *ex)
+ |. move CARG2, sp
+ | // Returns MULTRES (unscaled) or negated error code.
+ | ld TMP1, L->cframe
+ | li AT, -4
+ | ld BASE, L->base
+ | and sp, TMP1, AT
+ | ld PC, SAVE_PC // Get SAVE_PC.
+ | b >1
+ |. sd L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CRET1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | ld L, SAVE_L
+ | daddiu DISPATCH, JGL, -GG_DISP2G-32768
+ | sd BASE, L->base
+ |1:
+ | bltz CRET1, >9 // Check for error from exit.
+ |. ld LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | dsll MULTRES, CRET1, 3
+ | cleartp LFUNC:RB
+ | sw MULTRES, SAVE_MULTRES
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | .FPU mtc1 TMP3, TOBIT
+ | ld TMP1, LFUNC:RB->pc
+ | sd r0, DISPATCH_GL(jit_base)(DISPATCH)
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lw INS, 0(PC)
+ | daddiu PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1
+ | sw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OP8a TMP1, INS
+ | decode_OP8b TMP1
+ | sltiu TMP2, TMP1, BC_FUNCF*8
+ | daddu TMP0, DISPATCH, TMP1
+ | decode_RD8a RD, INS
+ | ld AT, 0(TMP0)
+ | decode_RA8a RA, INS
+ | beqz TMP2, >2
+ |. decode_RA8b RA
+ | jr AT
+ |. decode_RD8b RD
+ |2:
+ | sltiu TMP2, TMP1, (BC_FUNCC+2)*8 // Fast function?
+ | bnez TMP2, >3
+ |. ld TMP1, FRAME_PC(BASE)
+ | // Check frame below fast function.
+ | andi TMP0, TMP1, FRAME_TYPE
+ | bnez TMP0, >3 // Trace stitching continuation?
+ |. nop
+ | // Otherwise set KBASE for Lua function below fast function.
+ | lw TMP2, -4(TMP1)
+ | decode_RA8a TMP0, TMP2
+ | decode_RA8b TMP0
+ | dsubu TMP1, BASE, TMP0
+ | ld LFUNC:TMP2, -32(TMP1)
+ | cleartp LFUNC:TMP2
+ | ld TMP1, LFUNC:TMP2->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ |3:
+ | daddiu RC, MULTRES, -8
+ | jr AT
+ |. daddu RA, RA, BASE
+ |
+ |9: // Rethrow error from the right C frame.
+ | load_got lj_err_trace
+ | sub CARG2, r0, CRET1
+ | call_intern lj_err_trace // (lua_State *L, int errcode)
+ |. move CARG1, L
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Hard-float round to integer.
+ |// Modifies AT, TMP0, FRET1, FRET2, f4. Keeps all others incl. FARG1.
+ |// MIPSR6: Modifies FTMP1, too.
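+ |// Key identity: for |x| < 2^52, (|x| + 2^52) - 2^52 rounds |x| to an
+ |// integer in the current FP rounding mode; the compares below then
+ |// adjust by +-1 and merge the sign back in. C sketch:
+ |//   r = (fabs(x) + 0x1p52) - 0x1p52;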
+ |.macro vm_round_hf, func
+ | lui TMP0, 0x4330 // Hiword of 2^52 (double).
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ | abs.d FRET2, FARG1 // |x|
+ | dmfc1 AT, FARG1
+ |.if MIPSR6
+ | cmp.lt.d FTMP1, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1eqz FTMP1, >1 // Truncate only if |x| < 2^52.
+ |.else
+ | c.olt.d 0, FRET2, f4
+ | add.d FRET1, FRET2, f4 // (|x| + 2^52) - 2^52
+ | bc1f 0, >1 // Truncate only if |x| < 2^52.
+ |.endif
+ |. sub.d FRET1, FRET1, f4
+ | slt AT, AT, r0
+ |.if "func" == "ceil"
+ | lui TMP0, 0xbff0 // Hiword of -1 (double). Preserves -0.
+ |.else
+ | lui TMP0, 0x3ff0 // Hiword of +1 (double).
+ |.endif
+ |.if "func" == "trunc"
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ |.if MIPSR6
+ | cmp.lt.d FTMP1, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | sel.d FTMP1, FRET1, FRET2 // If yes, subtract +1.
+ | dmtc1 AT, FRET1
+ | neg.d FRET2, FTMP1
+ | jr ra
+ |. sel.d FRET1, FTMP1, FRET2 // Merge sign bit back in.
+ |.else
+ | c.olt.d 0, FRET2, FRET1 // |x| < result?
+ | sub.d FRET2, FRET1, f4
+ | movt.d FRET1, FRET2, 0 // If yes, subtract +1.
+ | neg.d FRET2, FRET1
+ | jr ra
+ |. movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.endif
+ |.else
+ | neg.d FRET2, FRET1
+ | dsll TMP0, TMP0, 32
+ | dmtc1 TMP0, f4
+ |.if MIPSR6
+ | dmtc1 AT, FTMP1
+ | sel.d FTMP1, FRET1, FRET2
+ |.if "func" == "ceil"
+ | cmp.lt.d FRET1, FTMP1, FARG1 // x > result?
+ |.else
+ | cmp.lt.d FRET1, FARG1, FTMP1 // x < result?
+ |.endif
+ | sub.d FRET2, FTMP1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. sel.d FRET1, FTMP1, FRET2
+ |.else
+ | movn.d FRET1, FRET2, AT // Merge sign bit back in.
+ |.if "func" == "ceil"
+ | c.olt.d 0, FRET1, FARG1 // x > result?
+ |.else
+ | c.olt.d 0, FARG1, FRET1 // x < result?
+ |.endif
+ | sub.d FRET2, FRET1, f4 // If yes, subtract +-1.
+ | jr ra
+ |. movt.d FRET1, FRET2, 0
+ |.endif
+ |.endif
+ |1:
+ | jr ra
+ |. mov.d FRET1, FARG1
+ |.endmacro
+ |
+ |.macro vm_round, func
+ |.if FPU
+ | vm_round_hf, func
+ |.endif
+ |.endmacro
+ |
+ |->vm_floor:
+ | vm_round floor
+ |->vm_ceil:
+ | vm_round ceil
+ |->vm_trunc:
+ |.if JIT
+ | vm_round trunc
+ |.endif
+ |
+ |// Soft-float integer to number conversion.
+ |.macro sfi2d, ARG
+ |.if not FPU
+ | beqz ARG, >9 // Handle zero first.
+ |. sra TMP0, ARG, 31
+ | xor TMP1, ARG, TMP0
+ | dsubu TMP1, TMP1, TMP0 // Absolute value in TMP1.
+ | dclz ARG, TMP1
+ | addiu ARG, ARG, -11
+ | li AT, 0x3ff+63-11-1
+ | dsllv TMP1, TMP1, ARG // Align mantissa left with leading 1.
+ | subu ARG, AT, ARG // Exponent - 1.
+ | ins ARG, TMP0, 11, 11 // Sign | Exponent.
+ | dsll ARG, ARG, 52 // Align left.
+ | jr ra
+ |. daddu ARG, ARG, TMP1 // Add mantissa, increment exponent.
+ |9:
+ | jr ra
+ |. nop
+ |.endif
+ |.endmacro
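+ |
+ |// In C terms (sketch): shift the leading 1 of |i| up to bit 52, then let
+ |// the final add carry it into the exponent field:
+ |//   clz  = __builtin_clzll(m);               // m = |i|, i != 0
+ |//   bits = (sign << 63) | ((uint64_t)(1085 - clz) << 52);
+ |//   bits += m << (clz - 11);                 // Bit 52 bumps the exponent.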
+ |
+ |// Input CARG1. Output: CARG1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_1:
+ | sfi2d CARG1
+ |
+ |// Input CARG2. Output: CARG2. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfi2d_2:
+ | sfi2d CARG2
+ |
+ |// Soft-float comparison. Equivalent to c.eq.d.
+ |// Input: CARG*. Output: CRET1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfcmpeq:
+ |.if not FPU
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. xor AT, CARG1, CARG2
+ | jr ra
+ |. sltiu CRET1, AT, 1 // Same values: return 1.
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
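+ |
+ |// NaN test used here and in the comparisons below: with the sign shifted
+ |// out, (x << 1) > 0xffe0000000000000 holds exactly for an all-ones
+ |// exponent with a nonzero mantissa; infinities compare equal and pass.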
+ |
+ |// Soft-float comparison. Equivalent to c.ult.d and c.olt.d.
+ |// Input: CARG1, CARG2. Output: CRET1. Temporaries: AT, TMP0, TMP1, CRET2.
+ |->vm_sfcmpult:
+ |.if not FPU
+ | b >1
+ |. li CRET2, 1
+ |.endif
+ |
+ |->vm_sfcmpolt:
+ |.if not FPU
+ | li CRET2, 0
+ |1:
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, CARG1, CARG2
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |8:
+ | jr ra
+ |. li CRET1, 0
+ |9:
+ | jr ra
+ |. move CRET1, CRET2
+ |.endif
+ |
+ |->vm_sfcmpogt:
+ |.if not FPU
+ | dsll AT, CARG2, 1
+ | dsll TMP0, CARG1, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 0.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0 or 1;
+ |. and AT, CARG2, CARG1
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |8:
+ | jr ra
+ |. li CRET1, 0
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |// Soft-float comparison. Equivalent to c.ole.d a, b or c.ole.d b, a.
+ |// Input: CARG1, CARG2, TMP3. Output: CRET1. Temporaries: AT, TMP0, TMP1.
+ |->vm_sfcmpolex:
+ |.if not FPU
+ | dsll AT, CARG1, 1
+ | dsll TMP0, CARG2, 1
+ | or TMP1, AT, TMP0
+ | beqz TMP1, >8 // Both args +-0: return 1.
+ |. lui TMP1, 0xffe0
+ | dsll TMP1, TMP1, 32
+ | sltu AT, TMP1, AT
+ | sltu TMP0, TMP1, TMP0
+ | or TMP1, AT, TMP0
+ | bnez TMP1, >9 // Either arg is NaN: return 0;
+ |. and AT, CARG1, CARG2
+ | xor AT, AT, TMP3
+ | bltz AT, >5 // Both args negative?
+ |. nop
+ | jr ra
+ |. slt CRET1, CARG2, CARG1
+ |5: // Swap conditions if both operands are negative.
+ | jr ra
+ |. slt CRET1, CARG1, CARG2
+ |8:
+ | jr ra
+ |. li CRET1, 1
+ |9:
+ | jr ra
+ |. li CRET1, 0
+ |.endif
+ |
+ |.macro sfmin_max, name, fpcall
+ |->vm_sf .. name:
+ |.if JIT and not FPU
+ | move TMP2, ra
+ | bal ->fpcall
+ |. nop
+ | move ra, TMP2
+ | move TMP0, CRET1
+ | move CRET1, CARG1
+ |.if MIPSR6
+ | selnez CRET1, CRET1, TMP0
+ | seleqz TMP0, CARG2, TMP0
+ | jr ra
+ |. or CRET1, CRET1, TMP0
+ |.else
+ | jr ra
+ |. movz CRET1, CARG2, TMP0
+ |.endif
+ |.endif
+ |.endmacro
+ |
+ | sfmin_max min, vm_sfcmpolt
+ | sfmin_max max, vm_sfcmpogt
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_IDX, CARG2
+ |.define NEXT_ASIZE, CARG3
+ |.define NEXT_NIL, CARG4
+ |.define NEXT_TMP0, r12
+ |.define NEXT_TMP1, r13
+ |.define NEXT_TMP2, r14
+ |.define NEXT_RES_VK, CRET1
+ |.define NEXT_RES_IDX, CRET2
+ |.define NEXT_RES_PTR, sp
+ |.define NEXT_RES_VAL, 0(sp)
+ |.define NEXT_RES_KEY, 8(sp)
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in CRET2.
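+ |// Sketch of the scheme: idx < asize walks the array part, with the key
+ |// rebuilt as a TISNUM-tagged integer; idx - asize then walks the hash
+ |// nodes, skipping nil values. The key/value pair is written to the stack
+ |// and its address returned; the end of the table is signaled by a nil key.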
+ |->vm_next:
+ |.if JIT and ENDIAN_LE
+ | lw NEXT_ASIZE, NEXT_TAB->asize
+ | ld NEXT_TMP0, NEXT_TAB->array
+ | li NEXT_NIL, LJ_TNIL
+ |1: // Traverse array part.
+ | sltu AT, NEXT_IDX, NEXT_ASIZE
+ | sll NEXT_TMP1, NEXT_IDX, 3
+ | beqz AT, >5
+ |. daddu NEXT_TMP1, NEXT_TMP0, NEXT_TMP1
+ | li AT, LJ_TISNUM
+ | ld NEXT_TMP2, 0(NEXT_TMP1)
+ | dsll AT, AT, 47
+ | or NEXT_TMP1, NEXT_IDX, AT
+ | beq NEXT_TMP2, NEXT_NIL, <1
+ |. addiu NEXT_IDX, NEXT_IDX, 1
+ | sd NEXT_TMP2, NEXT_RES_VAL
+ | sd NEXT_TMP1, NEXT_RES_KEY
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ | jr ra
+ |. move NEXT_RES_IDX, NEXT_IDX
+ |
+ |5: // Traverse hash part.
+ | subu NEXT_RES_IDX, NEXT_IDX, NEXT_ASIZE
+ | ld NODE:NEXT_RES_VK, NEXT_TAB->node
+ | sll NEXT_TMP2, NEXT_RES_IDX, 5
+ | lw NEXT_TMP0, NEXT_TAB->hmask
+ | sll AT, NEXT_RES_IDX, 3
+ | subu AT, NEXT_TMP2, AT
+ | daddu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, AT
+ |6:
+ | sltu AT, NEXT_TMP0, NEXT_RES_IDX
+ | bnez AT, >8
+ |. nop
+ | ld NEXT_TMP2, NODE:NEXT_RES_VK->val
+ | bne NEXT_TMP2, NEXT_NIL, >9
+ |. addiu NEXT_RES_IDX, NEXT_RES_IDX, 1
+ | // Skip holes in hash part.
+ | b <6
+ |. daddiu NODE:NEXT_RES_VK, NODE:NEXT_RES_VK, sizeof(Node)
+ |
+ |8: // End of iteration. Set the key to nil (not the value).
+ | sd NEXT_NIL, NEXT_RES_KEY
+ | move NEXT_RES_VK, NEXT_RES_PTR
+ |9:
+ | jr ra
+ |. addu NEXT_RES_IDX, NEXT_RES_IDX, NEXT_ASIZE
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r1, g in r2.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | ld CTSTATE, GL:r2->ctype_state
+ | daddiu DISPATCH, r2, GG_G2DISP
+ | load_got lj_ccallback_enter
+ | sw r1, CTSTATE->cb.slot
+ | sd CARG1, CTSTATE->cb.gpr[0]
+ | .FPU sdc1 FARG1, CTSTATE->cb.fpr[0]
+ | sd CARG2, CTSTATE->cb.gpr[1]
+ | .FPU sdc1 FARG2, CTSTATE->cb.fpr[1]
+ | sd CARG3, CTSTATE->cb.gpr[2]
+ | .FPU sdc1 FARG3, CTSTATE->cb.fpr[2]
+ | sd CARG4, CTSTATE->cb.gpr[3]
+ | .FPU sdc1 FARG4, CTSTATE->cb.fpr[3]
+ | sd CARG5, CTSTATE->cb.gpr[4]
+ | .FPU sdc1 FARG5, CTSTATE->cb.fpr[4]
+ | sd CARG6, CTSTATE->cb.gpr[5]
+ | .FPU sdc1 FARG6, CTSTATE->cb.fpr[5]
+ | sd CARG7, CTSTATE->cb.gpr[6]
+ | .FPU sdc1 FARG7, CTSTATE->cb.fpr[6]
+ | sd CARG8, CTSTATE->cb.gpr[7]
+ | .FPU sdc1 FARG8, CTSTATE->cb.fpr[7]
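+ | // All eight integer and FP argument registers are spilled blindly above;
+ | // lj_ccallback_enter sorts out the actual C signature later.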
+ | daddiu TMP0, sp, CFRAME_SPACE
+ | sd TMP0, CTSTATE->cb.stack
+ | sd r0, SAVE_PC // Any value outside of bytecode is ok.
+ | move CARG2, sp
+ | call_intern lj_ccallback_enter // (CTState *cts, void *cf)
+ |. move CARG1, CTSTATE
+ | // Returns lua_State *.
+ | ld BASE, L:CRET1->base
+ | ld RC, L:CRET1->top
+ | move L, CRET1
+ | .FPU lui TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU mtc1 TMP3, TOBIT
+ | li TISNIL, LJ_TNIL
+ | li TISNUM, LJ_TISNUM
+ | li_vmstate INTERP
+ | subu RC, RC, BASE
+ | cleartp LFUNC:RB
+ | st_vmstate
+ | .FPU cvt.d.s TOBIT, TOBIT
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | load_got lj_ccallback_leave
+ | ld CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | sd BASE, L->base
+ | sd RB, L->top
+ | sd L, CTSTATE->L
+ | move CARG2, RA
+ | call_intern lj_ccallback_leave // (CTState *cts, TValue *o)
+ |. move CARG1, CTSTATE
+ | .FPU ldc1 FRET1, CTSTATE->cb.fpr[0]
+ | ld CRET1, CTSTATE->cb.gpr[0]
+ | .FPU ldc1 FRET2, CTSTATE->cb.fpr[1]
+ | b ->vm_leave_unw
+ |. ld CRET2, CTSTATE->cb.gpr[1]
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lw TMP1, CCSTATE->spadj
+ | lbu CARG2, CCSTATE->nsp
+ | move TMP2, sp
+ | dsubu sp, sp, TMP1
+ | sd ra, -8(TMP2)
+ | sll CARG2, CARG2, 3
+ | sd r16, -16(TMP2)
+ | sd CCSTATE, -24(TMP2)
+ | move r16, TMP2
+ | daddiu TMP1, CCSTATE, offsetof(CCallState, stack)
+ | move TMP2, sp
+ | beqz CARG2, >2
+ |. daddu TMP3, TMP1, CARG2
+ |1:
+ | ld TMP0, 0(TMP1)
+ | daddiu TMP1, TMP1, 8
+ | sltu AT, TMP1, TMP3
+ | sd TMP0, 0(TMP2)
+ | bnez AT, <1
+ |. daddiu TMP2, TMP2, 8
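+ | // The loop above copies the nsp stack-passed argument slots from
+ | // CCSTATE->stack to the newly adjusted stack frame.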
+ |2:
+ | ld CFUNCADDR, CCSTATE->func
+ | .FPU ldc1 FARG1, CCSTATE->gpr[0]
+ | ld CARG2, CCSTATE->gpr[1]
+ | .FPU ldc1 FARG2, CCSTATE->gpr[1]
+ | ld CARG3, CCSTATE->gpr[2]
+ | .FPU ldc1 FARG3, CCSTATE->gpr[2]
+ | ld CARG4, CCSTATE->gpr[3]
+ | .FPU ldc1 FARG4, CCSTATE->gpr[3]
+ | ld CARG5, CCSTATE->gpr[4]
+ | .FPU ldc1 FARG5, CCSTATE->gpr[4]
+ | ld CARG6, CCSTATE->gpr[5]
+ | .FPU ldc1 FARG6, CCSTATE->gpr[5]
+ | ld CARG7, CCSTATE->gpr[6]
+ | .FPU ldc1 FARG7, CCSTATE->gpr[6]
+ | ld CARG8, CCSTATE->gpr[7]
+ | .FPU ldc1 FARG8, CCSTATE->gpr[7]
+ | jalr CFUNCADDR
+ |. ld CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | ld CCSTATE:TMP1, -24(r16)
+ | ld TMP2, -16(r16)
+ | ld ra, -8(r16)
+ | sd CRET1, CCSTATE:TMP1->gpr[0]
+ | sd CRET2, CCSTATE:TMP1->gpr[1]
+ |.if FPU
+ | sdc1 FRET1, CCSTATE:TMP1->fpr[0]
+ | sdc1 FRET2, CCSTATE:TMP1->fpr[1]
+ |.else
+ | sd CARG1, CCSTATE:TMP1->gpr[2] // 2nd FP struct field for soft-float.
+ |.endif
+ | move sp, r16
+ | jr ra
+ |. move r16, TMP2
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
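+ |// Its code offset (fcofs) is used by emit_asm_debug below to emit a
+ |// separate unwind FDE for the FFI call frame.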
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.macro bc_comp, FRA, FRD, ARGRA, ARGRD, movop, fmovop, fcomp, sfcomp
+ | daddu RA, BASE, RA
+ | daddu RD, BASE, RD
+ | ld ARGRA, 0(RA)
+ | ld ARGRD, 0(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | gettp CARG3, ARGRA
+ | gettp CARG4, ARGRD
+ | bne CARG3, TISNUM, >2
+ |. daddiu PC, PC, 4
+ | bne CARG4, TISNUM, >5
+ |. decode_RD4b TMP2
+ | sextw ARGRA, ARGRA
+ | sextw ARGRD, ARGRD
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | slt AT, CARG1, CARG2
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ | movop TMP2, TMP2, AT
+ |.else
+ | movop TMP2, r0, AT
+ |.endif
+ |1:
+ | daddu PC, PC, TMP2
+ | ins_next
+ |
+ |2: // RA is not an integer.
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, >4
+ |. decode_RD4b TMP2
+ |.if FPU
+ | ldc1 FRA, 0(RA)
+ | ldc1 FRD, 0(RD)
+ |.endif
+ |3: // RA and RD are both numbers.
+ |.if FPU
+ |.if MIPSR6
+ | fcomp FTMP0, FTMP0, FTMP2
+ | addu TMP2, TMP2, TMP3
+ | mfc1 TMP3, FTMP0
+ | b <1
+ |. fmovop TMP2, TMP2, TMP3
+ |.else
+ | fcomp FTMP0, FTMP2
+ | addu TMP2, TMP2, TMP3
+ | b <1
+ |. fmovop TMP2, r0
+ |.endif
+ |.else
+ | bal sfcomp
+ |. addu TMP2, TMP2, TMP3
+ | b <1
+ |.if MIPSR6
+ |. movop TMP2, TMP2, CRET1
+ |.else
+ |. movop TMP2, r0, CRET1
+ |.endif
+ |.endif
+ |
+ |4: // RA is a number, RD is not a number.
+ | bne CARG4, TISNUM, ->vmeta_comp
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 FRD, LO(RD)
+ | ldc1 FRA, 0(RA)
+ | b <3
+ |. cvt.d.w FRD, FRD
+ |.else
+ |.if "ARGRD" == "CARG1"
+ |. sextw CARG1, CARG1
+ | bal ->vm_sfi2d_1
+ |. nop
+ |.else
+ |. sextw CARG2, CARG2
+ | bal ->vm_sfi2d_2
+ |. nop
+ |.endif
+ | b <3
+ |. nop
+ |.endif
+ |
+ |5: // RA is an integer, RD is not an integer.
+ | sltiu AT, CARG4, LJ_TISNUM
+ | beqz AT, ->vmeta_comp
+ |. lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ | lwc1 FRA, LO(RA)
+ | ldc1 FRD, 0(RD)
+ | b <3
+ |. cvt.d.w FRA, FRA
+ |.else
+ |.if "ARGRA" == "CARG1"
+ | bal ->vm_sfi2d_1
+ |. sextw CARG1, CARG1
+ |.else
+ | bal ->vm_sfi2d_2
+ |. sextw CARG2, CARG2
+ |.endif
+ | b <3
+ |. nop
+ |.endif
+ |.endmacro
+ |
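+ |// bc_comp folds all four ordered comparisons into two compare primitives:
+ |// ISLT/ISGE use a less-than, ISLE/ISGT swap the operands and use an
+ |// unordered less-than. The GE/GT variants branch on the inverted result by
+ |// zeroing the branch offset on the opposite condition, which also yields
+ |// the required NaN behavior.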
+ |.if MIPSR6
+ if (op == BC_ISLT) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, selnez, selnez, cmp.lt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISGE) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, seleqz, seleqz, cmp.lt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISLE) {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, seleqz, seleqz, cmp.ult.d, ->vm_sfcmpult
+ } else {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, selnez, selnez, cmp.ult.d, ->vm_sfcmpult
+ }
+ |.else
+ if (op == BC_ISLT) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, movz, movf, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISGE) {
+ | bc_comp FTMP0, FTMP2, CARG1, CARG2, movn, movt, c.olt.d, ->vm_sfcmpolt
+ } else if (op == BC_ISLE) {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, movn, movt, c.ult.d, ->vm_sfcmpult
+ } else {
+ | bc_comp FTMP2, FTMP0, CARG2, CARG1, movz, movf, c.ult.d, ->vm_sfcmpult
+ }
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddiu PC, PC, 4
+ | daddu RD, BASE, RD
+ | ld CARG1, 0(RA)
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ld CARG2, 0(RD)
+ | gettp CARG3, CARG1
+ | gettp CARG4, CARG2
+ | sltu AT, TISNUM, CARG3
+ | sltu TMP1, TISNUM, CARG4
+ | or AT, AT, TMP1
+ if (vk) {
+ | beqz AT, ->BC_ISEQN_Z
+ } else {
+ | beqz AT, ->BC_ISNEN_Z
+ }
+ | // Either or both types are not numbers.
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ |.if FFI
+ |. li AT, LJ_TCDATA
+ | beq CARG3, AT, ->vmeta_equal_cd
+ |.endif
+ | decode_RD4b TMP2
+ |.if FFI
+ | beq CARG4, AT, ->vmeta_equal_cd
+ |. nop
+ |.endif
+ | bne CARG1, CARG2, >2
+ |. addu TMP2, TMP2, TMP3
+ | // Tag and value are equal.
+ if (vk) {
+ |->BC_ISEQV_Z:
+ | daddu PC, PC, TMP2
+ }
+ |1:
+ | ins_next
+ |
+ |2: // Check if the tags are the same and it's a table or userdata.
+ | xor AT, CARG3, CARG4 // Same type?
+ | sltiu TMP0, CARG3, LJ_TISTABUD+1 // Table or userdata?
+ |.if MIPSR6
+ | seleqz TMP0, TMP0, AT
+ |.else
+ | movn TMP0, r0, AT
+ |.endif
+ if (vk) {
+ | beqz TMP0, <1
+ } else {
+ | beqz TMP0, ->BC_ISEQV_Z // Reuse code from opposite instruction.
+ }
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ |. cleartp TAB:TMP1, CARG1
+ | ld TAB:TMP3, TAB:TMP1->metatable
+ if (vk) {
+ | beqz TAB:TMP3, <1 // No metatable?
+ |. nop
+ | lbu TMP3, TAB:TMP3->nomm
+ | andi TMP3, TMP3, 1<<MM_eq
+ | bnez TMP3, >1 // Or 'no __eq' flag set?
+ } else {
+ | beqz TAB:TMP3,->BC_ISEQV_Z // No metatable?
+ |. nop
+ | lbu TMP3, TAB:TMP3->nomm
+ | andi TMP3, TMP3, 1<<MM_eq
+ | bnez TMP3, ->BC_ISEQV_Z // Or 'no __eq' flag set?
+ }
+ |. nop
+ | b ->vmeta_equal // Handle __eq metamethod.
+ |. li TMP0, 1-vk // ne = 0 or 1.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddiu PC, PC, 4
+ | ld CARG1, 0(RA)
+ | dsubu RD, KBASE, RD
+ | lhu TMP2, -4+OFS_RD(PC)
+ | ld CARG2, -8(RD) // KBASE-8-str_const*8
+ |.if FFI
+ | gettp TMP0, CARG1
+ | li AT, LJ_TCDATA
+ |.endif
+ | li TMP1, LJ_TSTR
+ | decode_RD4b TMP2
+ |.if FFI
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. settp CARG2, TMP1
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | xor TMP1, CARG1, CARG2
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, TMP1
+ } else {
+ | selnez TMP2, TMP2, TMP1
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, TMP1
+ } else {
+ | movz TMP2, r0, TMP1
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ | daddu RA, BASE, RA
+ | daddu RD, KBASE, RD
+ | ld CARG1, 0(RA)
+ | ld CARG2, 0(RD)
+ | lhu TMP2, OFS_RD(PC)
+ | gettp CARG3, CARG1
+ | gettp CARG4, CARG2
+ | daddiu PC, PC, 4
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne CARG3, TISNUM, >3
+ |. decode_RD4b TMP2
+ | bne CARG4, TISNUM, >6
+ |. addu TMP2, TMP2, TMP3
+ | xor AT, CARG1, CARG2
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, AT
+ |1:
+ | daddu PC, PC, TMP2
+ |2:
+ } else {
+ | selnez TMP2, TMP2, AT
+ |1:
+ |2:
+ | daddu PC, PC, TMP2
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, AT
+ |1:
+ | daddu PC, PC, TMP2
+ |2:
+ } else {
+ | movz TMP2, r0, AT
+ |1:
+ |2:
+ | daddu PC, PC, TMP2
+ }
+ |.endif
+ | ins_next
+ |
+ |3: // RA is not an integer.
+ | sltu AT, CARG3, TISNUM
+ |.if FFI
+ | beqz AT, >8
+ |.else
+ | beqz AT, <2
+ |.endif
+ |. addu TMP2, TMP2, TMP3
+ | sltu AT, CARG4, TISNUM
+ |.if FPU
+ | ldc1 FTMP0, 0(RA)
+ | ldc1 FTMP2, 0(RD)
+ |.endif
+ | beqz AT, >5
+ |. nop
+ |4: // RA and RD are both numbers.
+ |.if FPU
+ |.if MIPSR6
+ | cmp.eq.d FTMP0, FTMP0, FTMP2
+ | dmfc1 TMP1, FTMP0
+ | b <1
+ if (vk) {
+ |. selnez TMP2, TMP2, TMP1
+ } else {
+ |. seleqz TMP2, TMP2, TMP1
+ }
+ |.else
+ | c.eq.d FTMP0, FTMP2
+ | b <1
+ if (vk) {
+ |. movf TMP2, r0
+ } else {
+ |. movt TMP2, r0
+ }
+ |.endif
+ |.else
+ | bal ->vm_sfcmpeq
+ |. nop
+ | b <1
+ |.if MIPSR6
+ if (vk) {
+ |. selnez TMP2, TMP2, CRET1
+ } else {
+ |. seleqz TMP2, TMP2, CRET1
+ }
+ |.else
+ if (vk) {
+ |. movz TMP2, r0, CRET1
+ } else {
+ |. movn TMP2, r0, CRET1
+ }
+ |.endif
+ |.endif
+ |
+ |5: // RA is a number, RD is not a number.
+ |.if FFI
+ | bne CARG4, TISNUM, >9
+ |.else
+ | bne CARG4, TISNUM, <2
+ |.endif
+ | // RA is a number, RD is an integer. Convert RD to a number.
+ |.if FPU
+ |. lwc1 FTMP2, LO(RD)
+ | b <4
+ |. cvt.d.w FTMP2, FTMP2
+ |.else
+ |. sextw CARG2, CARG2
+ | bal ->vm_sfi2d_2
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |6: // RA is an integer, RD is not an integer.
+ | sltu AT, CARG4, TISNUM
+ |.if FFI
+ | beqz AT, >9
+ |.else
+ | beqz AT, <2
+ |.endif
+ | // RA is an integer, RD is a number. Convert RA to a number.
+ |.if FPU
+ |. lwc1 FTMP0, LO(RA)
+ | ldc1 FTMP2, 0(RD)
+ | b <4
+ | cvt.d.w FTMP0, FTMP0
+ |.else
+ |. sextw CARG1, CARG1
+ | bal ->vm_sfi2d_1
+ |. nop
+ | b <4
+ |. nop
+ |.endif
+ |
+ |.if FFI
+ |8:
+ | li AT, LJ_TCDATA
+ | bne CARG3, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |9:
+ | li AT, LJ_TCDATA
+ | bne CARG4, AT, <2
+ |. nop
+ | b ->vmeta_equal_cd
+ |. nop
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | daddu RA, BASE, RA
+ | srl TMP1, RD, 3
+ | ld TMP0, 0(RA)
+ | lhu TMP2, OFS_RD(PC)
+ | not TMP1, TMP1
+ | gettp TMP0, TMP0
+ | daddiu PC, PC, 4
+ |.if FFI
+ | li AT, LJ_TCDATA
+ | beq TMP0, AT, ->vmeta_equal_cd
+ |.endif
+ |. xor TMP0, TMP0, TMP1
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (vk) {
+ | seleqz TMP2, TMP2, TMP0
+ } else {
+ | selnez TMP2, TMP2, TMP0
+ }
+ |.else
+ if (vk) {
+ | movn TMP2, r0, TMP0
+ } else {
+ | movz TMP2, r0, TMP0
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | daddu RD, BASE, RD
+ | lhu TMP2, OFS_RD(PC)
+ | ld TMP0, 0(RD)
+ | daddiu PC, PC, 4
+ | gettp TMP0, TMP0
+ | sltiu TMP0, TMP0, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISF) {
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ |.if MIPSR6
+ if (op == BC_IST) {
+ | selnez TMP2, TMP2, TMP0;
+ } else {
+ | seleqz TMP2, TMP2, TMP0;
+ }
+ |.else
+ if (op == BC_IST) {
+ | movz TMP2, r0, TMP0
+ } else {
+ | movn TMP2, r0, TMP0
+ }
+ |.endif
+ | daddu PC, PC, TMP2
+ } else {
+ | ld CRET1, 0(RD)
+ if (op == BC_ISTC) {
+ | beqz TMP0, >1
+ } else {
+ | bnez TMP0, >1
+ }
+ |. daddu RA, BASE, RA
+ | decode_RD4b TMP2
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | addu TMP2, TMP2, TMP3
+ | sd CRET1, 0(RA)
+ | daddu PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src*8, RD = -type*8
+ | daddu TMP2, BASE, RA
+ | srl TMP1, RD, 3
+ | ld TMP0, 0(TMP2)
+ | ins_next1
+ | gettp TMP0, TMP0
+ | daddu AT, TMP0, TMP1
+ | bnez AT, ->vmeta_istype
+ |. ins_next2
+ break;
+ case BC_ISNUM:
+ | // RA = src*8, RD = -(TISNUM-1)*8
+ | daddu TMP2, BASE, RA
+ | ld TMP0, 0(TMP2)
+ | ins_next1
+ | checknum TMP0, ->vmeta_istype
+ |. ins_next2
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | daddu RD, BASE, RD
+ | daddu RA, BASE, RA
+ | ld CRET1, 0(RD)
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | daddu RD, BASE, RD
+ | daddu RA, BASE, RA
+ | ld TMP0, 0(RD)
+ | li AT, LJ_TTRUE
+ | gettp TMP0, TMP0
+ | sltu TMP0, AT, TMP0
+ | addiu TMP0, TMP0, 1
+ | dsll TMP0, TMP0, 47
+ | not TMP0, TMP0
+ | ins_next1
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | daddu RB, BASE, RD
+ | ld CARG1, 0(RB)
+ | daddu RA, BASE, RA
+ | gettp CARG3, CARG1
+ | bne CARG3, TISNUM, >2
+ |. lui TMP1, 0x8000
+ | sextw CARG1, CARG1
+ | beq CARG1, TMP1, ->vmeta_unm // Meta handler deals with -2^31.
+ |. negu CARG1, CARG1
+ | zextw CARG1, CARG1
+ | settp CARG1, TISNUM
+ |1:
+ | ins_next1
+ | sd CARG1, 0(RA)
+ | ins_next2
+ |2:
+ | sltiu AT, CARG3, LJ_TISNUM
+ | beqz AT, ->vmeta_unm
+ |. dsll TMP1, TMP1, 32
+ | b <1
+ |. xor CARG1, CARG1, TMP1
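+ | // FP negation above is a sign-bit flip: TMP1 = 0x8000000000000000.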
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | daddu CARG2, BASE, RD
+ | daddu RA, BASE, RA
+ | ld TMP0, 0(CARG2)
+ | gettp TMP1, TMP0
+ | daddiu AT, TMP1, -LJ_TSTR
+ | bnez AT, >2
+ |. cleartp STR:CARG1, TMP0
+ | lw CRET1, STR:CARG1->len
+ |1:
+ | settp CRET1, TISNUM
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |2:
+ | daddiu AT, TMP1, -LJ_TTAB
+ | bnez AT, ->vmeta_len
+ |. nop
+#if LJ_52
+ | ld TAB:TMP2, TAB:CARG1->metatable
+ | bnez TAB:TMP2, >9
+ |. nop
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | load_got lj_tab_len
+ | call_intern lj_tab_len // (GCtab *t)
+ |. nop
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+ |. nop
+#if LJ_52
+ |9:
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_len
+ | bnez TMP0, <3 // 'no __len' flag set: done.
+ |. nop
+ | b ->vmeta_len
+ |. nop
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro fpmod, a, b, c
+ | bal ->vm_floor // floor(b/c)
+ |. div.d FARG1, b, c
+ | mul.d a, FRET1, c
+ | sub.d a, b, a // b - floor(b/c)*c
+ |.endmacro
+
+ |.macro sfpmod
+ | daddiu sp, sp, -16
+ |
+ | load_got __divdf3
+ | sd CARG1, 0(sp)
+ | call_extern
+ |. sd CARG2, 8(sp)
+ |
+ | load_got floor
+ | call_extern
+ |. move CARG1, CRET1
+ |
+ | load_got __muldf3
+ | move CARG1, CRET1
+ | call_extern
+ |. ld CARG2, 8(sp)
+ |
+ | load_got __subdf3
+ | ld CARG1, 0(sp)
+ | call_extern
+ |. move CARG2, CRET1
+ |
+ | daddiu sp, sp, 16
+ |.endmacro
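+ |
+ |// sfpmod computes b - floor(b/c)*c entirely via soft-float libcalls
+ |// (__divdf3, floor, __muldf3, __subdf3), keeping b and c live in a
+ |// 16-byte scratch frame across the calls.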
+
+ |.macro ins_arithpre, label
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||switch (vk) {
+ ||case 0:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = num_const*8
+ | daddu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RC, KBASE, RC
+ || break;
+ ||case 1:
+ | decode_RB8a RC, INS
+ | decode_RB8b RC
+ | decode_RDtoRC8 RB, RD
+ | // RA = dst*8, RB = num_const*8, RC = src1*8
+ | daddu RC, BASE, RC
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RB, KBASE, RB
+ || break;
+ ||default:
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | // RA = dst*8, RB = src1*8, RC = src2*8
+ | daddu RB, BASE, RB
+ |.if "label" ~= "none"
+ | b label
+ |.endif
+ |. daddu RC, BASE, RC
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arith, intins, fpins, fpcall, label
+ | ins_arithpre none
+ |
+ |.if "label" ~= "none"
+ |label:
+ |.endif
+ |
+ |// These loads are also used at label 5 below.
+ | ld CARG1, 0(RB)
+ | ld CARG2, 0(RC)
+ | gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ |
+ |.if "intins" ~= "div"
+ |
+ | // Check for two integers.
+ | sextw CARG3, CARG1
+ | bne TMP0, TISNUM, >5
+ |. sextw CARG4, CARG2
+ | bne TMP1, TISNUM, >5
+ |
+ |.if "intins" == "addu"
+ |. intins CRET1, CARG3, CARG4
+ | xor TMP1, CRET1, CARG3 // ((y^a) & (y^b)) < 0: overflow.
+ | xor TMP2, CRET1, CARG4
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.elif "intins" == "subu"
+ |. intins CRET1, CARG3, CARG4
+ | xor TMP1, CRET1, CARG3 // ((y^a) & (a^b)) < 0: overflow.
+ | xor TMP2, CARG3, CARG4
+ | and TMP1, TMP1, TMP2
+ | bltz TMP1, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.elif "intins" == "mult"
+ |.if MIPSR6
+ |. nop
+ | mul CRET1, CARG3, CARG4
+ | muh TMP2, CARG3, CARG4
+ |.else
+ |. intins CARG3, CARG4
+ | mflo CRET1
+ | mfhi TMP2
+ |.endif
+ | sra TMP1, CRET1, 31
+ | bne TMP1, TMP2, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.else
+ |. load_got lj_vm_modi
+ | beqz CARG4, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ | move CARG1, CARG3
+ | call_extern
+ |. move CARG2, CARG4
+ |.endif
+ |
+ | zextw CRET1, CRET1
+ | settp CRET1, TISNUM
+ | ins_next1
+ | sd CRET1, 0(RA)
+ |3:
+ | ins_next2
+ |
+ |.endif
+ |
+ |5: // Check for two numbers.
+ | .FPU ldc1 FTMP0, 0(RB)
+ | sltu AT, TMP0, TISNUM
+ | sltu TMP0, TMP1, TISNUM
+ | .FPU ldc1 FTMP2, 0(RC)
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |
+ |.if FPU
+ | fpins FRET1, FTMP0, FTMP2
+ |.elif "fpcall" == "sfpmod"
+ | sfpmod
+ |.else
+ | load_got fpcall
+ | call_extern
+ |. nop
+ |.endif
+ |
+ | ins_next1
+ |.if "intins" ~= "div"
+ | b <3
+ |.endif
+ |.if FPU
+ |. sdc1 FRET1, 0(RA)
+ |.else
+ |. sd CRET1, 0(RA)
+ |.endif
+ |.if "intins" == "div"
+ | ins_next2
+ |.endif
+ |
+ |.endmacro
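+ |
+ |// ins_arith tries the integer fast path first (with overflow checks for
+ |// add/sub/mul and a zero-divisor check for mod), then falls back to FP
+ |// arithmetic: an inline fpins on FPU builds, a libcall (or sfpmod)
+ |// otherwise. div always takes the FP path.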
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith addu, add.d, __adddf3, none
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith subu, sub.d, __subdf3, none
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mult, mul.d, __muldf3, none
+ break;
+ case BC_DIVVN:
+ | ins_arith div, div.d, __divdf3, ->BC_DIVVN_Z
+ break;
+ case BC_DIVNV: case BC_DIVVV:
+ | ins_arithpre ->BC_DIVVN_Z
+ break;
+ case BC_MODVN:
+ | ins_arith modi, fpmod, sfpmod, ->BC_MODVN_Z
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre ->BC_MODVN_Z
+ break;
+ case BC_POW:
+ | ins_arithpre none
+ | ld CARG1, 0(RB)
+ | ld CARG2, 0(RC)
+ | gettp TMP0, CARG1
+ | gettp TMP1, CARG2
+ | sltiu TMP0, TMP0, LJ_TISNUM
+ | sltiu TMP1, TMP1, LJ_TISNUM
+ | and AT, TMP0, TMP1
+ | load_got pow
+ | beqz AT, ->vmeta_arith
+ |. daddu RA, BASE, RA
+ |.if FPU
+ | ldc1 FARG1, 0(RB)
+ | ldc1 FARG2, 0(RC)
+ |.endif
+ | call_extern
+ |. nop
+ | ins_next1
+ |.if FPU
+ | sdc1 FRET1, 0(RA)
+ |.else
+ | sd CRET1, 0(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | dsubu CARG3, RC, RB
+ | sd BASE, L->base
+ | daddu CARG2, BASE, RC
+ | move MULTRES, RB
+ |->BC_CAT_Z:
+ | load_got lj_meta_cat
+ | srl CARG3, CARG3, 3
+ | sd PC, SAVE_PC
+ | call_intern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ |. move CARG1, L
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | bnez CRET1, ->vmeta_binop
+ |. ld BASE, L->base
+ | daddu RB, BASE, MULTRES
+ | ld CRET1, 0(RB)
+ | daddu RA, BASE, RA
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | dsubu TMP1, KBASE, RD
+ | ins_next1
+ | li TMP2, LJ_TSTR
+ | ld TMP0, -8(TMP1) // KBASE-8-str_const*8
+ | daddu RA, BASE, RA
+ | settp TMP0, TMP2
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | dsubu TMP1, KBASE, RD
+ | ins_next1
+ | ld TMP0, -8(TMP1) // KBASE-8-cdata_const*8
+ | li TMP2, LJ_TCDATA
+ | daddu RA, BASE, RA
+ | settp TMP0, TMP2
+ | sd TMP0, 0(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ | sra RD, INS, 16
+ | daddu RA, BASE, RA
+ | zextw RD, RD
+ | ins_next1
+ | settp RD, TISNUM
+ | sd RD, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | daddu RD, KBASE, RD
+ | daddu RA, BASE, RA
+ | ld CRET1, 0(RD)
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | daddu RA, BASE, RA
+ | dsll TMP0, RD, 44
+ | not TMP0, TMP0
+ | ins_next1
+ | sd TMP0, 0(RA)
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
+ | daddu RA, BASE, RA
+ | sd TISNIL, 0(RA)
+ | daddiu RA, RA, 8
+ | daddu RD, BASE, RD
+ |1:
+ | sd TISNIL, 0(RA)
+ | slt AT, RA, RD
+ | bnez AT, <1
+ |. daddiu RA, RA, 8
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RA, BASE, RA
+ | cleartp LFUNC:RB
+ | daddu RD, RD, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RD->uvptr
+ | ins_next1
+ | ld TMP1, UPVAL:RB->v
+ | ld CRET1, 0(TMP1)
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RD, BASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld CRET1, 0(RD)
+ | lbu TMP3, UPVAL:RB->marked
+ | ld CARG2, UPVAL:RB->v
+ | andi TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP0, UPVAL:RB->closed
+ | gettp TMP2, CRET1
+ | sd CRET1, 0(CARG2)
+ | li AT, LJ_GC_BLACK|1
+ | or TMP3, TMP3, TMP0
+ | beq TMP3, AT, >2 // Upvalue is closed and black?
+ |. daddiu TMP2, TMP2, -(LJ_TNUMX+1)
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | sltiu AT, TMP2, LJ_TISGCV - (LJ_TNUMX+1)
+ | beqz AT, <1 // tvisgcv(v)
+ |. cleartp GCOBJ:CRET1, CRET1
+ | lbu TMP3, GCOBJ:CRET1->gch.marked
+ | andi TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | beqz TMP3, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. daddiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsubu TMP1, KBASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld STR:TMP1, -8(TMP1) // KBASE-8-str_const*8
+ | lbu TMP2, UPVAL:RB->marked
+ | ld CARG2, UPVAL:RB->v
+ | lbu TMP3, STR:TMP1->marked
+ | andi AT, TMP2, LJ_GC_BLACK // isblack(uv)
+ | lbu TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | settp TMP1, TMP0
+ | bnez AT, >2
+ |. sd TMP1, 0(CARG2)
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | beqz TMP2, <1
+ |. andi AT, TMP3, LJ_GC_WHITES // iswhite(str)
+ | beqz AT, <1
+ |. load_got lj_gc_barrieruv
+ | // Crossed a write barrier. Move the barrier forward.
+ | call_intern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ |. daddiu CARG1, DISPATCH, GG_DISP2G
+ | b <1
+ |. nop
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | daddu RD, KBASE, RD
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ld CRET1, 0(RD)
+ | ld TMP1, UPVAL:RB->v
+ | ins_next1
+ | sd CRET1, 0(TMP1)
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | ld LFUNC:RB, FRAME_FUNC(BASE)
+ | dsll TMP0, RD, 44
+ | cleartp LFUNC:RB
+ | daddu RA, RA, LFUNC:RB
+ | not TMP0, TMP0
+ | ld UPVAL:RB, LFUNC:RA->uvptr
+ | ins_next1
+ | ld TMP1, UPVAL:RB->v
+ | sd TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | ld TMP2, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | load_got lj_func_closeuv
+ | sd BASE, L->base
+ | beqz TMP2, >1
+ |. move CARG1, L
+ | call_intern lj_func_closeuv // (lua_State *L, TValue *level)
+ |. daddu CARG2, BASE, RA
+ | ld BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | load_got lj_func_newL_gc
+ | dsubu TMP1, KBASE, RD
+ | ld CARG3, FRAME_FUNC(BASE)
+ | ld CARG2, -8(TMP1) // KBASE-8-proto_const*8
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | cleartp CARG3
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call_intern lj_func_newL_gc
+ |. move CARG1, L
+ | // Returns GCfuncL *.
+ | li TMP0, LJ_TFUNC
+ | ld BASE, L->base
+ | ins_next1
+ | settp CRET1, TMP0
+ | daddu RA, BASE, RA
+ | sd CRET1, 0(RA)
+ | ins_next2
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | ld TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | ld TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | sltu AT, TMP0, TMP1
+ | beqz AT, >5
+ |1:
+ if (op == BC_TNEW) {
+ | load_got lj_tab_new
+ | srl CARG2, RD, 3
+ | andi CARG2, CARG2, 0x7ff
+ | li TMP0, 0x801
+ | addiu AT, CARG2, -0x7ff
+ | srl CARG3, RD, 14
+ |.if MIPSR6
+ | seleqz TMP0, TMP0, AT
+ | selnez CARG2, CARG2, AT
+ | or CARG2, CARG2, TMP0
+ |.else
+ | movz CARG2, TMP0, AT
+ |.endif
+ | // (lua_State *L, int32_t asize, uint32_t hbits)
+ | call_intern lj_tab_new
+ |. move CARG1, L
+ | // Returns Table *.
+ } else {
+ | load_got lj_tab_dup
+ | dsubu TMP1, KBASE, RD
+ | move CARG1, L
+ | call_intern lj_tab_dup // (lua_State *L, Table *kt)
+ |. ld CARG2, -8(TMP1) // KBASE-8-tab_const*8
+ | // Returns Table *.
+ }
+ | li TMP0, LJ_TTAB
+ | ld BASE, L->base
+ | ins_next1
+ | daddu RA, BASE, RA
+ | settp CRET1, TMP0
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |5:
+ | load_got lj_gc_step_fixtop
+ | move MULTRES, RD
+ | call_intern lj_gc_step_fixtop // (lua_State *L)
+ |. move CARG1, L
+ | b <1
+ |. move RD, MULTRES
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | ld LFUNC:TMP2, FRAME_FUNC(BASE)
+ | dsubu TMP1, KBASE, RD
+ | ld STR:RC, -8(TMP1) // KBASE-8-str_const*8
+ | cleartp LFUNC:TMP2
+ | ld TAB:RB, LFUNC:TMP2->env
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ |. daddu RA, BASE, RA
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG2, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld TAB:RB, 0(CARG2)
+ | ld TMP2, 0(CARG3)
+ | daddu RA, BASE, RA
+ | checktab TAB:RB, ->vmeta_tgetv
+ | gettp TMP3, TMP2
+ | bne TMP3, TISNUM, >5 // Integer key?
+ |. lw TMP0, TAB:RB->asize
+ | sextw TMP2, TMP2
+ | ld TMP1, TAB:RB->array
+ | sltu AT, TMP2, TMP0
+ | sll TMP2, TMP2, 3
+ | beqz AT, ->vmeta_tgetv // Integer key and in array part?
+ |. daddu TMP2, TMP1, TMP2
+ | ld AT, 0(TMP2)
+ | beq AT, TISNIL, >2
+ |. ld CRET1, 0(TMP2)
+ |1:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetv
+ |. nop
+ |
+ |5:
+ | li AT, LJ_TSTR
+ | bne TMP3, AT, ->vmeta_tgetv
+ |. cleartp RC, TMP2
+ | b ->BC_TGETS_Z // String key?
+ |. nop
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RC8a RC, INS
+ | daddu CARG2, BASE, RB
+ | decode_RC8b RC
+ | ld TAB:RB, 0(CARG2)
+ | dsubu CARG3, KBASE, RC
+ | daddu RA, BASE, RA
+ | ld STR:RC, -8(CARG3) // KBASE-8-str_const*8
+ | checktab TAB:RB, ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | li TMP3, LJ_TSTR
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | settp STR:RC, TMP3 // Tagged key to look for.
+ |1:
+ | ld CARG1, NODE:TMP2->key
+ | ld CRET1, NODE:TMP2->val
+ | ld NODE:TMP1, NODE:TMP2->next
+ | bne CARG1, RC, >4
+ |. ld TAB:TMP3, TAB:RB->metatable
+ | beq CRET1, TISNIL, >5 // Key found, but nil value?
+ |. nop
+ |3:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |4: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | beqz TAB:TMP3, <3 // No metatable: done.
+ |. move CRET1, TISNIL
+ | lbu TMP0, TAB:TMP3->nomm
+ | andi TMP0, TMP0, 1<<MM_index
+ | bnez TMP0, <3 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgets
+ |. nop
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | ld TAB:RB, 0(CARG2)
+ | daddu RA, BASE, RA
+ | srl TMP0, RC, 3
+ | checktab TAB:RB, ->vmeta_tgetb
+ | lw TMP1, TAB:RB->asize
+ | ld TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tgetb
+ |. daddu RC, TMP2, RC
+ | ld AT, 0(RC)
+ | beq AT, TISNIL, >5
+ |. ld CRET1, 0(RC)
+ |1:
+ | ins_next1
+ | sd CRET1, 0(RA)
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_index
+ | bnez TMP1, <1 // 'no __index' flag set: done.
+ |. nop
+ | b ->vmeta_tgetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ break;
+ case BC_TGETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu RB, BASE, RB
+ | daddu RC, BASE, RC
+ | ld TAB:CARG1, 0(RB)
+ | lw CARG2, LO(RC)
+ | daddu RA, BASE, RA
+ | cleartp TAB:CARG1
+ | lw TMP0, TAB:CARG1->asize
+ | ld TMP1, TAB:CARG1->array
+ | sltu AT, CARG2, TMP0
+ | sll TMP2, CARG2, 3
+ | beqz AT, ->vmeta_tgetr // In array part?
+ |. daddu CRET1, TMP1, TMP2
+ | ld CARG2, 0(CRET1)
+ |->BC_TGETR_Z:
+ | ins_next1
+ | sd CARG2, 0(RA)
+ | ins_next2
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG2, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld RB, 0(CARG2)
+ | ld TMP2, 0(CARG3)
+ | daddu RA, BASE, RA
+ | checktab RB, ->vmeta_tsetv
+ | checkint TMP2, >5
+ |. sextw RC, TMP2
+ | lw TMP0, TAB:RB->asize
+ | ld TMP1, TAB:RB->array
+ | sltu AT, RC, TMP0
+ | sll TMP2, RC, 3
+ | beqz AT, ->vmeta_tsetv // Integer key and in array part?
+ |. daddu TMP1, TMP1, TMP2
+ | ld TMP0, 0(TMP1)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP0, TISNIL, >3
+ |. ld CRET1, 0(RA)
+ |1:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sd CRET1, 0(TMP1)
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP2, TAB:TMP2->nomm
+ | andi TMP2, TMP2, 1<<MM_newindex
+ | bnez TMP2, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetv
+ |. nop
+ |
+ |5:
+ | gettp AT, TMP2
+ | daddiu AT, AT, -LJ_TSTR
+ | bnez AT, ->vmeta_tsetv
+ |. nop
+ | b ->BC_TSETS_Z // String key?
+ |. cleartp STR:RC, TMP2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RC8a RC, INS
+ | ld TAB:RB, 0(CARG2)
+ | decode_RC8b RC
+ | dsubu CARG3, KBASE, RC
+ | ld RC, -8(CARG3) // KBASE-8-str_const*8
+ | daddu RA, BASE, RA
+ | cleartp STR:RC
+ | checktab TAB:RB, ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = BASE+src*8
+ | lw TMP0, TAB:RB->hmask
+ | lw TMP1, STR:RC->sid
+ | ld NODE:TMP2, TAB:RB->node
+ | sb r0, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | sll TMP0, TMP1, 5
+ | sll TMP1, TMP1, 3
+ | subu TMP1, TMP0, TMP1
+ | li TMP3, LJ_TSTR
+ | daddu NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ | settp STR:RC, TMP3 // Tagged key to look for.
+ |.if FPU
+ | ldc1 FTMP0, 0(RA)
+ |.else
+ | ld CRET1, 0(RA)
+ |.endif
+ |1:
+ | ld TMP0, NODE:TMP2->key
+ | ld CARG2, NODE:TMP2->val
+ | ld NODE:TMP1, NODE:TMP2->next
+ | bne TMP0, RC, >5
+ |. lbu TMP3, TAB:RB->marked
+ | beq CARG2, TISNIL, >4 // Key found, but nil value?
+ |. ld TAB:TMP0, TAB:RB->metatable
+ |2:
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |.if FPU
+ |. sdc1 FTMP0, NODE:TMP2->val
+ |.else
+ |. sd CRET1, NODE:TMP2->val
+ |.endif
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | beqz TAB:TMP0, <2 // No metatable: done.
+ |. nop
+ | lbu TMP0, TAB:TMP0->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | bnez TMP0, <2 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsets
+ |. nop
+ |
+ |5: // Follow hash chain.
+ | bnez NODE:TMP1, <1
+ |. move NODE:TMP2, NODE:TMP1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, >6 // No metatable: continue.
+ |. daddiu CARG3, DISPATCH, DISPATCH_GL(tmptv)
+ | lbu TMP0, TAB:TMP2->nomm
+ | andi TMP0, TMP0, 1<<MM_newindex
+ | beqz TMP0, ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | load_got lj_tab_newkey
+ | sd RC, 0(CARG3)
+ | sd BASE, L->base
+ | move CARG2, TAB:RB
+ | sd PC, SAVE_PC
+ | call_intern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ |. move CARG1, L
+ | // Returns TValue *.
+ | ld BASE, L->base
+ |.if FPU
+ | b <3 // No 2nd write barrier needed.
+ |. sdc1 FTMP0, 0(CRET1)
+ |.else
+ | ld CARG1, 0(RA)
+ | b <3 // No 2nd write barrier needed.
+ |. sd CARG1, 0(CRET1)
+ |.endif
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | daddu CARG2, BASE, RB
+ | decode_RDtoRC8 RC, RD
+ | ld TAB:RB, 0(CARG2)
+ | daddu RA, BASE, RA
+ | srl TMP0, RC, 3
+ | checktab RB, ->vmeta_tsetb
+ | lw TMP1, TAB:RB->asize
+ | ld TMP2, TAB:RB->array
+ | sltu AT, TMP0, TMP1
+ | beqz AT, ->vmeta_tsetb
+ |. daddu RC, TMP2, RC
+ | ld TMP1, 0(RC)
+ | lbu TMP3, TAB:RB->marked
+ | beq TMP1, TISNIL, >5
+ |1:
+ |. ld CRET1, 0(RA)
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. sd CRET1, 0(RC)
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | ld TAB:TMP2, TAB:RB->metatable
+ | beqz TAB:TMP2, <1 // No metatable: done.
+ |. nop
+ | lbu TMP1, TAB:TMP2->nomm
+ | andi TMP1, TMP1, 1<<MM_newindex
+ | bnez TMP1, <1 // 'no __newindex' flag set: done.
+ |. nop
+ | b ->vmeta_tsetb // Caveat: preserve TMP0 and CARG2!
+ |. nop
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0, <2
+ break;
+ case BC_TSETR:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | decode_RB8a RB, INS
+ | decode_RB8b RB
+ | decode_RDtoRC8 RC, RD
+ | daddu CARG1, BASE, RB
+ | daddu CARG3, BASE, RC
+ | ld TAB:CARG2, 0(CARG1)
+ | lw CARG3, LO(CARG3)
+ | cleartp TAB:CARG2
+ | lbu TMP3, TAB:CARG2->marked
+ | lw TMP0, TAB:CARG2->asize
+ | ld TMP1, TAB:CARG2->array
+ | andi AT, TMP3, LJ_GC_BLACK // isblack(table)
+ | bnez AT, >7
+ |. daddu RA, BASE, RA
+ |2:
+ | sltu AT, CARG3, TMP0
+ | sll TMP2, CARG3, 3
+ | beqz AT, ->vmeta_tsetr // In array part?
+ |. daddu CRET1, TMP1, TMP2
+ |->BC_TSETR_Z:
+ | ld CARG1, 0(RA)
+ | ins_next1
+ | sd CARG1, 0(CRET1)
+ | ins_next2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, CRET1, <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | daddu RA, BASE, RA
+ |1:
+ | daddu TMP3, KBASE, RD
+ | ld TAB:CARG2, -8(RA) // Guaranteed to be a table.
+ | addiu TMP0, MULTRES, -8
+ | lw TMP3, LO(TMP3) // Integer constant is in lo-word.
+ | beqz TMP0, >4 // Nothing to copy?
+ |. srl CARG3, TMP0, 3
+ | cleartp CARG2
+ | addu CARG3, CARG3, TMP3
+ | lw TMP2, TAB:CARG2->asize
+ | sll TMP1, TMP3, 3
+ | lbu TMP3, TAB:CARG2->marked
+ | ld CARG1, TAB:CARG2->array
+ | sltu AT, TMP2, CARG3
+ | bnez AT, >5
+ |. daddu TMP2, RA, TMP0
+ | daddu TMP1, TMP1, CARG1
+ | andi TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | sltu AT, RA, TMP2
+ | sd CRET1, 0(TMP1)
+ | bnez AT, <3
+ |. daddiu TMP1, TMP1, 8
+ | bnez TMP0, >7
+ |. nop
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | load_got lj_tab_reasize
+ | sd BASE, L->base
+ | sd PC, SAVE_PC
+ | move BASE, RD
+ | call_intern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ |. move CARG1, L
+ | // Must not reallocate the stack.
+ | move RD, BASE
+ | b <1
+ |. ld BASE, L->base // Reload BASE for lack of a saved register.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0, <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ | b ->BC_CALL_Z
+ |. addu NARGS8:RC, NARGS8:RC, MULTRES
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | decode_RDtoRC8 NARGS8:RC, RD
+ |->BC_CALL_Z:
+ | move TMP2, BASE
+ | daddu BASE, BASE, RA
+ | ld LFUNC:RB, 0(BASE)
+ | daddiu BASE, BASE, 16
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | checkfunc RB, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | addu NARGS8:RD, NARGS8:RD, MULTRES // BC_CALLT gets RC from RD.
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | daddu RA, BASE, RA
+ | ld RB, 0(RA)
+ | move NARGS8:RC, RD
+ | ld TMP1, FRAME_PC(BASE)
+ | daddiu RA, RA, 16
+ | addiu NARGS8:RC, NARGS8:RC, -8
+ | checktp CARG3, RB, -LJ_TFUNC, ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andi TMP0, TMP1, FRAME_TYPE // Caveat: preserve TMP0 until the 'or'.
+ | lbu TMP3, LFUNC:CARG3->ffid
+ | bnez TMP0, >7
+ |. xori TMP2, TMP1, FRAME_VARG
+ |1:
+ | sd RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | sltiu AT, TMP3, 2 // (> FF_C) Calling a fast function?
+ | move TMP2, BASE
+ | move RB, CARG3
+ | beqz NARGS8:RC, >3
+ |. move TMP3, NARGS8:RC
+ |2:
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | addiu TMP3, TMP3, -8
+ | sd CRET1, 0(TMP2)
+ | bnez TMP3, <2
+ |. daddiu TMP2, TMP2, 8
+ |3:
+ | or TMP0, TMP0, AT
+ | beqz TMP0, >5
+ |. nop
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lw INS, -4(TMP1)
+ | decode_RA8a RA, INS
+ | decode_RA8b RA
+ | dsubu TMP1, BASE, RA
+ | ld TMP1, -32(TMP1)
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | b <4
+ |. ld KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ |
+ |7: // Tailcall from a vararg function.
+ | andi AT, TMP2, FRAME_TYPEP
+ | bnez AT, <1 // Vararg frame below?
+ |. dsubu TMP2, BASE, TMP2 // Relocate BASE down.
+ | move BASE, TMP2
+ | ld TMP1, FRAME_PC(TMP2)
+ | b <1
+ |. andi TMP0, TMP1, FRAME_TYPE
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | move TMP2, BASE // Save old BASE for vmeta_call.
+ | daddu BASE, BASE, RA
+ | ld RB, -24(BASE)
+ | ld CARG1, -16(BASE)
+ | ld CARG2, -8(BASE)
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ | sd RB, 0(BASE) // Copy callable.
+ | sd CARG1, 16(BASE) // Copy state.
+ | sd CARG2, 24(BASE) // Copy control var.
+ | daddiu BASE, BASE, 16
+ | checkfunc RB, ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT and ENDIAN_LE
+ | hotloop
+ |.endif
+ |->vm_IITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | daddu RA, BASE, RA
+ | ld TAB:RB, -16(RA)
+ | lw RC, -8+LO(RA) // Get index from control var.
+ | cleartp TAB:RB
+ | daddiu PC, PC, 4
+ | lw TMP0, TAB:RB->asize
+ | ld TMP1, TAB:RB->array
+ | dsll CARG3, TISNUM, 47
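+ | // The control var holds the iteration index: below asize while in the
+ | // array part, asize plus the node index afterwards. CARG3 is the integer
+ | // type tag, pre-shifted for re-tagging array keys.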
+ |1: // Traverse array part.
+ | sltu AT, RC, TMP0
+ | beqz AT, >5 // Index points after array part?
+ |. sll TMP3, RC, 3
+ | daddu TMP3, TMP1, TMP3
+ | ld CARG1, 0(TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | or TMP2, RC, CARG3
+ | beq CARG1, TISNIL, <1 // Skip holes in array part.
+ |. addiu RC, RC, 1
+ | sd TMP2, 0(RA)
+ | sd CARG1, 8(RA)
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | decode_RD4b RD
+ | daddu RD, RD, TMP3
+ | sw RC, -8+LO(RA) // Update control var.
+ | daddu PC, PC, RD
+ |3:
+ | ins_next
+ |
+ |5: // Traverse hash part.
+ | lw TMP1, TAB:RB->hmask
+ | subu RC, RC, TMP0
+ | ld TMP2, TAB:RB->node
+ |6:
+ | sltu AT, TMP1, RC // End of iteration? Branch to ITERL+1.
+ | bnez AT, <3
+ |. sll TMP3, RC, 5
+ | sll RB, RC, 3
+ | subu TMP3, TMP3, RB
+ | daddu NODE:TMP3, TMP3, TMP2
+ | ld CARG1, 0(NODE:TMP3)
+ | lhu RD, -4+OFS_RD(PC)
+ | beq CARG1, TISNIL, <6 // Skip holes in hash part.
+ |. addiu RC, RC, 1
+ | ld CARG2, NODE:TMP3->key
+ | lui TMP3, (-(BCBIAS_J*4 >> 16) & 65535)
+ | sd CARG1, 8(RA)
+ | addu RC, RC, TMP0
+ | decode_RD4b RD
+ | addu RD, RD, TMP3
+ | sd CARG2, 0(RA)
+ | daddu PC, PC, RD
+ | b <3
+ |. sw RC, -8+LO(RA) // Update control var.
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | daddu RA, BASE, RA
+ | srl TMP0, RD, 1
+ | ld CFUNC:CARG1, -24(RA)
+ | daddu TMP0, PC, TMP0
+ | ld CARG2, -16(RA)
+ | ld CARG3, -8(RA)
+ | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | checkfunc CFUNC:CARG1, >5
+ | gettp CARG2, CARG2
+ | daddiu CARG2, CARG2, -LJ_TTAB
+ | lbu TMP1, CFUNC:CARG1->ffid
+ | daddiu CARG3, CARG3, -LJ_TNIL
+ | or AT, CARG2, CARG3
+ | daddiu TMP1, TMP1, -FF_next_N
+ | or AT, AT, TMP1
+ | bnez AT, >5
+ |. lui TMP1, (LJ_KEYINDEX >> 16)
+ | daddu PC, TMP0, TMP2
+ | ori TMP1, TMP1, (LJ_KEYINDEX & 0xffff)
+ | dsll TMP1, TMP1, 32
+ | sd TMP1, -8(RA)
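+ | // Initialize the control var: LJ_KEYINDEX marker in the upper word,
+ | // start index 0 in the lower word.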
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP3, BC_JMP
+ | li TMP1, BC_ITERC
+ | sb TMP3, -4+OFS_OP(PC)
+ | daddu PC, TMP0, TMP2
+ |.if JIT
+ | lb TMP0, OFS_OP(PC)
+ | li AT, BC_ITERN
+ | bne TMP0, AT, >6
+ |. lhu TMP2, OFS_RD(PC)
+ |.endif
+ | b <1
+ |. sb TMP1, OFS_OP(PC)
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | ld TMP0, DISPATCH_J(trace)(DISPATCH)
+ | sll TMP2, TMP2, 3
+ | daddu TMP0, TMP0, TMP2
+ | ld TRACE:TMP2, 0(TMP0)
+ | lw TMP0, TRACE:TMP2->startins
+ | li AT, -256
+ | and TMP0, TMP0, AT
+ | or TMP0, TMP0, TMP1
+ | b <1
+ |. sw TMP0, 0(PC)
+ |.endif
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | ld TMP0, FRAME_PC(BASE)
+ | decode_RDtoRC8 RC, RD
+ | decode_RB8a RB, INS
+ | daddu RC, BASE, RC
+ | decode_RB8b RB
+ | daddu RA, BASE, RA
+ | daddiu RC, RC, FRAME_VARG
+ | daddu TMP2, RA, RB
+ | daddiu TMP3, BASE, -16 // TMP3 = vtop
+ | dsubu RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
+ | beqz RB, >5 // Copy all varargs?
+ |. dsubu TMP1, TMP3, RC
+ | daddiu TMP2, TMP2, -16
+ |1: // Copy vararg slots to destination slots.
+ | ld CARG1, 0(RC)
+ | sltu AT, RC, TMP3
+ | daddiu RC, RC, 8
+ |.if MIPSR6
+ | selnez CARG1, CARG1, AT
+ | seleqz AT, TISNIL, AT
+ | or CARG1, CARG1, AT
+ |.else
+ | movz CARG1, TISNIL, AT
+ |.endif
+ | sd CARG1, 0(RA)
+ | sltu AT, RA, TMP2
+ | bnez AT, <1
+ |. daddiu RA, RA, 8
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | ld TMP0, L->maxstack
+ | blez TMP1, <3 // No vararg slots?
+ |. li MULTRES, 8 // MULTRES = (0+1)*8
+ | daddu TMP2, RA, TMP1
+ | sltu AT, TMP0, TMP2
+ | bnez AT, >7
+ |. daddiu MULTRES, TMP1, 8
+ |6:
+ | ld CRET1, 0(RC)
+ | daddiu RC, RC, 8
+ | sd CRET1, 0(RA)
+ | sltu AT, RC, TMP3
+ | bnez AT, <6 // More vararg slots?
+ |. daddiu RA, RA, 8
+ | b <3
+ |. nop
+ |
+ |7: // Grow stack for varargs.
+ | load_got lj_state_growstack
+ | sd RA, L->top
+ | dsubu RA, RA, BASE
+ | sd BASE, L->base
+ | dsubu BASE, RC, BASE // Need delta, because BASE may change.
+ | sd PC, SAVE_PC
+ | srl CARG2, TMP1, 3
+ | call_intern lj_state_growstack // (lua_State *L, int n)
+ |. move CARG1, L
+ | move RC, BASE
+ | ld BASE, L->base
+ | daddu RA, BASE, RA
+ | daddu RC, BASE, RC
+ | b <6
+ |. daddiu TMP3, BASE, -16
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | addu RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | ld PC, FRAME_PC(BASE)
+ | daddu RA, BASE, RA
+ | move MULTRES, RD
+ |1:
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lw INS, -4(PC)
+ | daddiu TMP2, BASE, -16
+ | daddiu RC, RD, -8
+ | decode_RA8a TMP0, INS
+ | decode_RB8a RB, INS
+ | decode_RA8b TMP0
+ | decode_RB8b RB
+ | daddu TMP3, TMP2, RB
+ | beqz RC, >3
+ |. dsubu BASE, TMP2, TMP0
+ |2:
+ | ld CRET1, 0(RA)
+ | daddiu RA, RA, 8
+ | daddiu RC, RC, -8
+ | sd CRET1, 0(TMP2)
+ | bnez RC, <2
+ |. daddiu TMP2, TMP2, 8
+ |3:
+ | daddiu TMP3, TMP3, -8
+ |5:
+ | sltu AT, TMP2, TMP3
+ | bnez AT, >6
+ |. ld LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | sd TISNIL, 0(TMP2)
+ | b <5
+ |. daddiu TMP2, TMP2, 8
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andi TMP2, TMP1, FRAME_TYPEP
+ | bnez TMP2, ->vm_return
+ |. nop
+ | // Return from vararg function: relocate BASE down.
+ | dsubu BASE, BASE, TMP1
+ | b <1
+ |. ld PC, FRAME_PC(BASE)
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | ld PC, FRAME_PC(BASE)
+ | daddu RA, BASE, RA
+ | move MULTRES, RD
+ | andi TMP0, PC, FRAME_TYPE
+ | bnez TMP0, ->BC_RETV_Z
+ |. xori TMP1, PC, FRAME_VARG
+ | lw INS, -4(PC)
+ | daddiu TMP2, BASE, -16
+ if (op == BC_RET1) {
+ | ld CRET1, 0(RA)
+ }
+ | decode_RB8a RB, INS
+ | decode_RA8a RA, INS
+ | decode_RB8b RB
+ | decode_RA8b RA
+ | dsubu BASE, TMP2, RA
+ if (op == BC_RET1) {
+ | sd CRET1, 0(TMP2)
+ }
+ |5:
+ | sltu AT, RD, RB
+ | bnez AT, >6
+ |. ld TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | cleartp LFUNC:TMP1
+ | ld TMP1, LFUNC:TMP1->pc
+ | ld KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | daddiu TMP2, TMP2, 8
+ | daddiu RD, RD, 8
+ | b <5
+ if (op == BC_RET1) {
+ |. sd TISNIL, 0(TMP2)
+ } else {
+ |. sd TISNIL, -8(TMP2)
+ }
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | daddu RA, BASE, RA
+ | ld CARG1, FORL_IDX*8(RA) // IDX CARG1 - CARG3 type
+ | gettp CARG3, CARG1
+ if (op != BC_JFORL) {
+ | srl RD, RD, 1
+ | lui TMP2, (-(BCBIAS_J*4 >> 16) & 65535)
+ | daddu TMP2, RD, TMP2
+ }
+ if (!vk) {
+ | ld CARG2, FORL_STOP*8(RA) // STOP CARG2 - CARG4 type
+ | ld CRET1, FORL_STEP*8(RA) // STEP CRET1 - CRET2 type
+ | gettp CARG4, CARG2
+ | bne CARG3, TISNUM, >5
+ |. gettp CRET2, CRET1
+ | bne CARG4, TISNUM, ->vmeta_for
+ |. sextw CARG3, CARG1
+ | bne CRET2, TISNUM, ->vmeta_for
+ |. sextw CARG2, CARG2
+ | dext AT, CRET1, 31, 0
+ | slt CRET1, CARG2, CARG3
+ | slt TMP1, CARG3, CARG2
+ |.if MIPSR6
+ | selnez TMP1, TMP1, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP1
+ |.else
+ | movn CRET1, TMP1, AT
+ |.endif
+ } else {
+ | bne CARG3, TISNUM, >5
+ |. ld CARG2, FORL_STEP*8(RA) // STEP CARG2 - CARG4 type
+ | ld CRET1, FORL_STOP*8(RA) // STOP CRET1 - CRET2 type
+ | sextw TMP3, CARG1
+ | sextw CARG2, CARG2
+ | sextw CRET1, CRET1
+ | addu CARG1, TMP3, CARG2
+ | xor TMP0, CARG1, TMP3
+ | xor TMP1, CARG1, CARG2
+ | and TMP0, TMP0, TMP1
+ | slt TMP1, CARG1, CRET1
+ | slt CRET1, CRET1, CARG1
+ | slt AT, CARG2, r0
+ | slt TMP0, TMP0, r0 // ((y^a) & (y^b)) < 0: overflow.
+ |.if MIPSR6
+ | selnez TMP1, TMP1, AT
+ | seleqz CRET1, CRET1, AT
+ | or CRET1, CRET1, TMP1
+ |.else
+ | movn CRET1, TMP1, AT
+ |.endif
+ | or CRET1, CRET1, TMP0
+ | zextw CARG1, CARG1
+ | settp CARG1, TISNUM
+ }
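+ | // CRET1 != 0 means the loop terminates: idx is beyond stop in the
+ | // direction of the step's sign (or the integer increment overflowed).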
+ |1:
+ if (op == BC_FORI) {
+ |.if MIPSR6
+ | selnez TMP2, TMP2, CRET1
+ |.else
+ | movz TMP2, r0, CRET1
+ |.endif
+ | daddu PC, PC, TMP2
+ } else if (op == BC_JFORI) {
+ | daddu PC, PC, TMP2
+ | lhu RD, -4+OFS_RD(PC)
+ } else if (op == BC_IFORL) {
+ |.if MIPSR6
+ | seleqz TMP2, TMP2, CRET1
+ |.else
+ | movn TMP2, r0, CRET1
+ |.endif
+ | daddu PC, PC, TMP2
+ }
+ if (vk) {
+ | sd CARG1, FORL_IDX*8(RA)
+ }
+ | ins_next1
+ | sd CARG1, FORL_EXT*8(RA)
+ |2:
+ if (op == BC_JFORI) {
+ | beqz CRET1, =>BC_JLOOP
+ |. decode_RD8b RD
+ } else if (op == BC_JFORL) {
+ | beqz CRET1, =>BC_JLOOP
+ }
+ | ins_next2
+ |
+ |5: // FP loop.
+ |.if FPU
+ if (!vk) {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | sltiu AT, CRET2, LJ_TISNUM
+ | ld TMP3, FORL_STEP*8(RA)
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. slt TMP3, TMP3, r0
+ |.if MIPSR6
+ | dmtc1 TMP3, FTMP2
+ | cmp.lt.d FTMP0, f0, f2
+ | cmp.lt.d FTMP1, f2, f0
+ | sel.d FTMP2, FTMP1, FTMP0
+ | b <1
+ |. dmfc1 CRET1, FTMP2
+ |.else
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | li CRET1, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | b <1
+ |. movn CRET1, AT, TMP3
+ |.endif
+ } else {
+ | ldc1 f0, FORL_IDX*8(RA)
+ | ldc1 f4, FORL_STEP*8(RA)
+ | ldc1 f2, FORL_STOP*8(RA)
+ | ld TMP3, FORL_STEP*8(RA)
+ | add.d f0, f0, f4
+ |.if MIPSR6
+ | slt TMP3, TMP3, r0
+ | dmtc1 TMP3, FTMP2
+ | cmp.lt.d FTMP0, f0, f2
+ | cmp.lt.d FTMP1, f2, f0
+ | sel.d FTMP2, FTMP1, FTMP0
+ | dmfc1 CRET1, FTMP2
+ if (op == BC_IFORL) {
+ | seleqz TMP2, TMP2, CRET1
+ | daddu PC, PC, TMP2
+ }
+ |.else
+ | c.ole.d 0, f0, f2
+ | c.ole.d 1, f2, f0
+ | slt TMP3, TMP3, r0
+ | li CRET1, 1
+ | li AT, 1
+ | movt CRET1, r0, 0
+ | movt AT, r0, 1
+ | movn CRET1, AT, TMP3
+ if (op == BC_IFORL) {
+ | movn TMP2, r0, CRET1
+ | daddu PC, PC, TMP2
+ }
+ |.endif
+ | sdc1 f0, FORL_IDX*8(RA)
+ | ins_next1
+ | b <2
+ |. sdc1 f0, FORL_EXT*8(RA)
+ }
+ |.else
+ if (!vk) {
+ | sltiu TMP0, CARG3, LJ_TISNUM
+ | sltiu TMP1, CARG4, LJ_TISNUM
+ | sltiu AT, CRET2, LJ_TISNUM
+ | and TMP0, TMP0, TMP1
+ | and AT, AT, TMP0
+ | beqz AT, ->vmeta_for
+ |. nop
+ | bal ->vm_sfcmpolex
+ |. lw TMP3, FORL_STEP*8+HI(RA)
+ | b <1
+ |. nop
+ } else {
+ | load_got __adddf3
+ | call_extern
+ |. sw TMP2, TMPD
+ | ld CARG2, FORL_STOP*8(RA)
+ | move CARG1, CRET1
+ if ( op == BC_JFORL ) {
+ | lhu RD, -4+OFS_RD(PC)
+ | decode_RD8b RD
+ }
+ | bal ->vm_sfcmpolex
+ |. lw TMP3, FORL_STEP*8+HI(RA)
+ | b <1
+ |. lw TMP2, TMPD
+ }
+ |.endif
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | daddu RA, BASE, RA
+ | ld TMP1, 0(RA)
+ | beq TMP1, TISNIL, >1 // Stop if iterator returned nil.
+ |. nop
+ if (op == BC_JITERL) {
+ | b =>BC_JLOOP
+ |. sd TMP1, -8(RA)
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | sd TMP1, -8(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD is only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | ld TMP1, DISPATCH_J(trace)(DISPATCH)
+ | li AT, 0
+ | daddu TMP1, TMP1, RD
+ | // Traces on MIPS don't store the trace number, so use 0.
+ | sd AT, DISPATCH_GL(vmstate)(DISPATCH)
+ | ld TRACE:TMP2, 0(TMP1)
+ | sd BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | ld TMP2, TRACE:TMP2->mcode
+ | sd L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
+ | jr TMP2
+ |. daddiu JGL, DISPATCH, GG_DISP2G+32768
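+ | // JGL = g + 32768: trace mcode reaches all of global_State with signed
+ | // 16-bit offsets from this biased base.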
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | ld TMP2, L->maxstack
+ | lbu TMP1, -4+PC2PROTO(numparams)(PC)
+ | ld KBASE, -4+PC2PROTO(k)(PC)
+ | sltu AT, TMP2, RA
+ | bnez AT, ->vm_growstack_l
+ |. sll TMP1, TMP1, 3
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | sltu AT, NARGS8:RC, TMP1 // Check for missing parameters.
+ | bnez AT, >3
+ |. daddu AT, BASE, NARGS8:RC
+ if (op == BC_JFUNCF) {
+ | decode_RD8a RD, INS
+ | b =>BC_JLOOP
+ |. decode_RD8b RD
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | sd TISNIL, 0(AT)
+ | b <2
+ |. addiu NARGS8:RC, NARGS8:RC, 8
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | li TMP0, LJ_TFUNC
+ | daddu TMP1, BASE, RC
+ | ld TMP2, L->maxstack
+ | settp LFUNC:RB, TMP0
+ | daddu TMP0, RA, RC
+ | sd LFUNC:RB, 0(TMP1) // Store (tagged) copy of LFUNC.
+ | daddiu TMP3, RC, 16+FRAME_VARG
+ | sltu AT, TMP0, TMP2
+ | ld KBASE, -4+PC2PROTO(k)(PC)
+ | beqz AT, ->vm_growstack_l
+ |. sd TMP3, 8(TMP1) // Store delta + FRAME_VARG.
+ | lbu TMP2, -4+PC2PROTO(numparams)(PC)
+ | move RA, BASE
+ | move RC, TMP1
+ | ins_next1
+ | beqz TMP2, >3
+ |. daddiu BASE, TMP1, 16
+ |1:
+ | ld TMP0, 0(RA)
+ | sltu AT, RA, RC // Less args than parameters?
+ | move CARG1, TMP0
+ |.if MIPSR6
+ | selnez TMP0, TMP0, AT
+ | seleqz TMP3, TISNIL, AT
+ | or TMP0, TMP0, TMP3
+ | seleqz TMP3, CARG1, AT
+ | selnez CARG1, TISNIL, AT
+ | or CARG1, CARG1, TMP3
+ |.else
+ | movz TMP0, TISNIL, AT // Clear missing parameters.
+ | movn CARG1, TISNIL, AT // Clear old fixarg slot (help the GC).
+ |.endif
+ | addiu TMP2, TMP2, -1
+ | sd TMP0, 16(TMP1)
+ | daddiu TMP1, TMP1, 8
+ | sd CARG1, 0(RA)
+ | bnez TMP2, <1
+ |. daddiu RA, RA, 8
+ |3:
+ | ins_next2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | ld CFUNCADDR, CFUNC:RB->f
+ } else {
+ | ld CFUNCADDR, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | daddu TMP1, RA, NARGS8:RC
+ | ld TMP2, L->maxstack
+ | daddu RC, BASE, NARGS8:RC
+ | sd BASE, L->base
+ | sltu AT, TMP2, TMP1
+ | sd RC, L->top
+ | li_vmstate C
+ if (op == BC_FUNCCW) {
+ | ld CARG2, CFUNC:RB->f
+ }
+ | bnez AT, ->vm_growstack_c // Need to grow stack.
+ |. move CARG1, L
+ | jalr CFUNCADDR // (lua_State *L [, lua_CFunction f])
+ |. st_vmstate
+ | // Returns nresults.
+ | ld BASE, L->base
+ | sll RD, CRET1, 3
+ | ld TMP1, L->top
+ | li_vmstate INTERP
+ | ld PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | dsubu RA, TMP1, RD // RA = L->top - nresults*8
+ | sd L, DISPATCH_GL(cur_L)(DISPATCH)
+ | b ->vm_returnc
+ |. st_vmstate
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.4byte .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.4byte 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 31\n"
+ "\t.byte 0xc\n\t.uleb128 29\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.4byte .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.8byte .Lbegin\n"
+ "\t.8byte %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x9f\n\t.sleb128 2*5\n"
+ "\t.byte 0x9e\n\t.sleb128 2*6\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 23; i >= 16; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+i, 2*(30-i));
+#if !LJ_SOFTFP
+ for (i = 31; i >= 24; i--)
+ fprintf(ctx->fp, "\t.byte %d\n\t.uleb128 %d\n", 0x80+32+i, 2*(46-i));
+#endif
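+ /* Note: each 0x80+reg byte above encodes DW_CFA_offset(reg); its ULEB128
+ ** operand is a factored offset, scaled by the CIE data alignment of -4
+ ** (so an operand of 2*k places the save slot 8*k bytes below the CFA). */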
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.4byte .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.4byte .Lframe0\n"
+ "\t.4byte lj_vm_ffi_call\n"
+ "\t.4byte %d\n"
+ "\t.byte 0x9f\n\t.uleb128 2*1\n"
+ "\t.byte 0x90\n\t.uleb128 2*2\n"
+ "\t.byte 0xd\n\t.uleb128 0x10\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ /* NYI */
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_ppc.dasc b/libs/luajit-cmake/luajit/src/vm_ppc.dasc
new file mode 100644
index 0000000..3cad37d
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_ppc.dasc
@@ -0,0 +1,6041 @@
+|// Low-level VM code for PowerPC 32 bit or 32on64 bit mode.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch ppc
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|// Note: The ragged indentation of the instructions is intentional.
+|// The starting columns indicate data dependencies.
+|
+|//-----------------------------------------------------------------------
+|
+|// DynASM defines used by the PPC port:
+|//
+|// P64 64 bit pointers (only for GPR64 testing).
+|// GPR64 64 bit registers (but possibly 32 bit pointers, e.g. PS3).
+|// Affects reg saves, stack layout, carry/overflow/dot flags etc.
+|// FRAME32 Use 32 bit frame layout, even with GPR64 (Xbox 360).
+|// TOC Need table of contents (64 bit or 32 bit variant, e.g. PS3).
+|// Function pointers are really a struct: code, TOC, env (optional).
+|// TOCENV Function pointers have an environment pointer, too (not on PS3).
+|// PPE Power Processor Element of Cell (PS3) or Xenon (Xbox 360).
+|// Must avoid (slow) micro-coded instructions.
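+|//
+|// E.g. (summarizing the notes above): a PS3 build implies GPR64+TOC+PPE
+|// but not TOCENV and keeps 32 bit pointers; Xbox 360 implies FRAME32+PPE.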
+|
+|.if P64
+|.define TOC, 1
+|.define TOCENV, 1
+|.macro lpx, a, b, c; ldx a, b, c; .endmacro
+|.macro lp, a, b; ld a, b; .endmacro
+|.macro stp, a, b; std a, b; .endmacro
+|.define decode_OPP, decode_OP8
+|.if FFI
+|// Missing: Calling conventions, 64 bit regs, TOC.
+|.error lib_ffi not yet implemented for PPC64
+|.endif
+|.else
+|.macro lpx, a, b, c; lwzx a, b, c; .endmacro
+|.macro lp, a, b; lwz a, b; .endmacro
+|.macro stp, a, b; stw a, b; .endmacro
+|.define decode_OPP, decode_OP4
+|.endif
+|
+|// Convenience macros for TOC handling.
+|.if TOC
+|// Linker needs a TOC patch area for every external call relocation.
+|.macro blex, target; bl extern target@plt; nop; .endmacro
+|.macro .toc, a, b; a, b; .endmacro
+|.if P64
+|.define TOC_OFS, 8
+|.define ENV_OFS, 16
+|.else
+|.define TOC_OFS, 4
+|.define ENV_OFS, 8
+|.endif
+|.else // No TOC.
+|.macro blex, target; bl extern target@plt; .endmacro
+|.macro .toc, a, b; .endmacro
+|.endif
+|.macro .tocenv, a, b; .if TOCENV; a, b; .endif; .endmacro
+|
+|.macro .gpr64, a, b; .if GPR64; a, b; .endif; .endmacro
+|
+|.macro andix., y, a, i
+|.if PPE
+| rlwinm y, a, 0, 31-lj_fls(i), 31-lj_ffs(i)
+| cmpwi y, 0
+|.else
+| andi. y, a, i
+|.endif
+|.endmacro
+|
+|.macro clrso, reg
+|.if PPE
+| li reg, 0
+| mtxer reg
+|.else
+| mcrxr cr0
+|.endif
+|.endmacro
+|
+|.macro checkov, reg, noov
+|.if PPE
+| mfxer reg
+| add reg, reg, reg
+| cmpwi reg, 0
+| li reg, 0
+| mtxer reg
+| bgey noov
+|.else
+| mcrxr cr0
+| bley noov
+|.endif
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Fixed register assignments for the interpreter.
+|// Don't use: r1 = sp, r2 and r13 = reserved (TOC, TLS or SDATA)
+|
+|.macro .FPU, a, b
+|.if FPU
+| a, b
+|.endif
+|.endmacro
+|
+|.macro .FPU, a, b, c
+|.if FPU
+| a, b, c
+|.endif
+|.endmacro
+|
+|// The following must be C callee-save (but BASE is often refetched).
+|.define BASE, r14 // Base of current Lua stack frame.
+|.define KBASE, r15 // Constants of current Lua function.
+|.define PC, r16 // Next PC.
+|.define DISPATCH, r17 // Opcode dispatch table.
+|.define LREG, r18 // Register holding lua_State (also in SAVE_L).
+|.define MULTRES, r19 // Size of multi-result: (nresults+1)*8.
+|.define JGL, r31 // On-trace: global_State + 32768.
+|
+|// Constants for type-comparisons, stores and conversions. C callee-save.
+|.define TISNUM, r22
+|.define TISNIL, r23
+|.define ZERO, r24
+|.if FPU
+|.define TOBIT, f30 // 2^52 + 2^51.
+|.define TONUM, f31 // 2^52 + 2^51 + 2^31.
+|.endif
+|
+|// The following temporaries are not saved across C calls, except for RA.
+|.define RA, r20 // Callee-save.
+|.define RB, r10
+|.define RC, r11
+|.define RD, r12
+|.define INS, r7 // Overlaps CARG5.
+|
+|.define TMP0, r0
+|.define TMP1, r8
+|.define TMP2, r9
+|.define TMP3, r6 // Overlaps CARG4.
+|
+|// Saved temporaries.
+|.define SAVE0, r21
+|.define SAVE1, r25
+|
+|// Calling conventions.
+|.define CARG1, r3
+|.define CARG2, r4
+|.define CARG3, r5
+|.define CARG4, r6 // Overlaps TMP3.
+|.define CARG5, r7 // Overlaps INS.
+|
+|.if FPU
+|.define FARG1, f1
+|.define FARG2, f2
+|.endif
+|
+|.define CRET1, r3
+|.define CRET2, r4
+|
+|.define TOCREG, r2 // TOC register (only used by C code).
+|.define ENVREG, r11 // Environment pointer (nested C functions).
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|.if GPR64
+|.if FRAME32
+|
+|// 456(sp) // \ 32/64 bit C frame info
+|.define TONUM_LO, 452(sp) // |
+|.define TONUM_HI, 448(sp) // |
+|.define TMPD_LO, 444(sp) // |
+|.define TMPD_HI, 440(sp) // |
+|.define SAVE_CR, 432(sp) // | 64 bit CR save.
+|.define SAVE_ERRF, 424(sp) // > Parameter save area.
+|.define SAVE_NRES, 420(sp) // |
+|.define SAVE_L, 416(sp) // |
+|.define SAVE_PC, 412(sp) // |
+|.define SAVE_MULTRES, 408(sp) // |
+|.define SAVE_CFRAME, 400(sp) // / 64 bit C frame chain.
+|// 392(sp) // Reserved.
+|.define CFRAME_SPACE, 384 // Delta for sp.
+|// Back chain for sp: 384(sp) <-- sp entering interpreter
+|.define SAVE_LR, 376(sp) // 32 bit LR stored in hi-part.
+|.define SAVE_GPR_, 232 // .. 232+18*8: 64 bit GPR saves.
+|.define SAVE_FPR_, 88 // .. 88+18*8: 64 bit FPR saves.
+|// 80(sp) // Needed for 16 byte stack frame alignment.
+|// 16(sp) // Callee parameter save area (ABI mandated).
+|// 8(sp) // Reserved
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|// 32 bit sp stored in hi-part of 0(sp).
+|
+|.define TMPD_BLO, 447(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.else
+|
+|// 508(sp) // \ 32 bit C frame info.
+|.define SAVE_ERRF, 472(sp) // |
+|.define SAVE_NRES, 468(sp) // |
+|.define SAVE_L, 464(sp) // > Parameter save area.
+|.define SAVE_PC, 460(sp) // |
+|.define SAVE_MULTRES, 456(sp) // |
+|.define SAVE_CFRAME, 448(sp) // / 64 bit C frame chain.
+|.define SAVE_LR, 416(sp)
+|.define CFRAME_SPACE, 400 // Delta for sp.
+|// Back chain for sp: 400(sp) <-- sp entering interpreter
+|.define SAVE_FPR_, 256 // .. 256+18*8: 64 bit FPR saves.
+|.define SAVE_GPR_, 112 // .. 112+18*8: 64 bit GPR saves.
+|// 48(sp) // Callee parameter save area (ABI mandated).
+|.define SAVE_TOC, 40(sp) // TOC save area.
+|.define TMPD_LO, 36(sp) // \ Link editor temp (ABI mandated).
+|.define TMPD_HI, 32(sp) // /
+|.define TONUM_LO, 28(sp) // \ Compiler temp (ABI mandated).
+|.define TONUM_HI, 24(sp) // /
+|// Next frame lr: 16(sp)
+|.define SAVE_CR, 8(sp) // 64 bit CR save.
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.define TMPD_BLO, 39(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|
+|.endif
+|.else
+|
+|.if FPU
+|.define SAVE_LR, 276(sp)
+|.define CFRAME_SPACE, 272 // Delta for sp.
+|// Back chain for sp: 272(sp) <-- sp entering interpreter
+|.define SAVE_FPR_, 128 // .. 128+18*8: 64 bit FPR saves.
+|.else
+|.define SAVE_LR, 132(sp)
+|.define CFRAME_SPACE, 128 // Delta for sp.
+|// Back chain for sp: 128(sp) <-- sp entering interpreter
+|.endif
+|.define SAVE_GPR_, 56 // .. 56+18*4: 32 bit GPR saves.
+|.define SAVE_CR, 52(sp) // 32 bit CR save.
+|.define SAVE_ERRF, 48(sp) // 32 bit C frame info.
+|.define SAVE_NRES, 44(sp)
+|.define SAVE_CFRAME, 40(sp)
+|.define SAVE_L, 36(sp)
+|.define SAVE_PC, 32(sp)
+|.define SAVE_MULTRES, 28(sp)
+|.define UNUSED1, 24(sp)
+|.if FPU
+|.define TMPD_LO, 20(sp)
+|.define TMPD_HI, 16(sp)
+|.define TONUM_LO, 12(sp)
+|.define TONUM_HI, 8(sp)
+|.else
+|.define SFSAVE_4, 20(sp)
+|.define SFSAVE_3, 16(sp)
+|.define SFSAVE_2, 12(sp)
+|.define SFSAVE_1, 8(sp)
+|.endif
+|// Next frame lr: 4(sp)
+|// Back chain for sp: 0(sp) <-- sp while in interpreter
+|
+|.if FPU
+|.define TMPD_BLO, 23(sp)
+|.define TMPD, TMPD_HI
+|.define TONUM_D, TONUM_HI
+|.endif
+|
+|.endif
+|
+|.macro save_, reg
+|.if GPR64
+| std r..reg, SAVE_GPR_+(reg-14)*8(sp)
+|.else
+| stw r..reg, SAVE_GPR_+(reg-14)*4(sp)
+|.endif
+| .FPU stfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|.macro rest_, reg
+|.if GPR64
+| ld r..reg, SAVE_GPR_+(reg-14)*8(sp)
+|.else
+| lwz r..reg, SAVE_GPR_+(reg-14)*4(sp)
+|.endif
+| .FPU lfd f..reg, SAVE_FPR_+(reg-14)*8(sp)
+|.endmacro
+|
+|.macro saveregs
+|.if GPR64 and not FRAME32
+| stdu sp, -CFRAME_SPACE(sp)
+|.else
+| stwu sp, -CFRAME_SPACE(sp)
+|.endif
+| save_ 14; save_ 15; save_ 16
+| mflr r0
+| save_ 17; save_ 18; save_ 19; save_ 20; save_ 21; save_ 22
+|.if GPR64 and not FRAME32
+| std r0, SAVE_LR
+|.else
+| stw r0, SAVE_LR
+|.endif
+| save_ 23; save_ 24; save_ 25
+| mfcr r0
+| save_ 26; save_ 27; save_ 28; save_ 29; save_ 30; save_ 31
+|.if GPR64
+| std r0, SAVE_CR
+|.else
+| stw r0, SAVE_CR
+|.endif
+| .toc std TOCREG, SAVE_TOC
+|.endmacro
+|
+|.macro restoreregs
+|.if GPR64 and not FRAME32
+| ld r0, SAVE_LR
+|.else
+| lwz r0, SAVE_LR
+|.endif
+|.if GPR64
+| ld r12, SAVE_CR
+|.else
+| lwz r12, SAVE_CR
+|.endif
+| rest_ 14; rest_ 15; rest_ 16; rest_ 17; rest_ 18; rest_ 19
+| mtlr r0;
+|.if PPE; mtocrf 0x20, r12; .else; mtcrf 0x38, r12; .endif
+| rest_ 20; rest_ 21; rest_ 22; rest_ 23; rest_ 24; rest_ 25
+|.if PPE; mtocrf 0x10, r12; .endif
+| rest_ 26; rest_ 27; rest_ 28; rest_ 29; rest_ 30; rest_ 31
+|.if PPE; mtocrf 0x08, r12; .endif
+| addi sp, sp, CFRAME_SPACE
+|.endmacro
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State, LREG
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS8, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|//-----------------------------------------------------------------------
+|
+|// Trap for not-yet-implemented parts.
+|.macro NYI; tw 4, sp, sp; .endmacro
+|
+|.if FPU
+|// int/FP conversions.
+|.macro tonum_i, freg, reg
+| xoris reg, reg, 0x8000
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TONUM
+|.endmacro
+|
+|.macro tonum_u, freg, reg
+| stw reg, TONUM_LO
+| lfd freg, TONUM_D
+| fsub freg, freg, TOBIT
+|.endmacro
+|
+|.macro toint, reg, freg, tmpfreg
+| fctiwz tmpfreg, freg
+| stfd tmpfreg, TMPD
+| lwz reg, TMPD_LO
+|.endmacro
+|
+|.macro toint, reg, freg
+| toint reg, freg, freg
+|.endmacro
+|.endif
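+|
+|// The magic-number trick used above: a double with high word 0x43380000
+|// and low word u has the value 2^52 + 2^51 + u (its ulp is 1). tonum_i
+|// biases a signed i to u = i + 2^31 via the xoris, builds that double
+|// and subtracts TONUM = 2^52+2^51+2^31, leaving (double)i; tonum_u
+|// subtracts TOBIT = 2^52+2^51 instead. toint is the reverse: fctiwz
+|// leaves the int32 result in the low FPR word, spilled to TMPD and
+|// reloaded as an integer.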
+|
+|//-----------------------------------------------------------------------
+|
+|// Access to frame relative to BASE.
+|.define FRAME_PC, -8
+|.define FRAME_FUNC, -4
+|
+|// Instruction decode.
+|.macro decode_OP4, dst, ins; rlwinm dst, ins, 2, 22, 29; .endmacro
+|.macro decode_OP8, dst, ins; rlwinm dst, ins, 3, 21, 28; .endmacro
+|.macro decode_RA8, dst, ins; rlwinm dst, ins, 27, 21, 28; .endmacro
+|.macro decode_RB8, dst, ins; rlwinm dst, ins, 11, 21, 28; .endmacro
+|.macro decode_RC8, dst, ins; rlwinm dst, ins, 19, 21, 28; .endmacro
+|.macro decode_RD8, dst, ins; rlwinm dst, ins, 19, 13, 28; .endmacro
+|
+|.macro decode_OP1, dst, ins; rlwinm dst, ins, 0, 24, 31; .endmacro
+|.macro decode_RD4, dst, ins; rlwinm dst, ins, 18, 14, 29; .endmacro
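+|
+|// A worked example: rlwinm is rotate-left-and-mask (bit 0 = MSB). With
+|// ins = OP + A*2^8 + C*2^16 + B*2^24 (or + D*2^16 for the wide operand):
+|//   decode_RA8 RA, INS => RA = rotl(INS,27) & 0x7f8   = ((INS>>8)&0xff)*8
+|//   decode_RD8 RD, INS => RD = rotl(INS,19) & 0x7fff8 = (INS>>16)*8
+|// Each macro thus extracts one operand pre-scaled by 8 (or 4), ready for
+|// use as a byte offset into the stack slots or the dispatch table.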
+|
+|// Instruction fetch.
+|.macro ins_NEXT1
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+|.endmacro
+|// Instruction decode+dispatch. Note: optimized for e300!
+|.macro ins_NEXT2
+| decode_OPP TMP1, INS
+| lpx TMP0, DISPATCH, TMP1
+| mtctr TMP0
+| decode_RB8 RB, INS
+| decode_RD8 RD, INS
+| decode_RA8 RA, INS
+| decode_RC8 RC, INS
+| bctr
+|.endmacro
+|.macro ins_NEXT
+| ins_NEXT1
+| ins_NEXT2
+|.endmacro
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Fewer unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+| .define ins_next1, ins_NEXT1
+| .define ins_next2, ins_NEXT2
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| .macro ins_next
+| b ->ins_next
+| .endmacro
+| .macro ins_next1
+| .endmacro
+| .macro ins_next2
+| b ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
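+|
+|// In plain C terms, the two strategies correspond to this illustrative
+|// computed-goto sketch (not VM code):
+|//
+|//   /* Replicated: every opcode ends with its own fetch+dispatch. */
+|//   op_add: ...; ins = *pc++; goto *disp[ins & 0xff];
+|//   op_sub: ...; ins = *pc++; goto *disp[ins & 0xff];
+|//
+|//   /* Common: every opcode jumps to one shared fetch+dispatch. */
+|//   op_add: ...; goto next;
+|//   op_sub: ...; goto next;
+|//   next:   ins = *pc++; goto *disp[ins & 0xff];
+|//
+|// Replication gives each indirect branch its own predictor slot at the
+|// cost of I-cache footprint; the single shared branch is hard to predict.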
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+| lwz PC, LFUNC:RB->pc
+| lwz INS, 0(PC)
+| addi PC, PC, 4
+| decode_OPP TMP1, INS
+| decode_RA8 RA, INS
+| lpx TMP0, DISPATCH, TMP1
+| add RA, RA, BASE
+| mtctr TMP0
+| bctr
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, PC = caller PC
+| stw PC, FRAME_PC(BASE)
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checknum, reg; cmplw reg, TISNUM; .endmacro
+|.macro checknum, cr, reg; cmplw cr, reg, TISNUM; .endmacro
+|.macro checkstr, reg; cmpwi reg, LJ_TSTR; .endmacro
+|.macro checktab, reg; cmpwi reg, LJ_TTAB; .endmacro
+|.macro checkfunc, reg; cmpwi reg, LJ_TFUNC; .endmacro
+|.macro checknil, reg; cmpwi reg, LJ_TNIL; .endmacro
+|
+|.macro branch_RD
+| srwi TMP0, RD, 1
+| addis PC, PC, -(BCBIAS_J*4 >> 16)
+| add PC, PC, TMP0
+|.endmacro
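+|
+|// RD arrives as D*8, so this computes PC += D*4 - BCBIAS_J*4, i.e. it
+|// applies the biased 16 bit jump operand as a byte offset. The addis
+|// works because BCBIAS_J*4 = 0x20000 is a multiple of 2^16.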
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
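+|
+|// Example: DISPATCH points GG_G2DISP past the global_State, so
+|//   lwz TMP0, DISPATCH_GL(vmstate)(DISPATCH)
+|// reaches g->vmstate with a single (negative) displacement; global_State,
+|// jit_State and the dispatch table share one GG_State allocation.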
+|
+|.macro hotcheck, delta, target
+| rlwinm TMP1, PC, 31, 25, 30
+| addi TMP1, TMP1, GG_DISP2HOT
+| lhzx TMP2, DISPATCH, TMP1
+| addic. TMP2, TMP2, -delta
+| sthx TMP2, DISPATCH, TMP1
+| blt target
+|.endmacro
+|
+|.macro hotloop
+| hotcheck HOTCOUNT_LOOP, ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall
+| hotcheck HOTCOUNT_CALL, ->vm_hotcall
+|.endmacro
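+|
+|// hotcheck hashes the bytecode address into one of 64 halfword counters
+|// stored alongside the dispatch table:
+|//   slot = ((PC >> 2) & 63) * 2        // = rotl(PC,31) & 0x7e
+|//   if ((int32_t)(hotcount[slot] -= delta) < 0) goto target
+|// Since lhzx zero-extends, addic. only yields a negative result (and
+|// thus branches) when the counter underflows past zero.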
+|
+|// Set current VM state. Uses TMP0.
+|.macro li_vmstate, st; li TMP0, ~LJ_VMST_..st; .endmacro
+|.macro st_vmstate; stw TMP0, DISPATCH_GL(vmstate)(DISPATCH); .endmacro
+|
+|// Move table write barrier back. Overwrites mark and tmp.
+|.macro barrierback, tab, mark, tmp
+| lwz tmp, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| // Assumes LJ_GC_BLACK is 0x04.
+| rlwinm mark, mark, 0, 30, 28 // black2gray(tab)
+| stw tab, DISPATCH_GL(gc.grayagain)(DISPATCH)
+| stb mark, tab->marked
+| stw tmp, tab->gclist
+|.endmacro
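+|
+|// The rlwinm mask (30..28) wraps around and clears only bit 29, i.e.
+|// LJ_GC_BLACK (0x04), turning the table gray again. The two stw's then
+|// push it onto a LIFO list: tab->gclist = gc.grayagain; grayagain = tab.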
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | // See vm_return. Also: TMP2 = previous base.
+ | andix. TMP0, PC, FRAME_P
+ | li TMP1, LJ_TTRUE
+ | beq ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | lwz PC, FRAME_PC(TMP2) // Fetch PC of previous frame.
+ | mr BASE, TMP2 // Restore caller base.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | stwu TMP1, FRAME_PC(RA) // Prepend true to results.
+ |
+ |->vm_returnc:
+ | addi RD, RD, 8 // RD = (nresults+1)*8.
+ | andix. TMP0, PC, FRAME_TYPE
+ | cmpwi cr1, RD, 0
+ | li CRET1, LUA_YIELD
+ | beq cr1, ->vm_unwind_c_eh
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultptr, RD/MULTRES = (nresults+1)*8, PC = return
+ | // TMP0 = PC & FRAME_TYPE
+ | cmpwi TMP0, FRAME_C
+ | rlwinm TMP2, PC, 0, 0, 28
+ | li_vmstate C
+ | sub TMP2, BASE, TMP2 // TMP2 = previous base.
+ | bney ->vm_returnp
+ |
+ | addic. TMP1, RD, -8
+ | stp TMP2, L->base
+ | lwz TMP2, SAVE_NRES
+ | subi BASE, BASE, 8
+ | st_vmstate
+ | slwi TMP2, TMP2, 3
+ | beq >2
+ |1:
+ | addic. TMP1, TMP1, -8
+ |.if FPU
+ | lfd f0, 0(RA)
+ |.else
+ | lwz CARG1, 0(RA)
+ | lwz CARG2, 4(RA)
+ |.endif
+ | addi RA, RA, 8
+ |.if FPU
+ | stfd f0, 0(BASE)
+ |.else
+ | stw CARG1, 0(BASE)
+ | stw CARG2, 4(BASE)
+ |.endif
+ | addi BASE, BASE, 8
+ | bney <1
+ |
+ |2:
+ | cmpw TMP2, RD // More/less results wanted?
+ | bne >6
+ |3:
+ | stp BASE, L->top // Store new top.
+ |
+ |->vm_leave_cp:
+ | lp TMP0, SAVE_CFRAME // Restore previous C frame.
+ | li CRET1, 0 // Ok return status for vm_pcall.
+ | stp TMP0, L->cframe
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | blr
+ |
+ |6:
+ | ble >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | lwz TMP1, L->maxstack
+ | cmplw BASE, TMP1
+ | bge >8
+ | stw TISNIL, 0(BASE)
+ | addi RD, RD, 8
+ | addi BASE, BASE, 8
+ | b <2
+ |
+ |7: // Less results wanted.
+ | subfic TMP3, TMP2, 0 // LUA_MULTRET+1 case?
+ | sub TMP0, RD, TMP2
+ | subfe TMP1, TMP1, TMP1 // TMP1 = TMP2 == 0 ? 0 : -1
+ | and TMP0, TMP0, TMP1
+ | sub BASE, BASE, TMP0 // Either keep top or shrink it.
+ | b <3
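+ |
+ |// (subfic sets CA iff TMP2 == 0; subfe TMP1,TMP1,TMP1 then yields CA-1,
+ |// the all-zeros/all-ones mask that selects between keeping the top for
+ |// LUA_MULTRET and shrinking it.)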
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with a (high) nresults adjustment.
+ | stp BASE, L->top // Save current top held in BASE (yes).
+ | mr SAVE0, RD
+ | srwi CARG2, TMP2, 3
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lwz TMP2, SAVE_NRES
+ | mr RD, SAVE0
+ | slwi TMP2, TMP2, 3
+ | lp BASE, L->top // Need the (realloced) L->top in BASE.
+ | b <2
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mr sp, CARG1
+ | mr CRET1, CARG2
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | .toc ld TOCREG, SAVE_TOC
+ | li TMP0, ~LJ_VMST_C
+ | lwz GL:TMP1, L->glref
+ | stw TMP0, GL:TMP1->vmstate
+ | b ->vm_leave_unw
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ |.if GPR64
+ | rldicr sp, CARG1, 0, 61
+ |.else
+ | rlwinm sp, CARG1, 0, 0, 29
+ |.endif
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | lwz L, SAVE_L
+ | .toc ld TOCREG, SAVE_TOC
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp BASE, L->base
+ | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | li ZERO, 0
+ | .FPU stw TMP3, TMPD
+ | li TMP1, LJ_TFALSE
+ | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | .FPU lfs TOBIT, TMPD
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of previous frame.
+ | la RA, -8(BASE) // Results start at BASE-8.
+ | .FPU stw TMP3, TMPD
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw TMP1, 0(RA) // Prepend false to error message.
+ | li RD, 16 // 2 results: false + error message.
+ | st_vmstate
+ | .FPU lfs TONUM, TMPD
+ | b ->vm_returnc
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | li CARG2, LUA_MINSTACK
+ | b >2
+ |
+ |->vm_growstack_l: // Grow stack for Lua function.
+ | // BASE = new base, RA = BASE+framesize*8, RC = nargs*8, PC = first PC
+ | add RC, BASE, RC
+ | sub RA, RA, BASE
+ | stp BASE, L->base
+ | addi PC, PC, 4 // Must point after first instruction.
+ | stp RC, L->top
+ | srwi CARG2, RA, 3
+ |2:
+ | // L->base = new base, L->top = top
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | lp RC, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub RC, RC, BASE
+ | // BASE = new base, RB = LFUNC/CFUNC, RC = nargs*8, FRAME_PC(BASE) = PC
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mr L, CARG1
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | mr BASE, CARG2
+ | lbz TMP1, L->status
+ | stw L, SAVE_L
+ | li PC, FRAME_CP
+ | addi TMP0, sp, CFRAME_RESUME
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG3, SAVE_NRES
+ | cmplwi TMP1, 0
+ | stw CARG3, SAVE_ERRF
+ | stp CARG3, SAVE_CFRAME
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stp TMP0, L->cframe
+ | beq >3
+ |
+ | // Resume after yield (like a return).
+ | stw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | mr RA, BASE
+ | lp BASE, L->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp TMP1, L->top
+ | lwz PC, FRAME_PC(BASE)
+ | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | stb CARG3, L->status
+ | .FPU stw TMP3, TMPD
+ | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | .FPU lfs TOBIT, TMPD
+ | sub RD, TMP1, BASE
+ | .FPU stw TMP3, TMPD
+ | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | addi RD, RD, 8
+ | .FPU stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | li ZERO, 0
+ | st_vmstate
+ | andix. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | .FPU lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | li PC, FRAME_CP
+ | stw CARG4, SAVE_ERRF
+ | b >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | li PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | lp TMP1, L:CARG1->cframe
+ | mr L, CARG1
+ | stw CARG3, SAVE_NRES
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_L
+ | mr BASE, CARG2
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | stp TMP1, SAVE_CFRAME
+ | stp sp, L->cframe // Add our C frame to cframe chain.
+ |
+ |3: // Entry point for vm_cpcall/vm_resume (BASE = base, PC = ftype).
+ | stw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | lp TMP2, L->base // TMP2 = old base (used in vmeta_call).
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp TMP1, L->top
+ | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | add PC, PC, BASE
+ | .FPU stw TMP3, TMPD
+ | li ZERO, 0
+ | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | .FPU lfs TOBIT, TMPD
+ | sub PC, PC, TMP2 // PC = frame delta + frame type
+ | .FPU stw TMP3, TMPD
+ | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | sub NARGS8:RC, TMP1, BASE
+ | .FPU stw TMP0, TONUM_HI
+ | li_vmstate INTERP
+ | .FPU lfs TONUM, TMPD
+ | li TISNIL, LJ_TNIL
+ | st_vmstate
+ |
+ |->vm_call_dispatch:
+ | // TMP2 = old base, BASE = new base, RC = nargs*8, PC = caller PC
+ | lwz TMP0, FRAME_PC(BASE)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | checkfunc TMP0; bne ->vmeta_call
+ |
+ |->vm_call_dispatch_f:
+ | ins_call
+ | // BASE = new base, RB = func, RC = nargs*8, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mr L, CARG1
+ | lwz TMP0, L:CARG1->stack
+ | stw CARG1, SAVE_L
+ | lp TMP1, L->top
+ | lwz DISPATCH, L->glref // Setup pointer to dispatch table.
+ | stw CARG1, SAVE_PC // Any value outside of bytecode is ok.
+ | sub TMP0, TMP0, TMP1 // Compute -savestack(L, L->top).
+ | lp TMP1, L->cframe
+ | addi DISPATCH, DISPATCH, GG_G2DISP
+ | .toc lp CARG4, 0(CARG4)
+ | li TMP2, 0
+ | stw TMP0, SAVE_NRES // Neg. delta means cframe w/o frame.
+ | stw TMP2, SAVE_ERRF // No error function.
+ | stp TMP1, SAVE_CFRAME
+ | stp sp, L->cframe // Add our C frame to cframe chain.
+ | stw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | mtctr CARG4
+ | bctrl // (lua_State *L, lua_CFunction func, void *ud)
+ |.if PPE
+ | mr BASE, CRET1
+ | cmpwi CRET1, 0
+ |.else
+ | mr. BASE, CRET1
+ |.endif
+ | li PC, FRAME_CP
+ | bne <3 // Else continue with the call.
+ | b ->vm_leave_cp // No base? Just remove C frame.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// The lj_meta_* functions (except for lj_meta_cat) don't reallocate the
+ |// stack, so BASE doesn't need to be reloaded across these calls.
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultptr, RD = (nresults+1)*8
+ | lwz TMP0, -12(BASE) // Continuation.
+ | mr RB, BASE
+ | mr BASE, TMP2 // Restore caller BASE.
+ | lwz LFUNC:TMP1, FRAME_FUNC(TMP2)
+ |.if FFI
+ | cmplwi TMP0, 1
+ |.endif
+ | lwz PC, -16(RB) // Restore PC from [cont|PC].
+ | subi TMP2, RD, 8
+ | stwx TISNIL, RA, TMP2 // Ensure one valid arg.
+ |.if FFI
+ | ble >1
+ |.endif
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // BASE = base, RA = resultptr, RB = meta base
+ | mtctr TMP0
+ | bctr // Jump to continuation.
+ |
+ |.if FFI
+ |1:
+ | beq ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: tailcall from C function.
+ | subi TMP1, RB, 16
+ | sub RC, TMP1, BASE
+ | b ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | subi CARG2, RB, 16
+ | decode_RB8 SAVE0, INS
+ |.if FPU
+ | lfd f0, 0(RA)
+ |.else
+ | lwz TMP2, 0(RA)
+ | lwz TMP3, 4(RA)
+ |.endif
+ | add TMP1, BASE, SAVE0
+ | stp BASE, L->base
+ | cmplw TMP1, CARG2
+ | sub CARG3, CARG2, TMP1
+ | decode_RA8 RA, INS
+ |.if FPU
+ | stfd f0, 0(CARG2)
+ |.else
+ | stw TMP2, 0(CARG2)
+ | stw TMP3, 4(CARG2)
+ |.endif
+ | bney ->BC_CAT_Z
+ |.if FPU
+ | stfdx f0, BASE, RA
+ |.else
+ | stwux TMP2, RA, BASE
+ | stw TMP3, 4(RA)
+ |.endif
+ | b ->cont_nop
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tgetb: // TMP0 = index
+ |.if not DUALNUM
+ | tonum_u f0, TMP0
+ |.endif
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ |.if DUALNUM
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ |.else
+ | stfd f0, 0(CARG3)
+ |.endif
+ | b >1
+ |
+ |->vmeta_tgetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ | beq >3
+ |.if FPU
+ | lfd f0, 0(CRET1)
+ |.else
+ | lwz TMP0, 0(CRET1)
+ | lwz TMP1, 4(CRET1)
+ |.endif
+ | ins_next1
+ |.if FPU
+ | stfdx f0, BASE, RA
+ |.else
+ | stwux TMP0, RA, BASE
+ | stw TMP1, 4(RA)
+ |.endif
+ | ins_next2
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | subfic TMP1, BASE, FRAME_CONT
+ | lp BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 16 // 2 args for func(t, k).
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | beq >1
+ |.if FPU
+ | lfd f14, 0(CRET1)
+ |.else
+ | lwz SAVE0, 0(CRET1)
+ | lwz SAVE1, 4(CRET1)
+ |.endif
+ | b ->BC_TGETR_Z
+ |1:
+ | stwx TISNIL, BASE, RA
+ | b ->cont_nop
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets1:
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TSTR
+ | decode_RB8 RB, INS
+ | stw STR:RC, 4(CARG3)
+ | add CARG2, BASE, RB
+ | stw TMP0, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsets:
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ | li TMP0, LJ_TTAB
+ | stw TAB:RB, 4(CARG2)
+ | la CARG3, DISPATCH_GL(tmptv2)(DISPATCH)
+ | stw TMP0, 0(CARG2)
+ | li TMP1, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | stw TMP1, 0(CARG3)
+ | b >1
+ |
+ |->vmeta_tsetb: // TMP0 = index
+ |.if not DUALNUM
+ | tonum_u f0, TMP0
+ |.endif
+ | decode_RB8 RB, INS
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | add CARG2, BASE, RB
+ |.if DUALNUM
+ | stw TISNUM, 0(CARG3)
+ | stw TMP0, 4(CARG3)
+ |.else
+ | stfd f0, 0(CARG3)
+ |.endif
+ | b >1
+ |
+ |->vmeta_tsetv:
+ | decode_RB8 RB, INS
+ | decode_RC8 RC, INS
+ | add CARG2, BASE, RB
+ | add CARG3, BASE, RC
+ |1:
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // Returns TValue * (finished) or NULL (metamethod).
+ | cmplwi CRET1, 0
+ |.if FPU
+ | lfdx f0, BASE, RA
+ |.else
+ | lwzux TMP2, RA, BASE
+ | lwz TMP3, 4(RA)
+ |.endif
+ | beq >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | ins_next1
+ |.if FPU
+ | stfd f0, 0(CRET1)
+ |.else
+ | stw TMP2, 0(CRET1)
+ | stw TMP3, 4(CRET1)
+ |.endif
+ | ins_next2
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | subfic TMP1, BASE, FRAME_CONT
+ | lp BASE, L->top
+ | stw PC, -16(BASE) // [cont|PC]
+ | add PC, TMP1, BASE
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | li NARGS8:RC, 24 // 3 args for func(t, k, v)
+ |.if FPU
+ | stfd f0, 16(BASE) // Copy value to third argument.
+ |.else
+ | stw TMP2, 16(BASE)
+ | stw TMP3, 20(BASE)
+ |.endif
+ | b ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // Returns TValue *.
+ |.if FPU
+ | stfd f14, 0(CRET1)
+ |.else
+ | stw SAVE0, 0(CRET1)
+ | stw SAVE1, 4(CRET1)
+ |.endif
+ | b ->cont_nop
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | mr CARG1, L
+ | subi PC, PC, 4
+ |.if DUALNUM
+ | mr CARG2, RA
+ |.else
+ | add CARG2, BASE, RA
+ |.endif
+ | stw PC, SAVE_PC
+ |.if DUALNUM
+ | mr CARG3, RD
+ |.else
+ | add CARG3, BASE, RD
+ |.endif
+ | stp BASE, L->base
+ | decode_OP1 CARG4, INS
+ | bl extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // Returns 0/1 or TValue * (metamethod).
+ |3:
+ | cmplwi CRET1, 1
+ | bgt ->vmeta_binop
+ | subfic CRET1, CRET1, 0
+ |4:
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | decode_RD4 TMP2, INS
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | and TMP2, TMP2, CRET1
+ | add PC, PC, TMP2
+ |->cont_nop:
+ | ins_next
+ |
+ |->cont_ra: // RA = resultptr
+ | lwz INS, -4(PC)
+ |.if FPU
+ | lfd f0, 0(RA)
+ |.else
+ | lwz CARG1, 0(RA)
+ | lwz CARG2, 4(RA)
+ |.endif
+ | decode_RA8 TMP1, INS
+ |.if FPU
+ | stfdx f0, BASE, TMP1
+ |.else
+ | stwux CARG1, TMP1, BASE
+ | stw CARG2, 4(TMP1)
+ |.endif
+ | b ->cont_nop
+ |
+ |->cont_condt: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is true.
+ | subfe CRET1, CRET1, CRET1
+ | not CRET1, CRET1
+ | b <4
+ |
+ |->cont_condf: // RA = resultptr
+ | lwz TMP0, 0(RA)
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE // Branch if result is false.
+ | subfe CRET1, CRET1, CRET1
+ | b <4
+ |
+ |->vmeta_equal:
+ | // CARG2, CARG3, CARG4 are already set by BC_ISEQV/BC_ISNEV.
+ | subi PC, PC, 4
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | mr CARG2, INS
+ | subi PC, PC, 4
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_equal_cd // (lua_State *L, BCIns op)
+ | // Returns 0/1 or TValue * (metamethod).
+ | b <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | subi PC, PC, 4
+ | stp BASE, L->base
+ | srwi CARG2, RA, 3
+ | mr CARG1, L
+ | srwi CARG3, RD, 3
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | b ->cont_nop
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_nv:
+ | add CARG3, KBASE, RC
+ | add CARG4, BASE, RB
+ | b >1
+ |->vmeta_arith_nv2:
+ |.if DUALNUM
+ | mr CARG3, RC
+ | mr CARG4, RB
+ | b >1
+ |.endif
+ |
+ |->vmeta_unm:
+ | mr CARG3, RD
+ | mr CARG4, RD
+ | b >1
+ |
+ |->vmeta_arith_vn:
+ | add CARG3, BASE, RB
+ | add CARG4, KBASE, RC
+ | b >1
+ |
+ |->vmeta_arith_vv:
+ | add CARG3, BASE, RB
+ | add CARG4, BASE, RC
+ |.if DUALNUM
+ | b >1
+ |.endif
+ |->vmeta_arith_vn2:
+ |->vmeta_arith_vv2:
+ |.if DUALNUM
+ | mr CARG3, RB
+ | mr CARG4, RC
+ |.endif
+ |1:
+ | add CARG2, BASE, RA
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | decode_OP1 CARG5, INS // Caveat: CARG5 overlaps INS.
+ | bl extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | beq ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = old base, CRET1 = new base, stack = cont/func/o1/o2
+ | sub TMP1, CRET1, BASE
+ | stw PC, -16(CRET1) // [cont|PC]
+ | mr TMP2, BASE
+ | addi PC, TMP1, FRAME_CONT
+ | mr BASE, CRET1
+ | li NARGS8:RC, 16 // 2 args for func(o1, o2).
+ | b ->vm_call_dispatch
+ |
+ |->vmeta_len:
+#if LJ_52
+ | mr SAVE0, CARG1
+#endif
+ | mr CARG2, RD
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | bl extern lj_meta_len // (lua_State *L, TValue *o)
+ | // Returns NULL (retry) or TValue * (metamethod base).
+#if LJ_52
+ | cmplwi CRET1, 0
+ | bne ->vmeta_binop // Binop call for compatibility.
+ | mr CARG1, SAVE0
+ | b ->BC_LEN_Z
+#else
+ | b ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // TMP2 = old base, BASE = new base, RC = nargs*8
+ | mr CARG1, L
+ | stp TMP2, L->base // This is the callers base!
+ | subi CARG2, BASE, 8
+ | stw PC, SAVE_PC
+ | add CARG3, BASE, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE) // Guaranteed to be a function here.
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | ins_call
+ |
+ |->vmeta_callt: // Resolve __call for BC_CALLT.
+ | // BASE = old base, RA = new base, RC = nargs*8
+ | mr CARG1, L
+ | stp BASE, L->base
+ | subi CARG2, RA, 8
+ | stw PC, SAVE_PC
+ | add CARG3, RA, RC
+ | mr SAVE0, NARGS8:RC
+ | bl extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | lwz TMP1, FRAME_PC(BASE)
+ | addi NARGS8:RC, SAVE0, 8 // Got one more argument now.
+ | lwz LFUNC:RB, FRAME_FUNC(RA) // Guaranteed to be a function here.
+ | b ->BC_CALLT_Z
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mr CARG1, L
+ | stp BASE, L->base
+ | mr CARG2, RA
+ | stw PC, SAVE_PC
+ | mr SAVE0, INS
+ | bl extern lj_meta_for // (lua_State *L, TValue *base)
+ |.if JIT
+ | decode_OP1 TMP0, SAVE0
+ |.endif
+ | decode_RA8 RA, SAVE0
+ |.if JIT
+ | cmpwi TMP0, BC_JFORI
+ |.endif
+ | decode_RD8 RD, SAVE0
+ |.if JIT
+ | beqy =>BC_JFORI
+ |.endif
+ | b =>BC_FORI
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz CARG1, 4(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz CARG4, 8(BASE)
+ | lwz CARG1, 4(BASE)
+ | lwz CARG2, 12(BASE)
+ | blt ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checknum CARG1; bge ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ |->ff_ .. name:
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG1, 0(BASE)
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ | lwz CARG3, 8(BASE)
+ | lfd FARG2, 8(BASE)
+ |.else
+ | lwz CARG2, 4(BASE)
+ | lwz CARG3, 8(BASE)
+ | lwz CARG4, 12(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checknum CARG1; bge ->fff_fallback
+ | checknum CARG3; bge ->fff_fallback
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses TMP0 and TMP1.
+ |.macro ffgccheck
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | cmplw TMP0, TMP1
+ | bgel ->fff_gcstep
+ |.endmacro
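+ |
+ |// Note: bgel is a conditional branch-and-link, i.e. a conditional call:
+ |// ->fff_gcstep runs only when gc.total >= gc.threshold and returns here.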
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | li TMP1, LJ_TFALSE
+ | la RA, -8(BASE)
+ | cmplw cr1, CARG3, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | bge cr1, ->fff_fallback
+ | stw CARG3, 0(RA)
+ | addi RD, NARGS8:RC, 8 // Compute (nresults+1)*8.
+ | addi TMP1, BASE, 8
+ | add TMP2, RA, NARGS8:RC
+ | stw CARG1, 4(RA)
+ | beq ->fff_res // Done if exactly 1 argument.
+ |1:
+ | cmplw TMP1, TMP2
+ |.if FPU
+ | lfd f0, 0(TMP1)
+ | stfd f0, 0(TMP1)
+ |.else
+ | lwz CARG1, 0(TMP1)
+ | lwz CARG2, 4(TMP1)
+ | stw CARG1, -8(TMP1)
+ | stw CARG2, -4(TMP1)
+ |.endif
+ | addi TMP1, TMP1, 8
+ | bney <1
+ | b ->fff_res
+ |
+ |.ffunc type
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | blt ->fff_fallback
+ | .gpr64 extsw CARG1, CARG1
+ | subfc TMP0, TISNUM, CARG1
+ | subfe TMP2, CARG1, CARG1
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 3
+ |.if FPU
+ | la TMP2, CFUNC:RB->upvalue
+ | lfdx FARG1, TMP2, TMP1
+ |.else
+ | add TMP1, CFUNC:RB, TMP1
+ | lwz CARG1, CFUNC:TMP1->upvalue[0].u32.hi
+ | lwz CARG2, CFUNC:TMP1->upvalue[0].u32.lo
+ |.endif
+ | b ->fff_resn
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | checktab CARG3; bne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | lwz TAB:CARG1, TAB:CARG1->metatable
+ |2:
+ | li CARG3, LJ_TNIL
+ | cmplwi TAB:CARG1, 0
+ | lwz STR:RC, DISPATCH_GL(gcroot[GCROOT_MMNAME+MM_metatable])(DISPATCH)
+ | beq ->fff_restv
+ | lwz TMP0, TAB:CARG1->hmask
+ | li CARG3, LJ_TTAB // Use metatable as default result.
+ | lwz TMP1, STR:RC->sid
+ | lwz NODE:TMP2, TAB:CARG1->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | lwz CARG4, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG4; bne >4
+ | cmpw TMP0, STR:RC; beq >5
+ |4:
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | beq ->fff_restv // Not found, keep default result.
+ | b <3
+ |5:
+ | checknil CARG2
+ | beq ->fff_restv // Ditto for nil value.
+ | mr CARG3, CARG2 // Return value of mt.__metatable.
+ | mr CARG1, TMP1
+ | b ->fff_restv
+ |
+ |6:
+ | cmpwi CARG3, LJ_TUDATA; beq <1
+ | .gpr64 extsw CARG3, CARG3
+ | subfc TMP0, TISNUM, CARG3
+ | subfe TMP2, CARG3, CARG3
+ | orc TMP1, TMP2, TMP0
+ | addi TMP1, TMP1, ~LJ_TISNUM+1
+ | slwi TMP1, TMP1, 2
+ | la TMP2, DISPATCH_GL(gcroot[GCROOT_BASEMT])(DISPATCH)
+ | lwzx TAB:CARG1, TMP2, TMP1
+ | b <2
+ |
+ |.ffunc_2 setmetatable
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | checktab CARG3; bne ->fff_fallback
+ | lwz TAB:TMP1, TAB:CARG1->metatable
+ | checktab CARG4; bne ->fff_fallback
+ | cmplwi TAB:TMP1, 0
+ | lbz TMP3, TAB:CARG1->marked
+ | bne ->fff_fallback
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ | stw TAB:CARG2, TAB:CARG1->metatable
+ | beq ->fff_restv
+ | barrierback TAB:CARG1, TMP3, TMP0
+ | b ->fff_restv
+ |
+ |.ffunc rawget
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG4, 0(BASE)
+ | lwz TAB:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | checktab CARG4; bne ->fff_fallback
+ | la CARG3, 8(BASE)
+ | mr CARG1, L
+ | bl extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // Returns cTValue *.
+ |.if FPU
+ | lfd FARG1, 0(CRET1)
+ |.else
+ | lwz CARG2, 4(CRET1)
+ | lwz CARG1, 0(CRET1) // Caveat: CARG1 == CRET1.
+ |.endif
+ | b ->fff_resn
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | bne ->fff_fallback // Exactly one argument.
+ | checknum CARG1; bgt ->fff_fallback
+ | b ->fff_resn
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | checkstr CARG3
+ | // A __tostring method in the string base metatable is ignored.
+ | beq ->fff_restv // String key?
+ | // Handle numbers inline, unless a number base metatable is present.
+ | lwz TMP0, DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])(DISPATCH)
+ | checknum CARG3
+ | cmplwi cr1, TMP0, 0
+ | stp BASE, L->base // Add frame since C call can throw.
+ | crorc 4*cr0+eq, 4*cr0+gt, 4*cr1+eq
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | beq ->fff_fallback
+ | ffgccheck
+ | mr CARG1, L
+ | mr CARG2, BASE
+ |.if DUALNUM
+ | bl extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |.else
+ | bl extern lj_strfmt_num // (lua_State *L, lua_Number *np)
+ |.endif
+ | // Returns GCstr *.
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | stwx TISNIL, BASE, NARGS8:RC // Set missing 2nd arg to nil.
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+ | la CARG2, 8(BASE)
+ | la CARG3, -8(BASE)
+ | bl extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // Returns 1=found, 0=end, -1=error.
+ | cmpwi CRET1, 0
+ | la RA, -8(BASE)
+ | li RD, (2+1)*8
+ | bgt ->fff_res // Found key/value.
+ | li CARG3, LJ_TNIL
+ | beq ->fff_restv // End of traversal: return nil.
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | li NARGS8:RC, 2*8
+ | b ->fff_fallback // Invalid key.
+ |
+ |.ffunc_1 pairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ |.if FPU
+ | lfd f0, CFUNC:RB->upvalue[0]
+ |.else
+ | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
+ |.endif
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ |.if FPU
+ | lfd f0, CFUNC:RB->upvalue[0]
+ |.else
+ | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
+ |.endif
+ | la RA, -8(BASE)
+#endif
+ | stw TISNIL, 8(BASE)
+ | li RD, (3+1)*8
+ |.if FPU
+ | stfd f0, 0(RA)
+ |.else
+ | stw TMP0, 0(RA)
+ | stw TMP1, 4(RA)
+ |.endif
+ | b ->fff_res
+ |
+ |.ffunc ipairs_aux
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 0(BASE)
+ | lwz TAB:CARG1, 4(BASE)
+ | lwz CARG4, 8(BASE)
+ |.if DUALNUM
+ | lwz TMP2, 12(BASE)
+ |.else
+ | lfd FARG2, 8(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checktab CARG3
+ | checknum cr1, CARG4
+ | lwz PC, FRAME_PC(BASE)
+ |.if DUALNUM
+ | bne ->fff_fallback
+ | bne cr1, ->fff_fallback
+ |.else
+ | lus TMP0, 0x3ff0
+ | stw ZERO, TMPD_LO
+ | bne ->fff_fallback
+ | stw TMP0, TMPD_HI
+ | bge cr1, ->fff_fallback
+ | lfd FARG1, TMPD
+ | toint TMP2, FARG2, f0
+ |.endif
+ | lwz TMP0, TAB:CARG1->asize
+ | lwz TMP1, TAB:CARG1->array
+ |.if not DUALNUM
+ | fadd FARG2, FARG2, FARG1
+ |.endif
+ | addi TMP2, TMP2, 1
+ | la RA, -8(BASE)
+ | cmplw TMP0, TMP2
+ |.if DUALNUM
+ | stw TISNUM, 0(RA)
+ | slwi TMP3, TMP2, 3
+ | stw TMP2, 4(RA)
+ |.else
+ | slwi TMP3, TMP2, 3
+ | stfd FARG2, 0(RA)
+ |.endif
+ | ble >2 // Not in array part?
+ |.if FPU
+ | lwzx TMP2, TMP1, TMP3
+ | lfdx f0, TMP1, TMP3
+ |.else
+ | lwzux TMP2, TMP1, TMP3
+ | lwz TMP3, 4(TMP1)
+ |.endif
+ |1:
+ | checknil TMP2
+ | li RD, (0+1)*8
+ | beq ->fff_res // End of iteration, return 0 results.
+ | li RD, (2+1)*8
+ |.if FPU
+ | stfd f0, 8(RA)
+ |.else
+ | stw TMP2, 8(RA)
+ | stw TMP3, 12(RA)
+ |.endif
+ | b ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | lwz TMP0, TAB:CARG1->hmask
+ | cmplwi TMP0, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | mr CARG2, TMP2
+ | bl extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // Returns cTValue * or NULL.
+ | cmplwi CRET1, 0
+ | li RD, (0+1)*8
+ | beq ->fff_res
+ | lwz TMP2, 0(CRET1)
+ |.if FPU
+ | lfd f0, 0(CRET1)
+ |.else
+ | lwz TMP3, 4(CRET1)
+ |.endif
+ | b <1
+ |
+ |.ffunc_1 ipairs
+ | checktab CARG3
+ | lwz PC, FRAME_PC(BASE)
+ | bne ->fff_fallback
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ |.if FPU
+ | lfd f0, CFUNC:RB->upvalue[0]
+ |.else
+ | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
+ |.endif
+ | cmplwi TAB:TMP2, 0
+ | la RA, -8(BASE)
+ | bne ->fff_fallback
+#else
+ |.if FPU
+ | lfd f0, CFUNC:RB->upvalue[0]
+ |.else
+ | lwz TMP0, CFUNC:RB->upvalue[0].u32.hi
+ | lwz TMP1, CFUNC:RB->upvalue[0].u32.lo
+ |.endif
+ | la RA, -8(BASE)
+#endif
+ |.if DUALNUM
+ | stw TISNUM, 8(BASE)
+ |.else
+ | stw ZERO, 8(BASE)
+ |.endif
+ | stw ZERO, 12(BASE)
+ | li RD, (3+1)*8
+ |.if FPU
+ | stfd f0, 0(RA)
+ |.else
+ | stw TMP0, 0(RA)
+ | stw TMP1, 4(RA)
+ |.endif
+ | b ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc pcall
+ | cmplwi NARGS8:RC, 8
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | blt ->fff_fallback
+ | mr TMP2, BASE
+ | la BASE, 8(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP3, TMP3, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi PC, TMP3, 8+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |.ffunc xpcall
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 8(BASE)
+ |.if FPU
+ | lfd FARG2, 8(BASE)
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG1, 0(BASE)
+ | lwz CARG2, 4(BASE)
+ | lwz CARG4, 12(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | lbz TMP1, DISPATCH_GL(hookmask)(DISPATCH)
+ | mr TMP2, BASE
+ | checkfunc CARG3; bne ->fff_fallback // Traceback must be a function.
+ | la BASE, 16(BASE)
+ | // Remember active hook before pcall.
+ | rlwinm TMP1, TMP1, 32-HOOK_ACTIVE_SHIFT, 31, 31
+ |.if FPU
+ | stfd FARG2, 0(TMP2) // Swap function and traceback.
+ | stfd FARG1, 8(TMP2)
+ |.else
+ | stw CARG3, 0(TMP2)
+ | stw CARG4, 4(TMP2)
+ | stw CARG1, 8(TMP2)
+ | stw CARG2, 12(TMP2)
+ |.endif
+ | subi NARGS8:RC, NARGS8:RC, 16
+ | addi PC, TMP1, 16+FRAME_PCALL
+ | b ->vm_call_dispatch
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | cmpwi CARG3, LJ_TTHREAD; bne ->fff_fallback
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | lwz L:CARG1, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | lbz TMP0, L:CARG1->status
+ | lp TMP1, L:CARG1->cframe
+ | lp CARG2, L:CARG1->top
+ | cmplwi cr0, TMP0, LUA_YIELD
+ | lp TMP2, L:CARG1->base
+ | cmplwi cr1, TMP1, 0
+ | lwz TMP0, L:CARG1->maxstack
+ | cmplw cr7, CARG2, TMP2
+ | lwz PC, FRAME_PC(BASE)
+ | crorc 4*cr6+lt, 4*cr0+gt, 4*cr1+eq // st>LUA_YIELD || cframe!=0
+ | add TMP2, CARG2, NARGS8:RC
+ | crandc 4*cr6+gt, 4*cr7+eq, 4*cr0+eq // base==top && st!=LUA_YIELD
+ | cmplw cr1, TMP2, TMP0
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr6+gt
+ | stw PC, SAVE_PC
+ | cror 4*cr6+lt, 4*cr6+lt, 4*cr1+gt // cond1 || cond2 || stackov
+ | stp BASE, L->base
+ | blt cr6, ->fff_fallback
+ |1:
+ |.if resume
+ | addi BASE, BASE, 8 // Keep resumed thread in stack for GC.
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | subi TMP2, TMP2, 8
+ |.endif
+ | stp TMP2, L:CARG1->top
+ | li TMP1, 0
+ | stp BASE, L->top
+ |2: // Move args to coroutine.
+ | cmpw TMP1, NARGS8:RC
+ |.if FPU
+ | lfdx f0, BASE, TMP1
+ |.else
+ | add CARG3, BASE, TMP1
+ | lwz TMP2, 0(CARG3)
+ | lwz TMP3, 4(CARG3)
+ |.endif
+ | beq >3
+ |.if FPU
+ | stfdx f0, CARG2, TMP1
+ |.else
+ | add CARG3, CARG2, TMP1
+ | stw TMP2, 0(CARG3)
+ | stw TMP3, 4(CARG3)
+ |.endif
+ | addi TMP1, TMP1, 8
+ | b <2
+ |3:
+ | li CARG3, 0
+ | mr L:SAVE0, L:CARG1
+ | li CARG4, 0
+ | bl ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ | // Returns thread status.
+ |4:
+ | lp TMP2, L:SAVE0->base
+ | cmplwi CRET1, LUA_YIELD
+ | lp TMP3, L:SAVE0->top
+ | li_vmstate INTERP
+ | lp BASE, L->base
+ | stw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | st_vmstate
+ | bgt >8
+ | sub RD, TMP3, TMP2
+ | lwz TMP0, L->maxstack
+ | cmplwi RD, 0
+ | add TMP1, BASE, RD
+ | beq >6 // No results?
+ | cmplw TMP1, TMP0
+ | li TMP1, 0
+ | bgt >9 // Need to grow stack?
+ |
+ | subi TMP3, RD, 8
+ | stp TMP2, L:SAVE0->top // Clear coroutine stack.
+ |5: // Move results from coroutine.
+ | cmplw TMP1, TMP3
+ |.if FPU
+ | lfdx f0, TMP2, TMP1
+ | stfdx f0, BASE, TMP1
+ |.else
+ | add CARG3, TMP2, TMP1
+ | lwz CARG1, 0(CARG3)
+ | lwz CARG2, 4(CARG3)
+ | add CARG3, BASE, TMP1
+ | stw CARG1, 0(CARG3)
+ | stw CARG2, 4(CARG3)
+ |.endif
+ | addi TMP1, TMP1, 8
+ | bne <5
+ |6:
+ | andix. TMP0, PC, FRAME_TYPE
+ |.if resume
+ | li TMP1, LJ_TTRUE
+ | la RA, -8(BASE)
+ | stw TMP1, -8(BASE) // Prepend true to results.
+ | addi RD, RD, 16
+ |.else
+ | mr RA, BASE
+ | addi RD, RD, 8
+ |.endif
+ |7:
+ | stw PC, SAVE_PC
+ | mr MULTRES, RD
+ | beq ->BC_RET_Z
+ | b ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | andix. TMP0, PC, FRAME_TYPE
+ | la TMP3, -8(TMP3)
+ | li TMP1, LJ_TFALSE
+ |.if FPU
+ | lfd f0, 0(TMP3)
+ |.else
+ | lwz CARG1, 0(TMP3)
+ | lwz CARG2, 4(TMP3)
+ |.endif
+ | stp TMP3, L:SAVE0->top // Remove error from coroutine stack.
+ | li RD, (2+1)*8
+ | stw TMP1, -8(BASE) // Prepend false to results.
+ | la RA, -8(BASE)
+ |.if FPU
+ | stfd f0, 0(BASE) // Copy error message.
+ |.else
+ | stw CARG1, 0(BASE) // Copy error message.
+ | stw CARG2, 4(BASE)
+ |.endif
+ | b <7
+ |.else
+ | mr CARG1, L
+ | mr CARG2, L:SAVE0
+ | bl extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mr CARG1, L
+ | srwi CARG2, RD, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | li CRET1, 0
+ | b <4
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | lp TMP0, L->cframe
+ | add TMP1, BASE, NARGS8:RC
+ | stp BASE, L->base
+ | andix. TMP0, TMP0, CFRAME_RESUME
+ | stp TMP1, L->top
+ | li CRET1, LUA_YIELD
+ | beq ->fff_fallback
+ | stp ZERO, L->cframe
+ | stb CRET1, L->status
+ | b ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.ffunc_1 math_abs
+ | checknum CARG3
+ |.if DUALNUM
+ | bne >2
+ | srawi TMP1, CARG1, 31
+ | xor TMP2, TMP1, CARG1
+ |.if GPR64
+ | lus TMP0, 0x8000
+ | sub CARG1, TMP2, TMP1
+ | cmplw CARG1, TMP0
+ | beq >1
+ |.else
+ | sub. CARG1, TMP2, TMP1
+ | blt >1
+ |.endif
+ |->fff_resi:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ | stw TISNUM, -8(BASE)
+ | stw CRET1, -4(BASE)
+ | b ->fff_res1
+ |1:
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |2:
+ |.endif
+ | bge ->fff_fallback
+ | rlwinm CARG3, CARG3, 0, 1, 31
+ | // Fallthrough.
+ |
+ |->fff_restv:
+ | // CARG3/CARG1 = TValue result.
+ | lwz PC, FRAME_PC(BASE)
+ | stw CARG3, -8(BASE)
+ | la RA, -8(BASE)
+ | stw CARG1, -4(BASE)
+ |->fff_res1:
+ | // RA = results, PC = return.
+ | li RD, (1+1)*8
+ |->fff_res:
+ | // RA = results, RD = (nresults+1)*8, PC = return.
+ | andix. TMP0, PC, FRAME_TYPE
+ | mr MULTRES, RD
+ | bney ->vm_return
+ | lwz INS, -4(PC)
+ | decode_RB8 RB, INS
+ |5:
+ | cmplw RB, RD // More results expected?
+ | decode_RA8 TMP0, INS
+ | bgt >6
+ | ins_next1
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | sub BASE, RA, TMP0
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, RA, TMP1
+ | b <5
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.macro math_round, func
+ | .ffunc_1 math_ .. func
+ | checknum CARG3; beqy ->fff_restv
+ | rlwinm TMP2, CARG3, 12, 21, 31
+ | bge ->fff_fallback
+ | addic. TMP2, TMP2, -1023 // exp = exponent(x) - 1023
+ | cmplwi cr1, TMP2, 31 // 0 <= exp < 31?
+ | subfic TMP0, TMP2, 31
+ | blt >3
+ | slwi TMP1, CARG3, 11
+ | srwi TMP3, CARG1, 21
+ | oris TMP1, TMP1, 0x8000
+ | addi TMP2, TMP2, 1
+ | or TMP1, TMP1, TMP3
+ | slwi CARG2, CARG1, 11
+ | bge cr1, >4
+ | slw TMP3, TMP1, TMP2
+ | srw RD, TMP1, TMP0
+ | or TMP3, TMP3, CARG2
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | and TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | add CARG1, RD, TMP1
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | b ->fff_resi
+ |.else
+ | andc TMP1, TMP3, TMP2
+ | addic TMP0, TMP1, -1
+ | subfe TMP1, TMP0, TMP1
+ | add CARG1, RD, TMP1
+ | cmpw CARG1, RD
+ | xor CARG1, CARG1, TMP2
+ | sub CARG1, CARG1, TMP2
+ | bge ->fff_resi
+ | // Overflow to 2^31.
+ | lus CARG3, 0x41e0 // 2^31.
+ | li CARG1, 0
+ | b ->fff_restv
+ |.endif
+ |3: // |x| < 1
+ | slwi TMP2, CARG3, 1
+ | srawi TMP1, CARG3, 31
+ | or TMP2, CARG1, TMP2 // ztest = (hi+hi) | lo
+ |.if "func" == "floor"
+ | and TMP1, TMP2, TMP1 // (ztest & sign) == 0 ? 0 : -1
+ | subfic TMP2, TMP1, 0
+ | subfe CARG1, CARG1, CARG1
+ |.else
+ | andc TMP1, TMP2, TMP1 // (ztest & ~sign) == 0 ? 0 : 1
+ | addic TMP2, TMP1, -1
+ | subfe CARG1, TMP2, TMP1
+ |.endif
+ | b ->fff_resi
+ |4: // exp >= 31. Check for -(2^31).
+ | xoris TMP1, TMP1, 0x8000
+ | srawi TMP2, CARG3, 31
+ |.if "func" == "floor"
+ | or TMP1, TMP1, CARG2
+ |.endif
+ |.if PPE
+ | orc TMP1, TMP1, TMP2
+ | cmpwi TMP1, 0
+ |.else
+ | orc. TMP1, TMP1, TMP2
+ |.endif
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | lus CARG1, 0x8000 // -(2^31).
+ | beqy ->fff_resi
+ |5:
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG1, 0(BASE)
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | blex func
+ | b ->fff_resn
+ |.endmacro
+ |
+ |.if DUALNUM
+ | math_round floor
+ | math_round ceil
+ |.else
+ | // NYI: Use internal implementation.
+ | math_extern floor
+ | math_extern ceil
+ |.endif
+ |
+ |.if SQRT
+ |.ffunc_n math_sqrt
+ | fsqrt FARG1, FARG1
+ | b ->fff_resn
+ |.else
+ | math_extern sqrt
+ |.endif
+ |
+ |.ffunc math_log
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG1, 0(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checknum CARG1; bge ->fff_fallback
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | blex log
+ | b ->fff_resn
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.if DUALNUM
+ |.ffunc math_ldexp
+ | cmplwi NARGS8:RC, 16
+ | lwz TMP0, 0(BASE)
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG1, 0(BASE)
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | lwz TMP1, 8(BASE)
+ |.if GPR64
+ | lwz CARG2, 12(BASE)
+ |.elif FPU
+ | lwz CARG1, 12(BASE)
+ |.else
+ | lwz CARG3, 12(BASE)
+ |.endif
+ | blt ->fff_fallback
+ | checknum TMP0; bge ->fff_fallback
+ | checknum TMP1; bne ->fff_fallback
+ |.else
+ |.ffunc_nn math_ldexp
+ |.if GPR64
+ | toint CARG2, FARG2
+ |.else
+ | toint CARG1, FARG2
+ |.endif
+ |.endif
+ | blex ldexp
+ | b ->fff_resn
+ |
+ |.ffunc_n math_frexp
+ |.if GPR64
+ | la CARG2, DISPATCH_GL(tmptv)(DISPATCH)
+ |.elif FPU
+ | la CARG1, DISPATCH_GL(tmptv)(DISPATCH)
+ |.else
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ |.endif
+ | lwz PC, FRAME_PC(BASE)
+ | blex frexp
+ | lwz TMP1, DISPATCH_GL(tmptv)(DISPATCH)
+ | la RA, -8(BASE)
+ |.if not DUALNUM
+ | tonum_i FARG2, TMP1
+ |.endif
+ |.if FPU
+ | stfd FARG1, 0(RA)
+ |.else
+ | stw CRET1, 0(RA)
+ | stw CRET2, 4(RA)
+ |.endif
+ | li RD, (2+1)*8
+ |.if DUALNUM
+ | stw TISNUM, 8(RA)
+ | stw TMP1, 12(RA)
+ |.else
+ | stfd FARG2, 8(RA)
+ |.endif
+ | b ->fff_res
+ |
+ |.ffunc_n math_modf
+ |.if GPR64
+ | la CARG2, -8(BASE)
+ |.elif FPU
+ | la CARG1, -8(BASE)
+ |.else
+ | la CARG3, -8(BASE)
+ |.endif
+ | lwz PC, FRAME_PC(BASE)
+ | blex modf
+ | la RA, -8(BASE)
+ |.if FPU
+ | stfd FARG1, 0(BASE)
+ |.else
+ | stw CRET1, 0(BASE)
+ | stw CRET2, 4(BASE)
+ |.endif
+ | li RD, (2+1)*8
+ | b ->fff_res
+ |
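+ |// Scan the arguments left to right. Integer pairs use a branchless
+ |// min/max (carry trick on sign-biased values); once a number is seen,
+ |// continue with the FP comparison loop at 5.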
+ |.macro math_minmax, name, ismax
+ |.if DUALNUM
+ | .ffunc_1 name
+ | checknum CARG3
+ | addi SAVE0, BASE, 8
+ | add SAVE1, BASE, NARGS8:RC
+ | bne >4
+ |1: // Handle integers.
+ | lwz CARG4, 0(SAVE0)
+ | cmplw cr1, SAVE0, SAVE1
+ | lwz CARG2, 4(SAVE0)
+ | bge cr1, ->fff_resi
+ | checknum CARG4
+ | xoris TMP0, CARG1, 0x8000
+ | xoris TMP3, CARG2, 0x8000
+ | bne >3
+ | subfc TMP3, TMP3, TMP0
+ | subfe TMP0, TMP0, TMP0
+ |.if ismax
+ | andc TMP3, TMP3, TMP0
+ |.else
+ | and TMP3, TMP3, TMP0
+ |.endif
+ | add CARG1, TMP3, CARG2
+ |.if GPR64
+ | rldicl CARG1, CARG1, 0, 32
+ |.endif
+ | addi SAVE0, SAVE0, 8
+ | b <1
+ |3:
+ | bge ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ |.if FPU
+ | tonum_i FARG1, CARG1
+ | lfd FARG2, 0(SAVE0)
+ |.else
+ | mr CARG2, CARG1
+ | bl ->vm_sfi2d_1
+ | lwz CARG3, 0(SAVE0)
+ | lwz CARG4, 4(SAVE0)
+ |.endif
+ | b >6
+ |4:
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ |.else
+ | lwz CARG1, 0(BASE)
+ | lwz CARG2, 4(BASE)
+ |.endif
+ | bge ->fff_fallback
+ |5: // Handle numbers.
+ | lwz CARG3, 0(SAVE0)
+ | cmplw cr1, SAVE0, SAVE1
+ |.if FPU
+ | lfd FARG2, 0(SAVE0)
+ |.else
+ | lwz CARG4, 4(SAVE0)
+ |.endif
+ | bge cr1, ->fff_resn
+ | checknum CARG3; bge >7
+ |6:
+ | addi SAVE0, SAVE0, 8
+ |.if FPU
+ |.if ismax
+ | fsub f0, FARG1, FARG2
+ |.else
+ | fsub f0, FARG2, FARG1
+ |.endif
+ | fsel FARG1, f0, FARG1, FARG2
+ |.else
+ | stw CARG1, SFSAVE_1
+ | stw CARG2, SFSAVE_2
+ | stw CARG3, SFSAVE_3
+ | stw CARG4, SFSAVE_4
+ | blex __ledf2
+ | cmpwi CRET1, 0
+ |.if ismax
+ | blt >8
+ |.else
+ | bge >8
+ |.endif
+ | lwz CARG1, SFSAVE_1
+ | lwz CARG2, SFSAVE_2
+ | b <5
+ |8:
+ | lwz CARG1, SFSAVE_3
+ | lwz CARG2, SFSAVE_4
+ |.endif
+ | b <5
+ |7: // Convert integer to number and continue above.
+ | lwz CARG3, 4(SAVE0)
+ | bne ->fff_fallback
+ |.if FPU
+ | tonum_i FARG2, CARG3
+ |.else
+ | bl ->vm_sfi2d_2
+ |.endif
+ | b <6
+ |.else
+ | .ffunc_n name
+ | li TMP1, 8
+ |1:
+ | lwzx CARG2, BASE, TMP1
+ | lfdx FARG2, BASE, TMP1
+ | cmplw cr1, TMP1, NARGS8:RC
+ | checknum CARG2
+ | bge cr1, ->fff_resn
+ | bge ->fff_fallback
+ |.if ismax
+ | fsub f0, FARG1, FARG2
+ |.else
+ | fsub f0, FARG2, FARG1
+ |.endif
+ | addi TMP1, TMP1, 8
+ | fsel FARG1, f0, FARG1, FARG2
+ | b <1
+ |.endif
+ |.endmacro
+ |
+ | math_minmax math_min, 0
+ | math_minmax math_max, 1
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | bne ->fff_fallback // Need exactly 1 argument.
+ | checkstr CARG3
+ | bne ->fff_fallback
+ | lwz TMP0, STR:CARG1->len
+ |.if DUALNUM
+ | lbz CARG1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | li RD, (0+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | cmplwi TMP0, 0
+ | la RA, -8(BASE)
+ | beqy ->fff_res
+ | b ->fff_resi
+ |.else
+ | lbz TMP1, STR:CARG1[1] // Access is always ok (NUL at end).
+ | addic TMP3, TMP0, -1 // RD = ((str->len != 0)+1)*8
+ | subfe RD, TMP3, TMP0
+ | stw TMP1, TONUM_LO // Inlined tonum_u f0, TMP1.
+ | addi RD, RD, 1
+ | lfd f0, TONUM_D
+ | la RA, -8(BASE)
+ | lwz PC, FRAME_PC(BASE)
+ | fsub f0, f0, TOBIT
+ | slwi RD, RD, 3
+ | stfd f0, 0(RA)
+ | b ->fff_res
+ |.endif
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ |.if DUALNUM
+ | lwz TMP0, 4(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bne ->fff_fallback
+ | la CARG2, 7(BASE)
+ |.else
+ | lfd FARG1, 0(BASE)
+ | bne ->fff_fallback // Exactly 1 argument.
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP0, FARG1
+ | la CARG2, TMPD_BLO
+ |.endif
+ | li CARG3, 1
+ | cmplwi TMP0, 255; bgt ->fff_fallback
+ |->fff_newstr:
+ | mr CARG1, L
+ | stp BASE, L->base
+ | stw PC, SAVE_PC
+ | bl extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // Returns GCstr *.
+ | lp BASE, L->base
+ | li CARG3, LJ_TSTR
+ | b ->fff_restv
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | cmplwi NARGS8:RC, 16
+ | lwz CARG3, 16(BASE)
+ |.if not DUALNUM
+ | lfd f0, 16(BASE)
+ |.endif
+ | lwz TMP0, 0(BASE)
+ | lwz STR:CARG1, 4(BASE)
+ | blt ->fff_fallback
+ | lwz CARG2, 8(BASE)
+ |.if DUALNUM
+ | lwz TMP1, 12(BASE)
+ |.else
+ | lfd f1, 8(BASE)
+ |.endif
+ | li TMP2, -1
+ | beq >1
+ |.if DUALNUM
+ | checknum CARG3
+ | lwz TMP2, 20(BASE)
+ | bne ->fff_fallback
+ |1:
+ | checknum CARG2; bne ->fff_fallback
+ |.else
+ | checknum CARG3; bge ->fff_fallback
+ | toint TMP2, f0
+ |1:
+ | checknum CARG2; bge ->fff_fallback
+ |.endif
+ | checkstr TMP0; bne ->fff_fallback
+ |.if not DUALNUM
+ | toint TMP1, f1
+ |.endif
+ | lwz TMP0, STR:CARG1->len
+ | cmplw TMP0, TMP2 // len < end? (unsigned compare)
+ | addi TMP3, TMP2, 1
+ | blt >5
+ |2:
+ | cmpwi TMP1, 0 // start <= 0?
+ | add TMP3, TMP1, TMP0
+ | ble >7
+ |3:
+ | sub CARG3, TMP2, TMP1
+ | addi CARG2, STR:CARG1, #STR-1
+ | srawi TMP0, CARG3, 31
+ | addi CARG3, CARG3, 1
+ | add CARG2, CARG2, TMP1
+ | andc CARG3, CARG3, TMP0
+ |.if GPR64
+ | rldicl CARG2, CARG2, 0, 32
+ | rldicl CARG3, CARG3, 0, 32
+ |.endif
+ | b ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | cmpw TMP0, TMP2 // len >= end? (signed compare)
+ | add TMP2, TMP0, TMP3 // Negative end: end = end+len+1.
+ | bge <2
+ | mr TMP2, TMP0 // Overflow: end = len.
+ | b <2
+ |
+ |7: // Negative start or underflow.
+ | .gpr64 extsw TMP1, TMP1
+ | addic CARG3, TMP1, -1
+ | subfe CARG3, CARG3, CARG3
+ | srawi CARG2, TMP3, 31 // Note: modifies carry.
+ | andc TMP3, TMP3, CARG3
+ | andc TMP1, TMP3, CARG2
+ | addi TMP1, TMP1, 1 // start = 1 + (start ? start+len : 0)
+ | b <3
+ |
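+ |// Generate string.reverse/lower/upper: reset the write pointer of the
+ |// shared temp buffer, apply the lj_buf_putstr_* transform and intern
+ |// the resulting string.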
+ |.macro ffstring_op, name
+ | .ffunc string_ .. name
+ | ffgccheck
+ | cmplwi NARGS8:RC, 8
+ | lwz CARG3, 0(BASE)
+ | lwz STR:CARG2, 4(BASE)
+ | blt ->fff_fallback
+ | checkstr CARG3
+ | la SBUF:CARG1, DISPATCH_GL(tmpbuf)(DISPATCH)
+ | bne ->fff_fallback
+ | lwz TMP0, SBUF:CARG1->b
+ | stw L, SBUF:CARG1->L
+ | stp BASE, L->base
+ | stw PC, SAVE_PC
+ | stw TMP0, SBUF:CARG1->w
+ | bl extern lj_buf_putstr_ .. name
+ | bl extern lj_buf_tostr
+ | b ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
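+ |// All bit.* fast functions normalize their arguments to int32 first:
+ |// DUALNUM integers pass through directly, numbers are wrapped to 32
+ |// bits with the TOBIT trick (add 2^52+2^51, take the low word).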
+ |.macro .ffunc_bit, name
+ |.if DUALNUM
+ | .ffunc_1 bit_..name
+ | checknum CARG3; bnel ->fff_tobit_fb
+ |.else
+ | .ffunc_n bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ |.endif
+ |.endmacro
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name
+ | addi SAVE0, BASE, 8
+ | add SAVE1, BASE, NARGS8:RC
+ |1:
+ | lwz CARG4, 0(SAVE0)
+ | cmplw cr1, SAVE0, SAVE1
+ |.if DUALNUM
+ | lwz CARG2, 4(SAVE0)
+ |.else
+ | lfd FARG1, 0(SAVE0)
+ |.endif
+ | bgey cr1, ->fff_resi
+ | checknum CARG4
+ |.if DUALNUM
+ |.if FPU
+ | bnel ->fff_bitop_fb
+ |.else
+ | beq >3
+ | stw CARG1, SFSAVE_1
+ | bl ->fff_bitop_fb
+ | mr CARG2, CARG1
+ | lwz CARG1, SFSAVE_1
+ |3:
+ |.endif
+ |.else
+ | fadd FARG1, FARG1, TOBIT
+ | bge ->fff_fallback
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ |.endif
+ | ins CARG1, CARG1, CARG2
+ | addi SAVE0, SAVE0, 8
+ | b <1
+ |.endmacro
+ |
+ |.ffunc_bit_op band, and
+ |.ffunc_bit_op bor, or
+ |.ffunc_bit_op bxor, xor
+ |
+ |.ffunc_bit bswap
+ | rotlwi TMP0, CARG1, 8
+ | rlwimi TMP0, CARG1, 24, 0, 7
+ | rlwimi TMP0, CARG1, 24, 16, 23
+ | mr CRET1, TMP0
+ | b ->fff_resi
+ |
+ |.ffunc_bit bnot
+ | not CRET1, CARG1
+ | b ->fff_resi
+ |
+ |.macro .ffunc_bit_sh, name, ins, shmod
+ |.if DUALNUM
+ | .ffunc_2 bit_..name
+ |.if FPU
+ | checknum CARG3; bnel ->fff_tobit_fb
+ |.else
+ | checknum CARG3; beq >1
+ | bl ->fff_tobit_fb
+ | lwz CARG2, 12(BASE) // Conversion polluted CARG2.
+ |1:
+ |.endif
+ | // Note: no inline conversion from number for 2nd argument!
+ | checknum CARG4; bne ->fff_fallback
+ |.else
+ | .ffunc_nn bit_..name
+ | fadd FARG1, FARG1, TOBIT
+ | fadd FARG2, FARG2, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | stfd FARG2, TMPD
+ | lwz CARG2, TMPD_LO
+ |.endif
+ |.if shmod == 1
+ | rlwinm CARG2, CARG2, 0, 27, 31
+ |.elif shmod == 2
+ | neg CARG2, CARG2
+ |.endif
+ | ins CRET1, CARG1, CARG2
+ | b ->fff_resi
+ |.endmacro
+ |
+ |.ffunc_bit_sh lshift, slw, 1
+ |.ffunc_bit_sh rshift, srw, 1
+ |.ffunc_bit_sh arshift, sraw, 1
+ |.ffunc_bit_sh rol, rotlw, 0
+ |.ffunc_bit_sh ror, rotlw, 2
+ |
+ |.ffunc_bit tobit
+ |.if DUALNUM
+ | b ->fff_resi
+ |.else
+ |->fff_resi:
+ | tonum_i FARG1, CRET1
+ |.endif
+ |->fff_resn:
+ | lwz PC, FRAME_PC(BASE)
+ | la RA, -8(BASE)
+ |.if FPU
+ | stfd FARG1, -8(BASE)
+ |.else
+ | stw CARG1, -8(BASE)
+ | stw CARG2, -4(BASE)
+ |.endif
+ | b ->fff_res1
+ |
+ |// Fallback FP number to bit conversion.
+ |->fff_tobit_fb:
+ |.if DUALNUM
+ |.if FPU
+ | lfd FARG1, 0(BASE)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG1, TMPD_LO
+ | blr
+ |.else
+ | bgt ->fff_fallback
+ | mr CARG2, CARG1
+ | mr CARG1, CARG3
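+ |// Soft-float wrap-around conversion of a double (CARG1:CARG2 = hi:lo)
+ |// to an int32: decode the exponent, shift the mantissa into the low
+ |// 32 bits and negate for negative inputs; 2 returns 0 for exponents
+ |// which cannot yield a non-zero result.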
+ |// Modifies: CARG1, CARG2, TMP0, TMP1, TMP2.
+ |->vm_tobit:
+ | slwi TMP2, CARG1, 1
+ | addis TMP2, TMP2, 0x0020
+ | cmpwi TMP2, 0
+ | bge >2
+ | li TMP1, 0x3e0
+ | srawi TMP2, TMP2, 21
+ | not TMP1, TMP1
+ | sub. TMP2, TMP1, TMP2
+ | cmpwi cr7, CARG1, 0
+ | blt >1
+ | slwi TMP1, CARG1, 11
+ | srwi TMP0, CARG2, 21
+ | oris TMP1, TMP1, 0x8000
+ | or TMP1, TMP1, TMP0
+ | srw CARG1, TMP1, TMP2
+ | bclr 4, 28 // Return if cr7[lt] == 0, no hint.
+ | neg CARG1, CARG1
+ | blr
+ |1:
+ | addi TMP2, TMP2, 21
+ | srw TMP1, CARG2, TMP2
+ | slwi CARG2, CARG1, 12
+ | subfic TMP2, TMP2, 20
+ | slw TMP0, CARG2, TMP2
+ | or CARG1, TMP1, TMP0
+ | bclr 4, 28 // Return if cr7[lt] == 0, no hint.
+ | neg CARG1, CARG1
+ | blr
+ |2:
+ | li CARG1, 0
+ | blr
+ |.endif
+ |.endif
+ |->fff_bitop_fb:
+ |.if DUALNUM
+ |.if FPU
+ | lfd FARG1, 0(SAVE0)
+ | bgt ->fff_fallback
+ | fadd FARG1, FARG1, TOBIT
+ | stfd FARG1, TMPD
+ | lwz CARG2, TMPD_LO
+ | blr
+ |.else
+ | bgt ->fff_fallback
+ | mr CARG1, CARG4
+ | b ->vm_tobit
+ |.endif
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RB = CFUNC, RC = nargs*8
+ | lp TMP3, CFUNC:RB->f
+ | add TMP1, BASE, NARGS8:RC
+ | lwz PC, FRAME_PC(BASE) // Fallback may overwrite PC.
+ | addi TMP0, TMP1, 8*LUA_MINSTACK
+ | lwz TMP2, L->maxstack
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | .toc lp TMP3, 0(TMP3)
+ | cmplw TMP0, TMP2
+ | stp BASE, L->base
+ | stp TMP1, L->top
+ | mr CARG1, L
+ | bgt >5 // Need to grow stack.
+ | mtctr TMP3
+ | bctrl // (lua_State *L)
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | lp BASE, L->base
+ | cmpwi CRET1, 0
+ | slwi RD, CRET1, 3
+ | la RA, -8(BASE)
+ | bgt ->fff_res // Returned nresults+1?
+ |1: // Returned 0 or -1: retry fast path.
+ | lp TMP0, L->top
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | sub NARGS8:RC, TMP0, BASE
+ | bne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | andix. TMP0, PC, FRAME_TYPE
+ | rlwinm TMP1, PC, 0, 0, 28
+ | bne >3
+ | lwz INS, -4(PC)
+ | decode_RA8 TMP1, INS
+ | addi TMP1, TMP1, 8
+ |3:
+ | sub TMP2, BASE, TMP1
+ | b ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | li CARG2, LUA_MINSTACK
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | cmpw TMP0, TMP0 // Set 4*cr0+eq to force retry.
+ | b <1
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RC = nargs*8
+ | mflr SAVE0
+ | stp BASE, L->base
+ | add TMP0, BASE, NARGS8:RC
+ | stw PC, SAVE_PC // Redundant (but a defined value).
+ | stp TMP0, L->top
+ | mr CARG1, L
+ | bl extern lj_gc_step // (lua_State *L)
+ | lp BASE, L->base
+ | mtlr SAVE0
+ | lp TMP0, L->top
+ | sub NARGS8:RC, TMP0, BASE
+ | lwz CFUNC:RB, FRAME_FUNC(BASE)
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_VMEVENT // No recording while in vmevent.
+ | bne >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE
+ | bne >1
+ | subi TMP2, TMP2, 1
+ | andi. TMP0, TMP3, LUA_MASKLINE|LUA_MASKCOUNT
+ | beqy >1
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | b >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | beq >1
+ |5: // Re-dispatch to static ins.
+ | addi TMP1, TMP1, GG_DISP2STATIC // Assumes decode_OPP TMP1, INS.
+ | lpx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | bctr
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | lbz TMP3, DISPATCH_GL(hookmask)(DISPATCH)
+ | lwz TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | andix. TMP0, TMP3, HOOK_ACTIVE // Hook already active?
+ | rlwinm TMP0, TMP3, 31-LUA_HOOKLINE, 31, 0
+ | bne <5
+ |
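+ | // Count hooks fire when hookcount reaches zero, line hooks on every
+ | // dispatch; with neither enabled, re-dispatch to the static ins.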
+ | cmpwi cr1, TMP0, 0
+ | addic. TMP2, TMP2, -1
+ | beq cr1, <5
+ | stw TMP2, DISPATCH_GL(hookcount)(DISPATCH)
+ | beq >1
+ | bge cr1, <5
+ |1:
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stp BASE, L->base
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | bl extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | lp BASE, L->base
+ |4: // Re-dispatch to static ins.
+ | lwz INS, -4(PC)
+ | decode_OPP TMP1, INS
+ | decode_RB8 RB, INS
+ | addi TMP1, TMP1, GG_DISP2STATIC
+ | decode_RD8 RD, INS
+ | lpx TMP0, DISPATCH, TMP1
+ | decode_RA8 RA, INS
+ | decode_RC8 RC, INS
+ | mtctr TMP0
+ | bctr
+ |
+ |->cont_hook: // Continue from hook yield.
+ | addi PC, PC, 4
+ | lwz MULTRES, -20(RB) // Restore MULTRES for *M ins.
+ | b <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw PC, SAVE_PC
+ | lwz TMP1, LFUNC:TMP1->pc
+ | mr CARG2, PC
+ | stw L, DISPATCH_J(L)(DISPATCH)
+ | lbz TMP1, PC2PROTO(framesize)(TMP1)
+ | stp BASE, L->base
+ | slwi TMP1, TMP1, 3
+ | add TMP1, BASE, TMP1
+ | stp TMP1, L->top
+ | bl extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | b <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mr CARG2, PC
+ |.if JIT
+ | b >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | ori CARG2, PC, 1
+ |1:
+ |.endif
+ | add TMP0, BASE, RC
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | stp BASE, L->base
+ | sub RA, RA, BASE
+ | stp TMP0, L->top
+ | bl extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // Returns ASMFunction.
+ | lp BASE, L->base
+ | lp TMP0, L->top
+ | stw ZERO, SAVE_PC // Invalidate for subsequent line hook.
+ | sub NARGS8:RC, TMP0, BASE
+ | add RA, BASE, RA
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | lwz INS, -4(PC)
+ | mtctr CRET1
+ | bctr
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // RA = resultptr, RB = meta base
+ | lwz INS, -4(PC)
+ | lwz TRACE:TMP2, -20(RB) // Save previous trace.
+ | addic. TMP1, MULTRES, -8
+ | decode_RA8 RC, INS // Call base.
+ | beq >2
+ |1: // Move results down.
+ |.if FPU
+ | lfd f0, 0(RA)
+ |.else
+ | lwz CARG1, 0(RA)
+ | lwz CARG2, 4(RA)
+ |.endif
+ | addic. TMP1, TMP1, -8
+ | addi RA, RA, 8
+ |.if FPU
+ | stfdx f0, BASE, RC
+ |.else
+ | add CARG3, BASE, RC
+ | stw CARG1, 0(CARG3)
+ | stw CARG2, 4(CARG3)
+ |.endif
+ | addi RC, RC, 8
+ | bne <1
+ |2:
+ | decode_RA8 RA, INS
+ | decode_RB8 RB, INS
+ | add RA, RA, RB
+ |3:
+ | cmplw RA, RC
+ | bgt >9 // More results wanted?
+ |
+ | lhz TMP3, TRACE:TMP2->traceno
+ | lhz RD, TRACE:TMP2->link
+ | cmpw RD, TMP3
+ | cmpwi cr1, RD, 0
+ | beq ->cont_nop // Blacklisted.
+ | slwi RD, RD, 3
+ | bne cr1, =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | stw TMP3, DISPATCH_J(exitno)(DISPATCH)
+ | stp L, DISPATCH_J(L)(DISPATCH)
+ | stp BASE, L->base
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | mr CARG2, PC
+ | bl extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | lp BASE, L->base
+ | b ->cont_nop
+ |
+ |9:
+ | stwx TISNIL, BASE, RC
+ | addi RC, RC, 8
+ | b <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mr CARG1, L
+ | stw MULTRES, SAVE_MULTRES
+ | mr CARG2, PC
+ | stp BASE, L->base
+ | bl extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | lp BASE, L->base
+ | subi PC, PC, 4
+ | b ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
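+ |// Store four FP registers into their ExitState slots on the stack
+ |// (16 byte header, one 8 byte slot per register).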
+ |.macro savex_, a, b, c, d
+ |.if FPU
+ | stfd f..a, 16+a*8(sp)
+ | stfd f..b, 16+b*8(sp)
+ | stfd f..c, 16+c*8(sp)
+ | stfd f..d, 16+d*8(sp)
+ |.endif
+ |.endmacro
+ |
+ |->vm_exit_handler:
+ |.if JIT
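+ | // ExitState frame: 16 byte header, 32*8 bytes of FPR slots, then
+ | // 32*4 bytes of GPR slots. stmw saves r2..r31 in a single go.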
+ | addi sp, sp, -(16+32*8+32*4)
+ | stmw r2, 16+32*8+2*4(sp)
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ | li CARG2, ~LJ_VMST_EXIT
+ | lwz CARG1, 16+32*8+32*4(sp) // Get stack chain.
+ | stw CARG2, DISPATCH_GL(vmstate)(DISPATCH)
+ | savex_ 0,1,2,3
+ | stw CARG1, 0(sp) // Store extended stack chain.
+ | clrso TMP1
+ | savex_ 4,5,6,7
+ | addi CARG2, sp, 16+32*8+32*4 // Recompute original value of sp.
+ | savex_ 8,9,10,11
+ | stw CARG2, 16+32*8+1*4(sp) // Store sp in RID_SP.
+ | savex_ 12,13,14,15
+ | mflr CARG3
+ | li TMP1, 0
+ | savex_ 16,17,18,19
+ | stw TMP1, 16+32*8+0*4(sp) // Clear RID_TMP.
+ | savex_ 20,21,22,23
+ | lhz CARG4, 2(CARG3) // Load trace number.
+ | savex_ 24,25,26,27
+ | lwz L, DISPATCH_GL(cur_L)(DISPATCH)
+ | savex_ 28,29,30,31
+ | sub CARG3, TMP0, CARG3 // Compute exit number.
+ | lp BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | srwi CARG3, CARG3, 2
+ | stp L, DISPATCH_J(L)(DISPATCH)
+ | subi CARG3, CARG3, 2
+ | stp BASE, L->base
+ | stw CARG4, DISPATCH_J(parent)(DISPATCH)
+ | stw TMP1, DISPATCH_GL(jit_base)(DISPATCH)
+ | addi CARG1, DISPATCH, GG_DISP2J
+ | stw CARG3, DISPATCH_J(exitno)(DISPATCH)
+ | addi CARG2, sp, 16
+ | bl extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // Returns MULTRES (unscaled) or negated error code.
+ | lp TMP1, L->cframe
+ | lwz TMP2, 0(sp)
+ | lp BASE, L->base
+ |.if GPR64
+ | rldicr sp, TMP1, 0, 61
+ |.else
+ | rlwinm sp, TMP1, 0, 0, 29
+ |.endif
+ | lwz PC, SAVE_PC // Get SAVE_PC.
+ | stw TMP2, 0(sp)
+ | stw L, SAVE_L // Set SAVE_L (on-trace resume/yield).
+ | b >1
+ |.endif
+ |->vm_exit_interp:
+ |.if JIT
+ | // CARG1 = MULTRES or negated error code, BASE, PC and JGL set.
+ | lwz L, SAVE_L
+ | addi DISPATCH, JGL, -GG_DISP2G-32768
+ | stp BASE, L->base
+ |1:
+ | cmpwi CARG1, 0
+ | blt >9 // Check for error from exit.
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | slwi MULTRES, CARG1, 3
+ | li TMP2, 0
+ | stw MULTRES, SAVE_MULTRES
+ | lwz TMP1, LFUNC:RB->pc
+ | stw TMP2, DISPATCH_GL(jit_base)(DISPATCH)
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | // Setup type comparison constants.
+ | li TISNUM, LJ_TISNUM
+ | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | .FPU stw TMP3, TMPD
+ | li ZERO, 0
+ | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | .FPU lfs TOBIT, TMPD
+ | .FPU stw TMP3, TMPD
+ | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | li TISNIL, LJ_TNIL
+ | .FPU stw TMP0, TONUM_HI
+ | .FPU lfs TONUM, TMPD
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ | // Assumes TISNIL == ~LJ_VMST_INTERP == -1.
+ | stw TISNIL, DISPATCH_GL(vmstate)(DISPATCH)
+ | decode_OPP TMP1, INS
+ | decode_RA8 RA, INS
+ | lpx TMP0, DISPATCH, TMP1
+ | mtctr TMP0
+ | cmplwi TMP1, BC_FUNCF*4 // Function header?
+ | bge >2
+ | decode_RB8 RB, INS
+ | decode_RD8 RD, INS
+ | decode_RC8 RC, INS
+ | bctr
+ |2:
+ | cmplwi TMP1, (BC_FUNCC+2)*4 // Fast function?
+ | blt >3
+ | // Check frame below fast function.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andix. TMP0, TMP1, FRAME_TYPE
+ | bney >3 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | lwz TMP2, -4(TMP1)
+ | decode_RA8 TMP0, TMP2
+ | sub TMP1, BASE, TMP0
+ | lwz LFUNC:TMP2, -12(TMP1)
+ | lwz TMP1, LFUNC:TMP2->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ |3:
+ | subi RC, MULTRES, 8
+ | add RA, RA, BASE
+ | bctr
+ |
+ |9: // Rethrow error from the right C frame.
+ | neg CARG2, CARG1
+ | mr CARG1, L
+ | bl extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// NYI: Use internal implementations of floor, ceil, trunc, sfcmp.
+ |
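+ |// Soft-float int32 to double conversion: build sign, exponent and
+ |// left-aligned mantissa by hand. AHI/ALO are the hi/lo result words.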
+ |.macro sfi2d, AHI, ALO
+ |.if not FPU
+ | mr. AHI, ALO
+ | bclr 12, 2 // Handle zero first.
+ | srawi TMP0, ALO, 31
+ | xor TMP1, ALO, TMP0
+ | sub TMP1, TMP1, TMP0 // Absolute value in TMP1.
+ | cntlzw AHI, TMP1
+ | andix. TMP0, TMP0, 0x800 // Mask sign bit.
+ | slw TMP1, TMP1, AHI // Align mantissa left with leading 1.
+ | subfic AHI, AHI, 0x3ff+31-1 // Exponent -1 in AHI.
+ | slwi ALO, TMP1, 21
+ | or AHI, AHI, TMP0 // Sign | Exponent.
+ | srwi TMP1, TMP1, 11
+ | slwi AHI, AHI, 20 // Align left.
+ | add AHI, AHI, TMP1 // Add mantissa, increment exponent.
+ | blr
+ |.endif
+ |.endmacro
+ |
+ |// Input: CARG2. Output: CARG1, CARG2. Temporaries: TMP0, TMP1.
+ |->vm_sfi2d_1:
+ | sfi2d CARG1, CARG2
+ |
+ |// Input: CARG4. Output: CARG3, CARG4. Temporaries: TMP0, TMP1.
+ |->vm_sfi2d_2:
+ | sfi2d CARG3, CARG4
+ |
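+ |// Integer modulo with the result taking the sign of the divisor
+ |// (floored modulo). The overflow path at 1 returns 0 for a zero
+ |// divisor and clears SO for -2^31 % -1.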
+ |->vm_modi:
+ | divwo. TMP0, CARG1, CARG2
+ | bso >1
+ |.if GPR64
+ | xor CARG3, CARG1, CARG2
+ | cmpwi CARG3, 0
+ |.else
+ | xor. CARG3, CARG1, CARG2
+ |.endif
+ | mullw TMP0, TMP0, CARG2
+ | sub CARG1, CARG1, TMP0
+ | bgelr
+ | cmpwi CARG1, 0; beqlr
+ | add CARG1, CARG1, CARG2
+ | blr
+ |1:
+ | cmpwi CARG2, 0
+ | li CARG1, 0
+ | beqlr
+ | clrso TMP0 // Clear SO for -2147483648 % -1 and return 0.
+ | blr
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// void lj_vm_cachesync(void *start, void *end)
+ |// Flush D-Cache and invalidate I-Cache. Assumes 32 byte cache line size.
+ |// This is a good lower bound, except for very ancient PPC models.
+ |->vm_cachesync:
+ |.if JIT or FFI
+ | // Compute start of first cache line and number of cache lines.
+ | rlwinm CARG1, CARG1, 0, 0, 26
+ | sub CARG2, CARG2, CARG1
+ | addi CARG2, CARG2, 31
+ | rlwinm. CARG2, CARG2, 27, 5, 31
+ | beqlr
+ | mtctr CARG2
+ | mr CARG3, CARG1
+ |1: // Flush D-Cache.
+ | dcbst r0, CARG1
+ | addi CARG1, CARG1, 32
+ | bdnz <1
+ | sync
+ | mtctr CARG2
+ |1: // Invalidate I-Cache.
+ | icbi r0, CARG3
+ | addi CARG3, CARG3, 32
+ | bdnz <1
+ | isync
+ | blr
+ |.endif
+ |
+ |->vm_next:
+ |.if JIT
+ | NYI // On big-endian.
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in r11, g in r12.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs
+ | lwz CTSTATE, GL:r12->ctype_state
+ | addi DISPATCH, r12, GG_G2DISP
+ | stw r11, CTSTATE->cb.slot
+ | stw r3, CTSTATE->cb.gpr[0]
+ | .FPU stfd f1, CTSTATE->cb.fpr[0]
+ | stw r4, CTSTATE->cb.gpr[1]
+ | .FPU stfd f2, CTSTATE->cb.fpr[1]
+ | stw r5, CTSTATE->cb.gpr[2]
+ | .FPU stfd f3, CTSTATE->cb.fpr[2]
+ | stw r6, CTSTATE->cb.gpr[3]
+ | .FPU stfd f4, CTSTATE->cb.fpr[3]
+ | stw r7, CTSTATE->cb.gpr[4]
+ | .FPU stfd f5, CTSTATE->cb.fpr[4]
+ | stw r8, CTSTATE->cb.gpr[5]
+ | .FPU stfd f6, CTSTATE->cb.fpr[5]
+ | stw r9, CTSTATE->cb.gpr[6]
+ | .FPU stfd f7, CTSTATE->cb.fpr[6]
+ | stw r10, CTSTATE->cb.gpr[7]
+ | .FPU stfd f8, CTSTATE->cb.fpr[7]
+ | addi TMP0, sp, CFRAME_SPACE+8
+ | stw TMP0, CTSTATE->cb.stack
+ | mr CARG1, CTSTATE
+ | stw CTSTATE, SAVE_PC // Any value outside of bytecode is ok.
+ | mr CARG2, sp
+ | bl extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // Returns lua_State *.
+ | lp BASE, L:CRET1->base
+ | li TISNUM, LJ_TISNUM // Setup type comparison constants.
+ | lp RC, L:CRET1->top
+ | .FPU lus TMP3, 0x59c0 // TOBIT = 2^52 + 2^51 (float).
+ | li ZERO, 0
+ | mr L, CRET1
+ | .FPU stw TMP3, TMPD
+ | .FPU lus TMP0, 0x4338 // Hiword of 2^52 + 2^51 (double)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | .FPU ori TMP3, TMP3, 0x0004 // TONUM = 2^52 + 2^51 + 2^31 (float).
+ | .FPU stw TMP0, TONUM_HI
+ | li TISNIL, LJ_TNIL
+ | li_vmstate INTERP
+ | .FPU lfs TOBIT, TMPD
+ | .FPU stw TMP3, TMPD
+ | sub RC, RC, BASE
+ | st_vmstate
+ | .FPU lfs TONUM, TMPD
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | lwz CTSTATE, DISPATCH_GL(ctype_state)(DISPATCH)
+ | stp BASE, L->base
+ | stp RB, L->top
+ | stp L, CTSTATE->L
+ | mr CARG1, CTSTATE
+ | mr CARG2, RA
+ | bl extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | lwz CRET1, CTSTATE->cb.gpr[0]
+ | .FPU lfd FARG1, CTSTATE->cb.fpr[0]
+ | lwz CRET2, CTSTATE->cb.gpr[1]
+ | b ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, CARG1
+ | lwz TMP1, CCSTATE->spadj
+ | mflr TMP0
+ | lbz CARG2, CCSTATE->nsp
+ | lbz CARG3, CCSTATE->nfpr
+ | neg TMP1, TMP1
+ | stw TMP0, 4(sp)
+ | cmpwi cr1, CARG3, 0
+ | mr TMP2, sp
+ | addic. CARG2, CARG2, -1
+ | stwux sp, sp, TMP1
+ | crnot 4*cr1+eq, 4*cr1+eq // For vararg calls.
+ | stw r14, -4(TMP2)
+ | stw CCSTATE, -8(TMP2)
+ | mr r14, TMP2
+ | la TMP1, CCSTATE->stack
+ | slwi CARG2, CARG2, 2
+ | blty >2
+ | la TMP2, 8(sp)
+ |1:
+ | lwzx TMP0, TMP1, CARG2
+ | stwx TMP0, TMP2, CARG2
+ | addic. CARG2, CARG2, -4
+ | bge <1
+ |2:
+ | bney cr1, >3
+ | .FPU lfd f1, CCSTATE->fpr[0]
+ | .FPU lfd f2, CCSTATE->fpr[1]
+ | .FPU lfd f3, CCSTATE->fpr[2]
+ | .FPU lfd f4, CCSTATE->fpr[3]
+ | .FPU lfd f5, CCSTATE->fpr[4]
+ | .FPU lfd f6, CCSTATE->fpr[5]
+ | .FPU lfd f7, CCSTATE->fpr[6]
+ | .FPU lfd f8, CCSTATE->fpr[7]
+ |3:
+ | lp TMP0, CCSTATE->func
+ | lwz CARG2, CCSTATE->gpr[1]
+ | lwz CARG3, CCSTATE->gpr[2]
+ | lwz CARG4, CCSTATE->gpr[3]
+ | lwz CARG5, CCSTATE->gpr[4]
+ | mtctr TMP0
+ | lwz r8, CCSTATE->gpr[5]
+ | lwz r9, CCSTATE->gpr[6]
+ | lwz r10, CCSTATE->gpr[7]
+ | lwz CARG1, CCSTATE->gpr[0] // Do this last, since CCSTATE is CARG1.
+ | bctrl
+ | lwz CCSTATE:TMP1, -8(r14)
+ | lwz TMP2, -4(r14)
+ | lwz TMP0, 4(r14)
+ | stw CARG1, CCSTATE:TMP1->gpr[0]
+ | .FPU stfd FARG1, CCSTATE:TMP1->fpr[0]
+ | stw CARG2, CCSTATE:TMP1->gpr[1]
+ | mtlr TMP0
+ | stw CARG3, CCSTATE:TMP1->gpr[2]
+ | mr sp, r14
+ | stw CARG4, CCSTATE:TMP1->gpr[3]
+ | mr r14, TMP2
+ | blr
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux CARG1, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux CARG3, RD, BASE
+ | lwz TMP2, -4(PC)
+ | checknum cr0, CARG1
+ | lwz CARG4, 4(RD)
+ | decode_RD4 TMP2, TMP2
+ | checknum cr1, CARG3
+ | addis SAVE0, TMP2, -(BCBIAS_J*4 >> 16)
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG4
+ if (op == BC_ISLT) {
+ | bge >2
+ } else if (op == BC_ISGE) {
+ | blt >2
+ } else if (op == BC_ISLE) {
+ | bgt >2
+ } else {
+ | ble >2
+ }
+ |1:
+ | add PC, PC, SAVE0
+ |2:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | bgt cr0, ->vmeta_comp
+ | // RA is a number.
+ | .FPU lfd f0, 0(RA)
+ | bgt cr1, ->vmeta_comp
+ | blt cr1, >4
+ | // RA is a number, RD is an integer.
+ |.if FPU
+ | tonum_i f1, CARG4
+ |.else
+ | bl ->vm_sfi2d_2
+ |.endif
+ | b >5
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | bgt cr1, ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ |.if FPU
+ | tonum_i f0, CARG2
+ |.else
+ | bl ->vm_sfi2d_1
+ |.endif
+ |4:
+ | .FPU lfd f1, 0(RD)
+ |5:
+ |.if FPU
+ | fcmpu cr0, f0, f1
+ |.else
+ | blex __ledf2
+ | cmpwi CRET1, 0
+ |.endif
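+ | // For <= and > fold eq into lt, so a single lt test decides.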
+ if (op == BC_ISLT) {
+ | bge <2
+ } else if (op == BC_ISGE) {
+ | blt <2
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge <2
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt <2
+ }
+ | b <1
+ |.else
+ | lwzx TMP0, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwzx TMP1, BASE, RD
+ | checknum cr0, TMP0
+ | lwz TMP2, -4(PC)
+ | lfdx f1, BASE, RD
+ | checknum cr1, TMP1
+ | decode_RD4 TMP2, TMP2
+ | bge cr0, ->vmeta_comp
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | bge cr1, ->vmeta_comp
+ | fcmpu cr0, f0, f1
+ if (op == BC_ISLT) {
+ | bge >1
+ } else if (op == BC_ISGE) {
+ | blt >1
+ } else if (op == BC_ISLE) {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | bge >1
+ } else {
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+eq
+ | blt >1
+ }
+ | add PC, PC, TMP2
+ |1:
+ | ins_next
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | // RA = src1*8, RD = src2*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux CARG1, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux CARG3, RD, BASE
+ | checknum cr0, CARG1
+ | lwz SAVE0, -4(PC)
+ | checknum cr1, CARG3
+ | decode_RD4 SAVE0, SAVE0
+ | lwz CARG4, 4(RD)
+ | cror 4*cr7+gt, 4*cr0+gt, 4*cr1+gt
+ | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ | ble cr7, ->BC_ISEQN_Z
+ } else {
+ | ble cr7, ->BC_ISNEN_Z
+ }
+ |.else
+ | lwzux CARG1, RA, BASE
+ | lwz SAVE0, 0(PC)
+ | lfd f0, 0(RA)
+ | addi PC, PC, 4
+ | lwzux CARG3, RD, BASE
+ | checknum cr0, CARG1
+ | decode_RD4 SAVE0, SAVE0
+ | lfd f1, 0(RD)
+ | checknum cr1, CARG3
+ | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
+ | bge cr0, >5
+ | bge cr1, >5
+ | fcmpu cr0, f0, f1
+ if (vk) {
+ | bne >1
+ | add PC, PC, SAVE0
+ } else {
+ | beq >1
+ | add PC, PC, SAVE0
+ }
+ |1:
+ | ins_next
+ |.endif
+ |5: // Either or both types are not numbers.
+ |.if not DUALNUM
+ | lwz CARG2, 4(RA)
+ | lwz CARG4, 4(RD)
+ |.endif
+ |.if FFI
+ | cmpwi cr7, CARG1, LJ_TCDATA
+ | cmpwi cr5, CARG3, LJ_TCDATA
+ |.endif
+ | not TMP2, CARG1
+ | cmplw CARG1, CARG3
+ | cmplwi cr1, TMP2, ~LJ_TISPRI // Primitive?
+ |.if FFI
+ | cror 4*cr7+eq, 4*cr7+eq, 4*cr5+eq
+ |.endif
+ | cmplwi cr6, TMP2, ~LJ_TISTABUD // Table or userdata?
+ |.if FFI
+ | beq cr7, ->vmeta_equal_cd
+ |.endif
+ | cmplw cr5, CARG2, CARG4
+ | crandc 4*cr0+gt, 4*cr0+eq, 4*cr1+gt // 2: Same type and primitive.
+ | crorc 4*cr0+lt, 4*cr5+eq, 4*cr0+eq // 1: Same tv or different type.
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr5+eq // 0: Same type and same tv.
+ | mr SAVE1, PC
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr0+gt // 0 or 2.
+ | cror 4*cr0+lt, 4*cr0+lt, 4*cr0+gt // 1 or 2.
+ if (vk) {
+ | bne cr0, >6
+ | add PC, PC, SAVE0
+ |6:
+ } else {
+ | beq cr0, >6
+ | add PC, PC, SAVE0
+ |6:
+ }
+ |.if DUALNUM
+ | bge cr0, >2 // Done if 1 or 2.
+ |1:
+ | ins_next
+ |2:
+ |.else
+ | blt cr0, <1 // Done if 1 or 2.
+ |.endif
+ | blt cr6, <1 // Done if not tab/ud.
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | mr CARG3, CARG4
+ | lwz TAB:TMP2, TAB:CARG2->metatable
+ | li CARG4, 1-vk // ne = 0 or 1.
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable?
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_eq
+ | bne <1 // Or 'no __eq' flag set?
+ | mr PC, SAVE1 // Restore old PC.
+ | b ->vmeta_equal // Handle __eq metamethod.
+ break;
+
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | // RA = src*8, RD = str_const*8 (~), JMP with RD = target
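+ | // Branchless compare: TMP0|TMP1 is zero only for a TSTR with the
+ | // identical (interned) GCstr. subfic/subfe expand this into a 0/-1
+ | // mask which keeps or clears the jump offset.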
+ | lwzux TMP0, RA, BASE
+ | srwi RD, RD, 1
+ | lwz STR:TMP3, 4(RA)
+ | lwz TMP2, 0(PC)
+ | subfic RD, RD, -4
+ | addi PC, PC, 4
+ |.if FFI
+ | cmpwi TMP0, LJ_TCDATA
+ |.endif
+ | lwzx STR:TMP1, KBASE, RD // KBASE-4-str_const*4
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TSTR
+ |.if FFI
+ | beq ->vmeta_equal_cd
+ |.endif
+ | sub TMP1, STR:TMP1, STR:TMP3
+ | or TMP0, TMP0, TMP1
+ | decode_RD4 TMP2, TMP2
+ | subfic TMP0, TMP0, 0
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | // RA = src*8, RD = num_const*8, JMP with RD = target
+ |.if DUALNUM
+ | lwzux CARG1, RA, BASE
+ | addi PC, PC, 4
+ | lwz CARG2, 4(RA)
+ | lwzux CARG3, RD, KBASE
+ | checknum cr0, CARG1
+ | lwz SAVE0, -4(PC)
+ | checknum cr1, CARG3
+ | decode_RD4 SAVE0, SAVE0
+ | lwz CARG4, 4(RD)
+ | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
+ if (vk) {
+ |->BC_ISEQN_Z:
+ } else {
+ |->BC_ISNEN_Z:
+ }
+ | bne cr0, >7
+ | bne cr1, >8
+ | cmpw CARG2, CARG4
+ |4:
+ |.else
+ if (vk) {
+ |->BC_ISEQN_Z: // Dummy label.
+ } else {
+ |->BC_ISNEN_Z: // Dummy label.
+ }
+ | lwzx CARG1, BASE, RA
+ | addi PC, PC, 4
+ | lfdx f0, BASE, RA
+ | lwz SAVE0, -4(PC)
+ | lfdx f1, KBASE, RD
+ | decode_RD4 SAVE0, SAVE0
+ | checknum CARG1
+ | addis SAVE0, SAVE0, -(BCBIAS_J*4 >> 16)
+ | bge >3
+ | fcmpu cr0, f0, f1
+ |.endif
+ if (vk) {
+ | bne >1
+ | add PC, PC, SAVE0
+ |1:
+ |.if not FFI
+ |3:
+ |.endif
+ } else {
+ | beq >2
+ |1:
+ |.if not FFI
+ |3:
+ |.endif
+ | add PC, PC, SAVE0
+ |2:
+ }
+ | ins_next
+ |.if FFI
+ |3:
+ | cmpwi CARG1, LJ_TCDATA
+ | beq ->vmeta_equal_cd
+ | b <1
+ |.endif
+ |.if DUALNUM
+ |7: // RA is not an integer.
+ | bge cr0, <3
+ | // RA is a number.
+ | .FPU lfd f0, 0(RA)
+ | blt cr1, >1
+ | // RA is a number, RD is an integer.
+ |.if FPU
+ | tonum_i f1, CARG4
+ |.else
+ | bl ->vm_sfi2d_2
+ |.endif
+ | b >2
+ |
+ |8: // RA is an integer, RD is a number.
+ |.if FPU
+ | tonum_i f0, CARG2
+ |.else
+ | bl ->vm_sfi2d_1
+ |.endif
+ |1:
+ | .FPU lfd f1, 0(RD)
+ |2:
+ |.if FPU
+ | fcmpu cr0, f0, f1
+ |.else
+ | blex __ledf2
+ | cmpwi CRET1, 0
+ |.endif
+ | b <4
+ |.endif
+ break;
+
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | // RA = src*8, RD = primitive_type*8 (~), JMP with RD = target
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | lwz TMP2, 0(PC)
+ | not TMP1, TMP1
+ | addi PC, PC, 4
+ |.if FFI
+ | cmpwi TMP0, LJ_TCDATA
+ |.endif
+ | sub TMP0, TMP0, TMP1
+ |.if FFI
+ | beq ->vmeta_equal_cd
+ |.endif
+ | decode_RD4 TMP2, TMP2
+ | .gpr64 extsw TMP0, TMP0
+ | addic TMP0, TMP0, -1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ | subfe TMP1, TMP1, TMP1
+ if (vk) {
+ | and TMP2, TMP2, TMP1
+ } else {
+ | andc TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ | ins_next
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | // RA = dst*8 or unused, RD = src*8, JMP with RD = target
+ | lwzx TMP0, BASE, RD
+ | lwz INS, 0(PC)
+ | addi PC, PC, 4
+ if (op == BC_IST || op == BC_ISF) {
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP0, TMP0, LJ_TTRUE
+ | decode_RD4 TMP2, INS
+ | subfe TMP1, TMP1, TMP1
+ | addis TMP2, TMP2, -(BCBIAS_J*4 >> 16)
+ if (op == BC_IST) {
+ | andc TMP2, TMP2, TMP1
+ } else {
+ | and TMP2, TMP2, TMP1
+ }
+ | add PC, PC, TMP2
+ } else {
+ | li TMP1, LJ_TFALSE
+ |.if FPU
+ | lfdx f0, BASE, RD
+ |.else
+ | lwzux CARG1, RD, BASE
+ | lwz CARG2, 4(RD)
+ |.endif
+ | cmplw TMP0, TMP1
+ if (op == BC_ISTC) {
+ | bge >1
+ } else {
+ | blt >1
+ }
+ | addis PC, PC, -(BCBIAS_J*4 >> 16)
+ | decode_RD4 TMP2, INS
+ |.if FPU
+ | stfdx f0, BASE, RA
+ |.else
+ | stwux CARG1, RA, BASE
+ | stw CARG2, 4(RA)
+ |.endif
+ | add PC, PC, TMP2
+ |1:
+ }
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | // RA = src*8, RD = -type*8
+ | lwzx TMP0, BASE, RA
+ | srwi TMP1, RD, 3
+ | ins_next1
+ |.if not PPE and not GPR64
+ | add. TMP0, TMP0, TMP1
+ |.else
+ | neg TMP1, TMP1
+ | cmpw TMP0, TMP1
+ |.endif
+ | bne ->vmeta_istype
+ | ins_next2
+ break;
+ case BC_ISNUM:
+ | // RA = src*8, RD = -(TISNUM-1)*8
+ | lwzx TMP0, BASE, RA
+ | ins_next1
+ | checknum TMP0
+ | bge ->vmeta_istype
+ | ins_next2
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ |.if FPU
+ | lfdx f0, BASE, RD
+ | stfdx f0, BASE, RA
+ |.else
+ | lwzux TMP0, RD, BASE
+ | lwz TMP1, 4(RD)
+ | stwux TMP0, RA, BASE
+ | stw TMP1, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+ case BC_NOT:
+ | // RA = dst*8, RD = src*8
+ | ins_next1
+ | lwzx TMP0, BASE, RD
+ | .gpr64 extsw TMP0, TMP0
+ | subfic TMP1, TMP0, LJ_TTRUE
+ | adde TMP0, TMP0, TMP1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_UNM:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP1, RD, BASE
+ | lwz TMP0, 4(RD)
+ | checknum TMP1
+ |.if DUALNUM
+ | bne >5
+ |.if GPR64
+ | lus TMP2, 0x8000
+ | neg TMP0, TMP0
+ | cmplw TMP0, TMP2
+ | beq >4
+ |.else
+ | nego. TMP0, TMP0
+ | bso >4
+ |1:
+ |.endif
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw TMP0, 4(RA)
+ |3:
+ | ins_next2
+ |4:
+ |.if not GPR64
+ | // Potential overflow.
+ | checkov TMP1, <1 // Ignore unrelated overflow.
+ |.endif
+ | lus TMP1, 0x41e0 // 2^31.
+ | li TMP0, 0
+ | b >7
+ |.endif
+ |5:
+ | bge ->vmeta_unm
+ | xoris TMP1, TMP1, 0x8000
+ |7:
+ | ins_next1
+ | stwux TMP1, RA, BASE
+ | stw TMP0, 4(RA)
+ |.if DUALNUM
+ | b <3
+ |.else
+ | ins_next2
+ |.endif
+ break;
+ case BC_LEN:
+ | // RA = dst*8, RD = src*8
+ | lwzux TMP0, RD, BASE
+ | lwz CARG1, 4(RD)
+ | checkstr TMP0; bne >2
+ | lwz CRET1, STR:CARG1->len
+ |1:
+ |.if DUALNUM
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CRET1, 4(RA)
+ |.else
+ | tonum_u f0, CRET1 // Result is a non-negative integer.
+ | ins_next1
+ | stfdx f0, BASE, RA
+ |.endif
+ | ins_next2
+ |2:
+ | checktab TMP0; bne ->vmeta_len
+#if LJ_52
+ | lwz TAB:TMP2, TAB:CARG1->metatable
+ | cmplwi TAB:TMP2, 0
+ | bne >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | bl extern lj_tab_len // (GCtab *t)
+ | // Returns uint32_t (but less than 2^31).
+ | b <1
+#if LJ_52
+ |9:
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_len
+ | bne <3 // 'no __len' flag set: done.
+ | b ->vmeta_len
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
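+ |// vk selects the operand pattern: 0 = VN (var op num_const), 1 = NV
+ |// (num_const op var), otherwise VV (var op var). Non-number operands
+ |// bail out to the matching vmeta_arith handler.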
+ |.macro ins_arithpre
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzx CARG1, BASE, RB
+ | .if DUALNUM
+ | lwzx CARG3, KBASE, RC
+ | .endif
+ | .if FPU
+ | lfdx f14, BASE, RB
+ | lfdx f15, KBASE, RC
+ | .else
+ | add TMP1, BASE, RB
+ | add TMP2, KBASE, RC
+ | lwz CARG2, 4(TMP1)
+ | lwz CARG4, 4(TMP2)
+ | .endif
+ | .if DUALNUM
+ | checknum cr0, CARG1
+ | checknum cr1, CARG3
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vn
+ | .else
+ | checknum CARG1; bge ->vmeta_arith_vn
+ | .endif
+ || break;
+ ||case 1:
+ | lwzx CARG1, BASE, RB
+ | .if DUALNUM
+ | lwzx CARG3, KBASE, RC
+ | .endif
+ | .if FPU
+ | lfdx f15, BASE, RB
+ | lfdx f14, KBASE, RC
+ | .else
+ | add TMP1, BASE, RB
+ | add TMP2, KBASE, RC
+ | lwz CARG2, 4(TMP1)
+ | lwz CARG4, 4(TMP2)
+ | .endif
+ | .if DUALNUM
+ | checknum cr0, CARG1
+ | checknum cr1, CARG3
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_nv
+ | .else
+ | checknum CARG1; bge ->vmeta_arith_nv
+ | .endif
+ || break;
+ ||default:
+ | lwzx CARG1, BASE, RB
+ | lwzx CARG3, BASE, RC
+ | .if FPU
+ | lfdx f14, BASE, RB
+ | lfdx f15, BASE, RC
+ | .else
+ | add TMP1, BASE, RB
+ | add TMP2, BASE, RC
+ | lwz CARG2, 4(TMP1)
+ | lwz CARG4, 4(TMP2)
+ | .endif
+ | checknum cr0, CARG1
+ | checknum cr1, CARG3
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithfallback, ins
+ ||switch (vk) {
+ ||case 0:
+ | ins ->vmeta_arith_vn2
+ || break;
+ ||case 1:
+ | ins ->vmeta_arith_nv2
+ || break;
+ ||default:
+ | ins ->vmeta_arith_vv2
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro intmod, a, b, c
+ | bl ->vm_modi
+ |.endmacro
+ |
+ |.macro fpmod, a, b, c
+ |->BC_MODVN_Z:
+ | fdiv FARG1, b, c
+ | // NYI: Use internal implementation of floor.
+ | blex floor // floor(b/c)
+ | fmul a, FARG1, c
+ | fsub a, b, a // b - floor(b/c)*c
+ |.endmacro
+ |
+ |.macro sfpmod
+ |->BC_MODVN_Z:
+ | stw CARG1, SFSAVE_1
+ | stw CARG2, SFSAVE_2
+ | mr SAVE0, CARG3
+ | mr SAVE1, CARG4
+ | blex __divdf3
+ | blex floor
+ | mr CARG3, SAVE0
+ | mr CARG4, SAVE1
+ | blex __muldf3
+ | mr CARG3, CRET1
+ | mr CARG4, CRET2
+ | lwz CARG1, SFSAVE_1
+ | lwz CARG2, SFSAVE_2
+ | blex __subdf3
+ |.endmacro
+ |
+ |.macro ins_arithfp, fpins
+ | ins_arithpre
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.elif FPU
+ | fpins f0, f14, f15
+ | ins_next1
+ | stfdx f0, BASE, RA
+ | ins_next2
+ |.else
+ | blex __divdf3 // Only soft-float div uses this macro.
+ | ins_next1
+ | stwux CRET1, RA, BASE
+ | stw CRET2, 4(RA)
+ | ins_next2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins, fpins, fpcall
+ | // RA = dst*8, RB = src1*8, RC = src2*8 | num_const*8
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG3, RC, KBASE
+ | lwz CARG2, 4(RB)
+ | checknum cr0, CARG1
+ | lwz CARG4, 4(RC)
+ | checknum cr1, CARG3
+ || break;
+ ||case 1:
+ | lwzux CARG3, RB, BASE
+ | lwzux CARG1, RC, KBASE
+ | lwz CARG4, 4(RB)
+ | checknum cr0, CARG3
+ | lwz CARG2, 4(RC)
+ | checknum cr1, CARG1
+ || break;
+ ||default:
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG3, RC, BASE
+ | lwz CARG2, 4(RB)
+ | checknum cr0, CARG1
+ | lwz CARG4, 4(RC)
+ | checknum cr1, CARG3
+ || break;
+ ||}
+ | bne >5
+ | bne cr1, >5
+ |.if "intins" == "intmod"
+ | mr CARG1, CARG2
+ | mr CARG2, CARG4
+ |.endif
+ | intins CARG1, CARG2, CARG4
+ | bso >4
+ |1:
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw CARG1, 4(RA)
+ |2:
+ | ins_next2
+ |4: // Overflow.
+ | checkov TMP0, <1 // Ignore unrelated overflow.
+ | ins_arithfallback b
+ |5: // FP variant.
+ |.if FPU
+ ||if (vk == 1) {
+ | lfd f15, 0(RB)
+ | lfd f14, 0(RC)
+ ||} else {
+ | lfd f14, 0(RB)
+ | lfd f15, 0(RC)
+ ||}
+ |.endif
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | ins_arithfallback bge
+ |.if "fpins" == "fpmod_"
+ | b ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ |.else
+ |.if FPU
+ | fpins f0, f14, f15
+ | stfdx f0, BASE, RA
+ |.else
+ |.if "fpcall" == "sfpmod"
+ | sfpmod
+ |.else
+ | blex fpcall
+ |.endif
+ | stwux CRET1, RA, BASE
+ | stw CRET2, 4(RA)
+ |.endif
+ | ins_next1
+ | b <2
+ |.endif
+ |.endmacro
+ |
+ |.macro ins_arith, intins, fpins, fpcall
+ |.if DUALNUM
+ | ins_arithdn intins, fpins, fpcall
+ |.else
+ | ins_arithfp fpins
+ |.endif
+ |.endmacro
+
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ |.if GPR64
+ |.macro addo32., y, a, b
+ | // Need to check overflow for (a<<32) + (b<<32).
+ | rldicr TMP0, a, 32, 31
+ | rldicr TMP1, b, 32, 31
+ | addo. TMP0, TMP0, TMP1
+ | add y, a, b
+ |.endmacro
+ | ins_arith addo32., fadd, __adddf3
+ |.else
+ | ins_arith addo., fadd, __adddf3
+ |.endif
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ |.if GPR64
+ |.macro subo32., y, a, b
+ | // Need to check overflow for (a<<32) - (b<<32).
+ | rldicr TMP0, a, 32, 31
+ | rldicr TMP1, b, 32, 31
+ | subo. TMP0, TMP0, TMP1
+ | sub y, a, b
+ |.endmacro
+ | ins_arith subo32., fsub, __subdf3
+ |.else
+ | ins_arith subo., fsub, __subdf3
+ |.endif
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith mullwo., fmul, __muldf3
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arithfp fdiv
+ break;
+ case BC_MODVN:
+ | ins_arith intmod, fpmod, sfpmod
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arith intmod, fpmod_, sfpmod
+ break;
+ case BC_POW:
+ | // NYI: (partial) integer arithmetic.
+ | lwzx CARG1, BASE, RB
+ | lwzx CARG3, BASE, RC
+ |.if FPU
+ | lfdx FARG1, BASE, RB
+ | lfdx FARG2, BASE, RC
+ |.else
+ | add TMP1, BASE, RB
+ | add TMP2, BASE, RC
+ | lwz CARG2, 4(TMP1)
+ | lwz CARG4, 4(TMP2)
+ |.endif
+ | checknum cr0, CARG1
+ | checknum cr1, CARG3
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ | bge ->vmeta_arith_vv
+ | blex pow
+ | ins_next1
+ |.if FPU
+ | stfdx FARG1, BASE, RA
+ |.else
+ | stwux CARG1, RA, BASE
+ | stw CARG2, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ case BC_CAT:
+ | // RA = dst*8, RB = src_start*8, RC = src_end*8
+ | sub CARG3, RC, RB
+ | stp BASE, L->base
+ | add CARG2, BASE, RC
+ | mr SAVE0, RB
+ |->BC_CAT_Z:
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | srwi CARG3, CARG3, 3
+ | bl extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // Returns NULL (finished) or TValue * (metamethod).
+ | cmplwi CRET1, 0
+ | lp BASE, L->base
+ | bne ->vmeta_binop
+ | ins_next1
+ |.if FPU
+ | lfdx f0, BASE, SAVE0 // Copy result from RB to RA.
+ | stfdx f0, BASE, RA
+ |.else
+ | lwzux TMP0, SAVE0, BASE
+ | lwz TMP1, 4(SAVE0)
+ | stwux TMP0, RA, BASE
+ | stw TMP1, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | // RA = dst*8, RD = str_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-str_const*4
+ | li TMP2, LJ_TSTR
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | // RA = dst*8, RD = cdata_const*8 (~)
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | ins_next1
+ | lwzx TMP0, KBASE, TMP1 // KBASE-4-cdata_const*4
+ | li TMP2, LJ_TCDATA
+ | stwux TMP2, RA, BASE
+ | stw TMP0, 4(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KSHORT:
+ | // RA = dst*8, RD = int16_literal*8
+ |.if DUALNUM
+ | slwi RD, RD, 13
+ | srawi RD, RD, 16
+ | ins_next1
+ | stwux TISNUM, RA, BASE
+ | stw RD, 4(RA)
+ | ins_next2
+ |.else
+ | // The soft-float approach is faster.
+ | slwi RD, RD, 13
+ | srawi TMP1, RD, 31
+ | xor TMP2, TMP1, RD
+ | sub TMP2, TMP2, TMP1 // TMP2 = abs(x)
+ | cntlzw TMP3, TMP2
+ | subfic TMP1, TMP3, 0x40d // TMP1 = exponent-1
+ | slw TMP2, TMP2, TMP3 // TMP2 = left aligned mantissa
+ | subfic TMP3, RD, 0
+ | slwi TMP1, TMP1, 20
+ | rlwimi RD, TMP2, 21, 1, 31 // hi = sign(x) | (mantissa>>11)
+ | subfe TMP0, TMP0, TMP0
+ | add RD, RD, TMP1 // hi = hi + exponent-1
+ | and RD, RD, TMP0 // hi = x == 0 ? 0 : hi
+ | ins_next1
+ | stwux RD, RA, BASE
+ | stw ZERO, 4(RA)
+ | ins_next2
+ |.endif
+ break;
+ case BC_KNUM:
+ | // RA = dst*8, RD = num_const*8
+ | ins_next1
+ |.if FPU
+ | lfdx f0, KBASE, RD
+ | stfdx f0, BASE, RA
+ |.else
+ | lwzux TMP0, RD, KBASE
+ | lwz TMP1, 4(RD)
+ | stwux TMP0, RA, BASE
+ | stw TMP1, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+ case BC_KPRI:
+ | // RA = dst*8, RD = primitive_type*8 (~)
+ | srwi TMP1, RD, 3
+ | not TMP0, TMP1
+ | ins_next1
+ | stwx TMP0, BASE, RA
+ | ins_next2
+ break;
+ case BC_KNIL:
+ | // RA = base*8, RD = end*8
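+ | // Set slots base..end (inclusive) to nil. KNIL is only emitted for
+ | // two or more slots, so the first store can be done unconditionally.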
+ | stwx TISNIL, BASE, RA
+ | addi RA, RA, 8
+ |1:
+ | stwx TISNIL, BASE, RA
+ | cmpw RA, RD
+ | addi RA, RA, 8
+ | blt <1
+ | ins_next_
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | // RA = dst*8, RD = uvnum*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RD, RD, 1
+ | addi RD, RD, offsetof(GCfuncL, uvptr)
+ | lwzx UPVAL:RB, LFUNC:RB, RD
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ |.if FPU
+ | lfd f0, 0(TMP1)
+ | stfdx f0, BASE, RA
+ |.else
+ | lwz TMP2, 0(TMP1)
+ | lwz TMP3, 4(TMP1)
+ | stwux TMP2, RA, BASE
+ | stw TMP3, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+ case BC_USETV:
+ | // RA = uvnum*8, RD = src*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ |.if FPU
+ | lfdux f0, RD, BASE
+ |.else
+ | lwzux CARG1, RD, BASE
+ | lwz CARG3, 4(RD)
+ |.endif
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP0, UPVAL:RB->closed
+ | lwz TMP2, 0(RD)
+ |.if FPU
+ | stfd f0, 0(CARG2)
+ |.else
+ | stw CARG1, 0(CARG2)
+ | stw CARG3, 4(CARG2)
+ |.endif
+ | cmplwi cr1, TMP0, 0
+ | lwz TMP1, 4(RD)
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | subi TMP2, TMP2, (LJ_TNUMX+1)
+ | bne >2 // Upvalue is closed and black?
+ |1:
+ | ins_next
+ |
+ |2: // Check if new value is collectable.
+ | cmplwi TMP2, LJ_TISGCV - (LJ_TNUMX+1)
+ | bge <1 // tvisgcv(v)
+ | lbz TMP3, GCOBJ:TMP1->gch.marked
+ | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(v)
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETS:
+ | // RA = uvnum*8, RD = str_const*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | srwi RA, RA, 1
+ | subfic TMP1, TMP1, -4
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | lwzx STR:TMP1, KBASE, TMP1 // KBASE-4-str_const*4
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | lbz TMP3, UPVAL:RB->marked
+ | lwz CARG2, UPVAL:RB->v
+ | andix. TMP3, TMP3, LJ_GC_BLACK // isblack(uv)
+ | lbz TMP3, STR:TMP1->marked
+ | lbz TMP2, UPVAL:RB->closed
+ | li TMP0, LJ_TSTR
+ | stw STR:TMP1, 4(CARG2)
+ | stw TMP0, 0(CARG2)
+ | bne >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | andix. TMP3, TMP3, LJ_GC_WHITES // iswhite(str)
+ | cmplwi cr1, TMP2, 0
+ | cror 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | la CARG1, GG_DISP2G(DISPATCH)
+ | // Crossed a write barrier. Move the barrier forward.
+ | beq <1
+ | bl extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | b <1
+ break;
+ case BC_USETN:
+ | // RA = uvnum*8, RD = num_const*8
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ |.if FPU
+ | lfdx f0, KBASE, RD
+ |.else
+ | lwzux TMP2, RD, KBASE
+ | lwz TMP3, 4(RD)
+ |.endif
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ |.if FPU
+ | stfd f0, 0(TMP1)
+ |.else
+ | stw TMP2, 0(TMP1)
+ | stw TMP3, 4(TMP1)
+ |.endif
+ | ins_next2
+ break;
+ case BC_USETP:
+ | // RA = uvnum*8, RD = primitive_type*8 (~)
+ | lwz LFUNC:RB, FRAME_FUNC(BASE)
+ | srwi RA, RA, 1
+ | srwi TMP0, RD, 3
+ | addi RA, RA, offsetof(GCfuncL, uvptr)
+ | not TMP0, TMP0
+ | lwzx UPVAL:RB, LFUNC:RB, RA
+ | ins_next1
+ | lwz TMP1, UPVAL:RB->v
+ | stw TMP0, 0(TMP1)
+ | ins_next2
+ break;
+
+ case BC_UCLO:
+ | // RA = level*8, RD = target
+ | lwz TMP1, L->openupval
+ | branch_RD // Do this first since RD is not saved.
+ | stp BASE, L->base
+ | cmplwi TMP1, 0
+ | mr CARG1, L
+ | beq >1
+ | add CARG2, BASE, RA
+ | bl extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | lp BASE, L->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | // RA = dst*8, RD = proto_const*8 (~) (holding function prototype)
+ | srwi TMP1, RD, 1
+ | stp BASE, L->base
+ | subfic TMP1, TMP1, -4
+ | stw PC, SAVE_PC
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | mr CARG1, L
+ | lwz CARG3, FRAME_FUNC(BASE)
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | bl extern lj_func_newL_gc
+ | // Returns GCfuncL *.
+ | lp BASE, L->base
+ | li TMP0, LJ_TFUNC
+ | stwux TMP0, RA, BASE
+ | stw LFUNC:CRET1, 4(RA)
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ case BC_TDUP:
+ | // RA = dst*8, RD = (hbits|asize)*8 | tab_const*8 (~)
+ | lwz TMP0, DISPATCH_GL(gc.total)(DISPATCH)
+ | mr CARG1, L
+ | lwz TMP1, DISPATCH_GL(gc.threshold)(DISPATCH)
+ | stp BASE, L->base
+ | cmplw TMP0, TMP1
+ | stw PC, SAVE_PC
+ | bge >5
+ |1:
+ if (op == BC_TNEW) {
+ | rlwinm CARG2, RD, 29, 21, 31
+ | rlwinm CARG3, RD, 18, 27, 31
+ | cmpwi CARG2, 0x7ff; beq >3
+ |2:
+ | bl extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Returns Table *.
+ } else {
+ | srwi TMP1, RD, 1
+ | subfic TMP1, TMP1, -4
+ | lwzx CARG2, KBASE, TMP1 // KBASE-4-tab_const*4
+ | bl extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Returns Table *.
+ }
+ | lp BASE, L->base
+ | li TMP0, LJ_TTAB
+ | stwux TMP0, RA, BASE
+ | stw TAB:CRET1, 4(RA)
+ | ins_next
+ if (op == BC_TNEW) {
+ |3:
+ | li CARG2, 0x801
+ | b <2
+ }
+ |5:
+ | mr SAVE0, RD
+ | bl extern lj_gc_step_fixtop // (lua_State *L)
+ | mr RD, SAVE0
+ | mr CARG1, L
+ | b <1
+ break;
+
+ case BC_GGET:
+ | // RA = dst*8, RD = str_const*8 (~)
+ case BC_GSET:
+ | // RA = src*8, RD = str_const*8 (~)
+ | lwz LFUNC:TMP2, FRAME_FUNC(BASE)
+ | srwi TMP1, RD, 1
+ | lwz TAB:RB, LFUNC:TMP2->env
+ | subfic TMP1, TMP1, -4
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ if (op == BC_GGET) {
+ | b ->BC_TGETS_Z
+ } else {
+ | b ->BC_TSETS_Z
+ }
+ break;
+
+ case BC_TGETV:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ |.if DUALNUM
+ | lwz RC, 4(RC)
+ |.else
+ | lfd f0, 0(RC)
+ |.endif
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tgetv
+ |.if DUALNUM
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP2, RC, 3
+ |.else
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP2, TMP2, 3
+ |.endif
+ | ble ->vmeta_tgetv // Integer key and in array part?
+ | lwzx TMP0, TMP1, TMP2
+ |.if FPU
+ | lfdx f14, TMP1, TMP2
+ |.else
+ | lwzux SAVE0, TMP1, TMP2
+ | lwz SAVE1, 4(TMP1)
+ |.endif
+ | checknil TMP0; beq >2
+ |1:
+ | ins_next1
+ |.if FPU
+ | stfdx f14, BASE, RA
+ |.else
+ | stwux SAVE0, RA, BASE
+ | stw SAVE1, 4(RA)
+ |.endif
+ | ins_next2
+ |
+ |2: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tgetv
+ |.if not DUALNUM
+ | lwz STR:RC, 4(RC)
+ |.endif
+ | b ->BC_TGETS_Z // String key?
+ break;
+ case BC_TGETS:
+ | // RA = dst*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tgets1
+ |->BC_TGETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = dst*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->sid
+ | lwz NODE:TMP2, TAB:RB->node
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
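+ | // (idx*32-idx*8 = idx*24 = idx*sizeof(Node), computed without a multiply.)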
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz TMP1, 4+offsetof(Node, val)(NODE:TMP2)
+ | checkstr CARG1; bne >4
+ | cmpw TMP0, STR:RC; bne >4
+ | checknil CARG2; beq >5 // Key found, but nil value?
+ |3:
+ | stwux CARG2, RA, BASE
+ | stw TMP1, 4(RA)
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | lwz NODE:TMP2, NODE:TMP2->next
+ | cmplwi NODE:TMP2, 0
+ | bne <1
+ | // End of hash chain: key not found, nil result.
+ | li CARG2, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <3 // No metatable: done.
+ | lbz TMP0, TAB:TMP2->nomm
+ | andix. TMP0, TMP0, 1<<MM_index
+ | bne <3 // 'no __index' flag set: done.
+ | b ->vmeta_tgets
+ break;
+ case BC_TGETB:
+ | // RA = dst*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tgetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | cmplw TMP0, TMP1; bge ->vmeta_tgetb
+ |.if FPU
+ | lwzx TMP1, TMP2, RC
+ | lfdx f0, TMP2, RC
+ |.else
+ | lwzux TMP1, TMP2, RC
+ | lwz TMP3, 4(TMP2)
+ |.endif
+ | checknil TMP1; beq >5
+ |1:
+ | ins_next1
+ |.if FPU
+ | stfdx f0, BASE, RA
+ |.else
+ | stwux TMP1, RA, BASE
+ | stw TMP3, 4(RA)
+ |.endif
+ | ins_next2
+ |
+ |5: // Check for __index if table value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_index
+ | bne <1 // 'no __index' flag set: done.
+ | b ->vmeta_tgetb // Caveat: preserve TMP0!
+ break;
+ case BC_TGETR:
+ | // RA = dst*8, RB = table*8, RC = key*8
+ | add RB, BASE, RB
+ | lwz TAB:CARG1, 4(RB)
+ |.if DUALNUM
+ | add RC, BASE, RC
+ | lwz TMP0, TAB:CARG1->asize
+ | lwz CARG2, 4(RC)
+ | lwz TMP1, TAB:CARG1->array
+ |.else
+ | lfdx f0, BASE, RC
+ | lwz TMP0, TAB:CARG1->asize
+ | toint CARG2, f0
+ | lwz TMP1, TAB:CARG1->array
+ |.endif
+ | cmplw TMP0, CARG2
+ | slwi TMP2, CARG2, 3
+ | ble ->vmeta_tgetr // In array part?
+ |.if FPU
+ | lfdx f14, TMP1, TMP2
+ |.else
+ | lwzux SAVE0, TMP2, TMP1
+ | lwz SAVE1, 4(TMP2)
+ |.endif
+ |->BC_TGETR_Z:
+ | ins_next1
+ |.if FPU
+ | stfdx f14, BASE, RA
+ |.else
+ | stwux SAVE0, RA, BASE
+ | stw SAVE1, 4(RA)
+ |.endif
+ | ins_next2
+ break;
+
+ case BC_TSETV:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | lwzux CARG1, RB, BASE
+ | lwzux CARG2, RC, BASE
+ | lwz TAB:RB, 4(RB)
+ |.if DUALNUM
+ | lwz RC, 4(RC)
+ |.else
+ | lfd f0, 0(RC)
+ |.endif
+ | checktab CARG1
+ | checknum cr1, CARG2
+ | bne ->vmeta_tsetv
+ |.if DUALNUM
+ | lwz TMP0, TAB:RB->asize
+ | bne cr1, >5
+ | lwz TMP1, TAB:RB->array
+ | cmplw TMP0, RC
+ | slwi TMP0, RC, 3
+ |.else
+ | bge cr1, >5
+ | // Convert number key to integer, check for integerness and range.
+ | fctiwz f1, f0
+ | fadd f2, f0, TOBIT
+ | stfd f1, TMPD
+ | lwz TMP0, TAB:RB->asize
+ | fsub f2, f2, TOBIT
+ | lwz TMP2, TMPD_LO
+ | lwz TMP1, TAB:RB->array
+ | fcmpu cr1, f0, f2
+ | cmplw cr0, TMP0, TMP2
+ | crand 4*cr0+gt, 4*cr0+gt, 4*cr1+eq
+ | slwi TMP0, TMP2, 3
+ |.endif
+ | ble ->vmeta_tsetv // Integer key and in array part?
+ | lwzx TMP2, TMP1, TMP0
+ | lbz TMP3, TAB:RB->marked
+ |.if FPU
+ | lfdx f14, BASE, RA
+ |.else
+ | add SAVE1, BASE, RA
+ | lwz SAVE0, 0(SAVE1)
+ | lwz SAVE1, 4(SAVE1)
+ |.endif
+ | checknil TMP2; beq >3
+ |1:
+ | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ |.if FPU
+ | stfdx f14, TMP1, TMP0
+ |.else
+ | stwux SAVE0, TMP1, TMP0
+ | stw SAVE1, 4(TMP1)
+ |.endif
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP2, TAB:RB->metatable
+ | cmplwi TAB:TMP2, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP2, TAB:TMP2->nomm
+ | andix. TMP2, TMP2, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetv
+ |
+ |5:
+ | checkstr CARG2; bne ->vmeta_tsetv
+ |.if not DUALNUM
+ | lwz STR:RC, 4(RC)
+ |.endif
+ | b ->BC_TSETS_Z // String key?
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETS:
+ | // RA = src*8, RB = table*8, RC = str_const*8 (~)
+ | lwzux CARG1, RB, BASE
+ | srwi TMP1, RC, 1
+ | lwz TAB:RB, 4(RB)
+ | subfic TMP1, TMP1, -4
+ | checktab CARG1
+ | lwzx STR:RC, KBASE, TMP1 // KBASE-4-str_const*4
+ | bne ->vmeta_tsets1
+ |->BC_TSETS_Z:
+ | // TAB:RB = GCtab *, STR:RC = GCstr *, RA = src*8
+ | lwz TMP0, TAB:RB->hmask
+ | lwz TMP1, STR:RC->sid
+ | lwz NODE:TMP2, TAB:RB->node
+ | stb ZERO, TAB:RB->nomm // Clear metamethod cache.
+ | and TMP1, TMP1, TMP0 // idx = str->sid & tab->hmask
+ |.if FPU
+ | lfdx f14, BASE, RA
+ |.else
+ | add CARG2, BASE, RA
+ | lwz SAVE0, 0(CARG2)
+ | lwz SAVE1, 4(CARG2)
+ |.endif
+ | slwi TMP0, TMP1, 5
+ | slwi TMP1, TMP1, 3
+ | sub TMP1, TMP0, TMP1
+ | lbz TMP3, TAB:RB->marked
+ | add NODE:TMP2, NODE:TMP2, TMP1 // node = tab->node + (idx*32-idx*8)
+ |1:
+ | lwz CARG1, NODE:TMP2->key
+ | lwz TMP0, 4+offsetof(Node, key)(NODE:TMP2)
+ | lwz CARG2, NODE:TMP2->val
+ | lwz NODE:TMP1, NODE:TMP2->next
+ | checkstr CARG1; bne >5
+ | cmpw TMP0, STR:RC; bne >5
+ | checknil CARG2; beq >4 // Key found, but nil value?
+ |2:
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |.if FPU
+ | stfd f14, NODE:TMP2->val
+ |.else
+ | stw SAVE0, NODE:TMP2->val.u32.hi
+ | stw SAVE1, NODE:TMP2->val.u32.lo
+ |.endif
+ | bne >7
+ |3:
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <2 // No metatable: done.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andix. TMP0, TMP0, 1<<MM_newindex
+ | bne <2 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsets
+ |
+ |5: // Follow hash chain.
+ | cmplwi NODE:TMP1, 0
+ | mr NODE:TMP2, NODE:TMP1
+ | bne <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | la CARG3, DISPATCH_GL(tmptv)(DISPATCH)
+ | stw PC, SAVE_PC
+ | mr CARG1, L
+ | cmplwi TAB:TMP1, 0
+ | stp BASE, L->base
+ | beq >6 // No metatable: continue.
+ | lbz TMP0, TAB:TMP1->nomm
+ | andix. TMP0, TMP0, 1<<MM_newindex
+ | beq ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | li TMP0, LJ_TSTR
+ | stw STR:RC, 4(CARG3)
+ | mr CARG2, TAB:RB
+ | stw TMP0, 0(CARG3)
+ | bl extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Returns TValue *.
+ | lp BASE, L->base
+ |.if FPU
+ | stfd f14, 0(CRET1)
+ |.else
+ | stw SAVE0, 0(CRET1)
+ | stw SAVE1, 4(CRET1)
+ |.endif
+ | b <3 // No 2nd write barrier needed.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <3
+ break;
+ case BC_TSETB:
+ | // RA = src*8, RB = table*8, RC = index*8
+ | lwzux CARG1, RB, BASE
+ | srwi TMP0, RC, 3
+ | lwz TAB:RB, 4(RB)
+ | checktab CARG1; bne ->vmeta_tsetb
+ | lwz TMP1, TAB:RB->asize
+ | lwz TMP2, TAB:RB->array
+ | lbz TMP3, TAB:RB->marked
+ | cmplw TMP0, TMP1
+ |.if FPU
+ | lfdx f14, BASE, RA
+ |.else
+ | add CARG2, BASE, RA
+ | lwz SAVE0, 0(CARG2)
+ | lwz SAVE1, 4(CARG2)
+ |.endif
+ | bge ->vmeta_tsetb
+ | lwzx TMP1, TMP2, RC
+ | checknil TMP1; beq >5
+ |1:
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |.if FPU
+ | stfdx f14, TMP2, RC
+ |.else
+ | stwux SAVE0, RC, TMP2
+ | stw SAVE1, 4(RC)
+ |.endif
+ | bne >7
+ |2:
+ | ins_next
+ |
+ |5: // Check for __newindex if previous value is nil.
+ | lwz TAB:TMP1, TAB:RB->metatable
+ | cmplwi TAB:TMP1, 0
+ | beq <1 // No metatable: done.
+ | lbz TMP1, TAB:TMP1->nomm
+ | andix. TMP1, TMP1, 1<<MM_newindex
+ | bne <1 // 'no __newindex' flag set: done.
+ | b ->vmeta_tsetb // Caveat: preserve TMP0!
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMP3, TMP0
+ | b <2
+ break;
+ case BC_TSETR:
+ | // RA = src*8, RB = table*8, RC = key*8
+ | add RB, BASE, RB
+ | lwz TAB:CARG2, 4(RB)
+ |.if DUALNUM
+ | add RC, BASE, RC
+ | lbz TMP3, TAB:CARG2->marked
+ | lwz TMP0, TAB:CARG2->asize
+ | lwz CARG3, 4(RC)
+ | lwz TMP1, TAB:CARG2->array
+ |.else
+ | lfdx f0, BASE, RC
+ | lbz TMP3, TAB:CARG2->marked
+ | lwz TMP0, TAB:CARG2->asize
+ | toint CARG3, f0
+ | lwz TMP1, TAB:CARG2->array
+ |.endif
+ | andix. TMP2, TMP3, LJ_GC_BLACK // isblack(table)
+ | bne >7
+ |2:
+ | cmplw TMP0, CARG3
+ | slwi TMP2, CARG3, 3
+ |.if FPU
+ | lfdx f14, BASE, RA
+ |.else
+ | lwzux SAVE0, RA, BASE
+ | lwz SAVE1, 4(RA)
+ |.endif
+ | ble ->vmeta_tsetr // In array part?
+ | ins_next1
+ |.if FPU
+ | stfdx f14, TMP1, TMP2
+ |.else
+ | stwux SAVE0, TMP1, TMP2
+ | stw SAVE1, 4(TMP1)
+ |.endif
+ | ins_next2
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP2
+ | b <2
+ break;
+
+ case BC_TSETM:
+ | // RA = base*8 (table at base-1), RD = num_const*8 (start index)
+ | add RA, BASE, RA
+ |1:
+ | add TMP3, KBASE, RD
+ | lwz TAB:CARG2, -4(RA) // Guaranteed to be a table.
+ | addic. TMP0, MULTRES, -8
+ | lwz TMP3, 4(TMP3) // Integer constant is in lo-word.
+ | srwi CARG3, TMP0, 3
+ | beq >4 // Nothing to copy?
+ | add CARG3, CARG3, TMP3
+ | lwz TMP2, TAB:CARG2->asize
+ | slwi TMP1, TMP3, 3
+ | lbz TMP3, TAB:CARG2->marked
+ | cmplw CARG3, TMP2
+ | add TMP2, RA, TMP0
+ | lwz TMP0, TAB:CARG2->array
+ | bgt >5
+ | add TMP1, TMP1, TMP0
+ | andix. TMP0, TMP3, LJ_GC_BLACK // isblack(table)
+ |3: // Copy result slots to table.
+ |.if FPU
+ | lfd f0, 0(RA)
+ |.else
+ | lwz SAVE0, 0(RA)
+ | lwz SAVE1, 4(RA)
+ |.endif
+ | addi RA, RA, 8
+ | cmpw cr1, RA, TMP2
+ |.if FPU
+ | stfd f0, 0(TMP1)
+ |.else
+ | stw SAVE0, 0(TMP1)
+ | stw SAVE1, 4(TMP1)
+ |.endif
+ | addi TMP1, TMP1, 8
+ | blt cr1, <3
+ | bne >7
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | stp BASE, L->base
+ | mr CARG1, L
+ | stw PC, SAVE_PC
+ | mr SAVE0, RD
+ | bl extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | // Must not reallocate the stack.
+ | mr RD, SAVE0
+ | b <1
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:CARG2, TMP3, TMP0
+ | b <4
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALLM:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALL follows.
+ break;
+ case BC_CALL:
+ | // RA = base*8, (RB = (nresults+1)*8,) RC = (nargs+1)*8
+ | mr TMP2, BASE
+ | lwzux TMP0, BASE, RA
+ | lwz LFUNC:RB, 4(BASE)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | addi BASE, BASE, 8
+ | checkfunc TMP0; bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | // RA = base*8, (RB = 0,) RC = extra_nargs*8
+ | add NARGS8:RC, NARGS8:RC, MULTRES
+ | // Fall through. Assumes BC_CALLT follows.
+ break;
+ case BC_CALLT:
+ | // RA = base*8, (RB = 0,) RC = (nargs+1)*8
+ | lwzux TMP0, RA, BASE
+ | lwz LFUNC:RB, 4(RA)
+ | subi NARGS8:RC, NARGS8:RC, 8
+ | lwz TMP1, FRAME_PC(BASE)
+ | checkfunc TMP0
+ | addi RA, RA, 8
+ | bne ->vmeta_callt
+ |->BC_CALLT_Z:
+ | andix. TMP0, TMP1, FRAME_TYPE // Caveat: preserve cr0 until the crand.
+ | lbz TMP3, LFUNC:RB->ffid
+ | xori TMP2, TMP1, FRAME_VARG
+ | cmplwi cr1, NARGS8:RC, 0
+ | bne >7
+ |1:
+ | stw LFUNC:RB, FRAME_FUNC(BASE) // Copy function down, but keep PC.
+ | li TMP2, 0
+ | cmplwi cr7, TMP3, 1 // (> FF_C) Calling a fast function?
+ | beq cr1, >3
+ |2:
+ | addi TMP3, TMP2, 8
+ |.if FPU
+ | lfdx f0, RA, TMP2
+ |.else
+ | add CARG3, RA, TMP2
+ | lwz CARG1, 0(CARG3)
+ | lwz CARG2, 4(CARG3)
+ |.endif
+ | cmplw cr1, TMP3, NARGS8:RC
+ |.if FPU
+ | stfdx f0, BASE, TMP2
+ |.else
+ | stwux CARG1, TMP2, BASE
+ | stw CARG2, 4(TMP2)
+ |.endif
+ | mr TMP2, TMP3
+ | bne cr1, <2
+ |3:
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+gt
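+ | // eq := (Lua frame below) AND (fast function): 5 must reload KBASE first.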
+ | beq >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function with a Lua frame below.
+ | lwz INS, -4(TMP1)
+ | decode_RA8 RA, INS
+ | sub TMP1, BASE, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC-8(TMP1)
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1) // Need to prepare KBASE.
+ | b <4
+ |
+ |7: // Tailcall from a vararg function.
+ | andix. TMP0, TMP2, FRAME_TYPEP
+ | bne <1 // Vararg frame below?
+ | sub BASE, BASE, TMP2 // Relocate BASE down.
+ | lwz TMP1, FRAME_PC(BASE)
+ | andix. TMP0, TMP1, FRAME_TYPE
+ | b <1
+ break;
+
+ case BC_ITERC:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ | mr TMP2, BASE
+ | add BASE, BASE, RA
+ | lwz TMP1, -24(BASE)
+ | lwz LFUNC:RB, -20(BASE)
+ |.if FPU
+ | lfd f1, -8(BASE)
+ | lfd f0, -16(BASE)
+ |.else
+ | lwz CARG1, -8(BASE)
+ | lwz CARG2, -4(BASE)
+ | lwz CARG3, -16(BASE)
+ | lwz CARG4, -12(BASE)
+ |.endif
+ | stw TMP1, 0(BASE) // Copy callable.
+ | stw LFUNC:RB, 4(BASE)
+ | checkfunc TMP1
+ | li NARGS8:RC, 16 // Iterators get 2 arguments.
+ |.if FPU
+ | stfd f1, 16(BASE) // Copy control var.
+ | stfdu f0, 8(BASE) // Copy state.
+ |.else
+ | stw CARG1, 16(BASE) // Copy control var.
+ | stw CARG2, 20(BASE)
+ | stwu CARG3, 8(BASE) // Copy state.
+ | stw CARG4, 4(BASE)
+ |.endif
+ | bne ->vmeta_call
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ | // RA = base*8, (RB = (nresults+1)*8, RC = (nargs+1)*8 ((2+1)*8))
+ |.if JIT
+ | // NYI on big-endian
+ |.endif
+ |->vm_IITERN:
+ | add RA, BASE, RA
+ | lwz TAB:RB, -12(RA)
+ | lwz RC, -4(RA) // Get index from control var.
+ | lwz TMP0, TAB:RB->asize
+ | lwz TMP1, TAB:RB->array
+ | addi PC, PC, 4
+ |1: // Traverse array part.
+ | cmplw RC, TMP0
+ | slwi TMP3, RC, 3
+ | bge >5 // Index points after array part?
+ | lwzx TMP2, TMP1, TMP3
+ |.if FPU
+ | lfdx f0, TMP1, TMP3
+ |.else
+ | lwzux CARG1, TMP3, TMP1
+ | lwz CARG2, 4(TMP3)
+ |.endif
+ | checknil TMP2
+ | lwz INS, -4(PC)
+ | beq >4
+ |.if DUALNUM
+ | stw RC, 4(RA)
+ | stw TISNUM, 0(RA)
+ |.else
+ | tonum_u f1, RC
+ |.endif
+ | addi RC, RC, 1
+ | addis TMP3, PC, -(BCBIAS_J*4 >> 16)
+ |.if FPU
+ | stfd f0, 8(RA)
+ |.else
+ | stw CARG1, 8(RA)
+ | stw CARG2, 12(RA)
+ |.endif
+ | decode_RD4 TMP1, INS
+ | stw RC, -4(RA) // Update control var.
+ | add PC, TMP1, TMP3
+ |.if not DUALNUM
+ | stfd f1, 0(RA)
+ |.endif
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | addi RC, RC, 1
+ | b <1
+ |
+ |5: // Traverse hash part.
+ | lwz TMP1, TAB:RB->hmask
+ | sub RC, RC, TMP0
+ | lwz TMP2, TAB:RB->node
+ |6:
+ | cmplw RC, TMP1 // End of iteration? Branch to ITERL+1.
+ | slwi TMP3, RC, 5
+ | bgty <3
+ | slwi RB, RC, 3
+ | sub TMP3, TMP3, RB
+ | lwzx RB, TMP2, TMP3
+ |.if FPU
+ | lfdx f0, TMP2, TMP3
+ |.else
+ | add CARG3, TMP2, TMP3
+ | lwz CARG1, 0(CARG3)
+ | lwz CARG2, 4(CARG3)
+ |.endif
+ | add NODE:TMP3, TMP2, TMP3
+ | checknil RB
+ | lwz INS, -4(PC)
+ | beq >7
+ |.if FPU
+ | lfd f1, NODE:TMP3->key
+ |.else
+ | lwz CARG3, NODE:TMP3->key.u32.hi
+ | lwz CARG4, NODE:TMP3->key.u32.lo
+ |.endif
+ | addis TMP2, PC, -(BCBIAS_J*4 >> 16)
+ |.if FPU
+ | stfd f0, 8(RA)
+ |.else
+ | stw CARG1, 8(RA)
+ | stw CARG2, 12(RA)
+ |.endif
+ | add RC, RC, TMP0
+ | decode_RD4 TMP1, INS
+ |.if FPU
+ | stfd f1, 0(RA)
+ |.else
+ | stw CARG3, 0(RA)
+ | stw CARG4, 4(RA)
+ |.endif
+ | addi RC, RC, 1
+ | add PC, TMP1, TMP2
+ | stw RC, -4(RA) // Update control var.
+ | b <3
+ |
+ |7: // Skip holes in hash part.
+ | addi RC, RC, 1
+ | b <6
+ break;
+
+ case BC_ISNEXT:
+ | // RA = base*8, RD = target (points to ITERN)
+ | add RA, BASE, RA
+ | lwz TMP0, -24(RA)
+ | lwz CFUNC:TMP1, -20(RA)
+ | lwz TMP2, -16(RA)
+ | lwz TMP3, -8(RA)
+ | cmpwi cr0, TMP2, LJ_TTAB
+ | cmpwi cr1, TMP0, LJ_TFUNC
+ | cmpwi cr6, TMP3, LJ_TNIL
+ | bne cr1, >5
+ | lbz TMP1, CFUNC:TMP1->ffid
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr6+eq
+ | cmpwi cr7, TMP1, FF_next_N
+ | srwi TMP0, RD, 1
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | add TMP3, PC, TMP0
+ | bne cr0, >5
+ | lus TMP1, (LJ_KEYINDEX >> 16)
+ | ori TMP1, TMP1, (LJ_KEYINDEX & 0xffff)
+ | stw ZERO, -4(RA) // Initialize control var.
+ | stw TMP1, -8(RA)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | li TMP0, BC_JMP
+ | li TMP1, BC_ITERC
+ | stb TMP0, -1(PC)
+ | addis PC, TMP3, -(BCBIAS_J*4 >> 16)
+ | // NYI on big-endian: unpatch JLOOP.
+ | stb TMP1, 3(PC)
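+ | // (This ISNEXT is patched back to BC_JMP and the target ITERN to BC_ITERC.)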
+ | b <1
+ break;
+
+ case BC_VARG:
+ | // RA = base*8, RB = (nresults+1)*8, RC = numparams*8
+ | lwz TMP0, FRAME_PC(BASE)
+ | add RC, BASE, RC
+ | add RA, BASE, RA
+ | addi RC, RC, FRAME_VARG
+ | add TMP2, RA, RB
+ | subi TMP3, BASE, 8 // TMP3 = vtop
+ | sub RC, RC, TMP0 // RC = vbase
+ | // Note: RC may now be even _above_ BASE if nargs was < numparams.
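+ | // (vbase = BASE - frame delta + numparams*8; the FRAME_VARG tag bits cancel.)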
+ | cmplwi cr1, RB, 0
+ |.if PPE
+ | sub TMP1, TMP3, RC
+ | cmpwi TMP1, 0
+ |.else
+ | sub. TMP1, TMP3, RC
+ |.endif
+ | beq cr1, >5 // Copy all varargs?
+ | subi TMP2, TMP2, 16
+ | ble >2 // No vararg slots?
+ |1: // Copy vararg slots to destination slots.
+ |.if FPU
+ | lfd f0, 0(RC)
+ |.else
+ | lwz CARG1, 0(RC)
+ | lwz CARG2, 4(RC)
+ |.endif
+ | addi RC, RC, 8
+ |.if FPU
+ | stfd f0, 0(RA)
+ |.else
+ | stw CARG1, 0(RA)
+ | stw CARG2, 4(RA)
+ |.endif
+ | cmplw RA, TMP2
+ | cmplw cr1, RC, TMP3
+ | bge >3 // All destination slots filled?
+ | addi RA, RA, 8
+ | blt cr1, <1 // More vararg slots?
+ |2: // Fill up remainder with nil.
+ | stw TISNIL, 0(RA)
+ | cmplw RA, TMP2
+ | addi RA, RA, 8
+ | blt <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | lwz TMP0, L->maxstack
+ | li MULTRES, 8 // MULTRES = (0+1)*8
+ | bley <3 // No vararg slots?
+ | add TMP2, RA, TMP1
+ | cmplw TMP2, TMP0
+ | addi MULTRES, TMP1, 8
+ | bgt >7
+ |6:
+ |.if FPU
+ | lfd f0, 0(RC)
+ |.else
+ | lwz CARG1, 0(RC)
+ | lwz CARG2, 4(RC)
+ |.endif
+ | addi RC, RC, 8
+ |.if FPU
+ | stfd f0, 0(RA)
+ |.else
+ | stw CARG1, 0(RA)
+ | stw CARG2, 4(RA)
+ |.endif
+ | cmplw RC, TMP3
+ | addi RA, RA, 8
+ | blt <6 // More vararg slots?
+ | b <3
+ |
+ |7: // Grow stack for varargs.
+ | mr CARG1, L
+ | stp RA, L->top
+ | sub SAVE0, RC, BASE // Need delta, because BASE may change.
+ | stp BASE, L->base
+ | sub RA, RA, BASE
+ | stw PC, SAVE_PC
+ | srwi CARG2, TMP1, 3
+ | bl extern lj_state_growstack // (lua_State *L, int n)
+ | lp BASE, L->base
+ | add RA, BASE, RA
+ | add RC, BASE, SAVE0
+ | subi TMP3, BASE, 8
+ | b <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | // RA = results*8, RD = extra_nresults*8
+ | add RD, RD, MULTRES // MULTRES >= 8, so RD >= 8.
+ | // Fall through. Assumes BC_RET follows.
+ break;
+
+ case BC_RET:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ |1:
+ | andix. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bne ->BC_RETV_Z
+ |
+ |->BC_RET_Z:
+ | // BASE = base, RA = resultptr, RD = (nresults+1)*8, PC = return
+ | lwz INS, -4(PC)
+ | cmpwi RD, 8
+ | subi TMP2, BASE, 8
+ | subi RC, RD, 8
+ | decode_RB8 RB, INS
+ | beq >3
+ | li TMP1, 0
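+ | // Copy loop is unrolled 2x: two result slots are moved per iteration.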
+ |2:
+ | addi TMP3, TMP1, 8
+ |.if FPU
+ | lfdx f0, RA, TMP1
+ |.else
+ | add CARG3, RA, TMP1
+ | lwz CARG1, 0(CARG3)
+ | lwz CARG2, 4(CARG3)
+ |.endif
+ | cmpw TMP3, RC
+ |.if FPU
+ | stfdx f0, TMP2, TMP1
+ |.else
+ | add CARG3, TMP2, TMP1
+ | stw CARG1, 0(CARG3)
+ | stw CARG2, 4(CARG3)
+ |.endif
+ | beq >3
+ | addi TMP1, TMP3, 8
+ |.if FPU
+ | lfdx f1, RA, TMP3
+ |.else
+ | add CARG3, RA, TMP3
+ | lwz CARG1, 0(CARG3)
+ | lwz CARG2, 4(CARG3)
+ |.endif
+ | cmpw TMP1, RC
+ |.if FPU
+ | stfdx f1, TMP2, TMP3
+ |.else
+ | add CARG3, TMP2, TMP3
+ | stw CARG1, 0(CARG3)
+ | stw CARG2, 4(CARG3)
+ |.endif
+ | bne <2
+ |3:
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ |
+ |->BC_RETV_Z: // Non-standard return case.
+ | andix. TMP2, TMP1, FRAME_TYPEP
+ | bne ->vm_return
+ | // Return from vararg function: relocate BASE down.
+ | sub BASE, BASE, TMP1
+ | lwz PC, FRAME_PC(BASE)
+ | b <1
+ break;
+
+ case BC_RET0: case BC_RET1:
+ | // RA = results*8, RD = (nresults+1)*8
+ | lwz PC, FRAME_PC(BASE)
+ | add RA, BASE, RA
+ | mr MULTRES, RD
+ | andix. TMP0, PC, FRAME_TYPE
+ | xori TMP1, PC, FRAME_VARG
+ | bney ->BC_RETV_Z
+ |
+ | lwz INS, -4(PC)
+ | subi TMP2, BASE, 8
+ | decode_RB8 RB, INS
+ if (op == BC_RET1) {
+ |.if FPU
+ | lfd f0, 0(RA)
+ | stfd f0, 0(TMP2)
+ |.else
+ | lwz CARG1, 0(RA)
+ | lwz CARG2, 4(RA)
+ | stw CARG1, 0(TMP2)
+ | stw CARG2, 4(TMP2)
+ |.endif
+ }
+ |5:
+ | cmplw RB, RD
+ | decode_RA8 RA, INS
+ | bgt >6
+ | sub BASE, TMP2, RA
+ | lwz LFUNC:TMP1, FRAME_FUNC(BASE)
+ | ins_next1
+ | lwz TMP1, LFUNC:TMP1->pc
+ | lwz KBASE, PC2PROTO(k)(TMP1)
+ | ins_next2
+ |
+ |6: // Fill up results with nil.
+ | subi TMP1, RD, 8
+ | addi RD, RD, 8
+ | stwx TISNIL, TMP2, TMP1
+ | b <5
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ | // RA = base*8, RD = target (after end of loop or start of loop)
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ |.if DUALNUM
+ | // Integer loop.
+ | lwzux TMP1, RA, BASE
+ | lwz CARG1, FORL_IDX*8+4(RA)
+ | cmplw cr0, TMP1, TISNUM
+ if (vk) {
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | bne >9
+ |.if GPR64
+ | // Need to check overflow for (a<<32) + (b<<32).
+ | rldicr TMP0, CARG1, 32, 31
+ | rldicr TMP2, CARG3, 32, 31
+ | add CARG1, CARG1, CARG3
+ | addo. TMP0, TMP0, TMP2
+ |.else
+ | addo. CARG1, CARG1, CARG3
+ |.endif
+ | cmpwi cr6, CARG3, 0
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | bso >6
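+ | // (addo. sets XER[OV/SO]; bso takes the overflow path, rechecked at 6.)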
+ |4:
+ | stw CARG1, FORL_IDX*8+4(RA)
+ } else {
+ | lwz SAVE0, FORL_STEP*8(RA)
+ | lwz CARG3, FORL_STEP*8+4(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | lwz CARG2, FORL_STOP*8+4(RA)
+ | cmplw cr7, SAVE0, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr7+eq
+ | crand 4*cr0+eq, 4*cr0+eq, 4*cr1+eq
+ | cmpwi cr6, CARG3, 0
+ | bne >9
+ }
+ | blt cr6, >5
+ | cmpw CARG1, CARG2
+ |1:
+ | stw TISNUM, FORL_EXT*8(RA)
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ | stw CARG1, FORL_EXT*8+4(RA)
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ if (op == BC_FORI) {
+ | bgt >3 // See FP loop below.
+ } else if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ | bley >7
+ } else if (op == BC_IFORL) {
+ | bgt >2
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else {
+ | bley =>BC_JLOOP
+ }
+ |2:
+ | ins_next
+ |5: // Invert check for negative step.
+ | cmpw CARG2, CARG1
+ | b <1
+ if (vk) {
+ |6: // Potential overflow.
+ | checkov TMP0, <4 // Ignore unrelated overflow.
+ | b <2
+ }
+ |.endif
+ if (vk) {
+ |.if DUALNUM
+ |9: // FP loop.
+ |.if FPU
+ | lfd f1, FORL_IDX*8(RA)
+ |.else
+ | lwz CARG1, FORL_IDX*8(RA)
+ | lwz CARG2, FORL_IDX*8+4(RA)
+ |.endif
+ |.else
+ | lfdux f1, RA, BASE
+ |.endif
+ |.if FPU
+ | lfd f3, FORL_STEP*8(RA)
+ | lfd f2, FORL_STOP*8(RA)
+ | fadd f1, f1, f3
+ | stfd f1, FORL_IDX*8(RA)
+ |.else
+ | lwz CARG3, FORL_STEP*8(RA)
+ | lwz CARG4, FORL_STEP*8+4(RA)
+ | mr SAVE1, RD
+ | blex __adddf3
+ | mr RD, SAVE1
+ | stw CRET1, FORL_IDX*8(RA)
+ | stw CRET2, FORL_IDX*8+4(RA)
+ | lwz CARG3, FORL_STOP*8(RA)
+ | lwz CARG4, FORL_STOP*8+4(RA)
+ |.endif
+ | lwz SAVE0, FORL_STEP*8(RA)
+ } else {
+ |.if DUALNUM
+ |9: // FP loop.
+ |.else
+ | lwzux TMP1, RA, BASE
+ | lwz SAVE0, FORL_STEP*8(RA)
+ | lwz TMP2, FORL_STOP*8(RA)
+ | cmplw cr0, TMP1, TISNUM
+ | cmplw cr7, SAVE0, TISNUM
+ | cmplw cr1, TMP2, TISNUM
+ |.endif
+ |.if FPU
+ | lfd f1, FORL_IDX*8(RA)
+ |.else
+ | lwz CARG1, FORL_IDX*8(RA)
+ | lwz CARG2, FORL_IDX*8+4(RA)
+ |.endif
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr7+lt
+ | crand 4*cr0+lt, 4*cr0+lt, 4*cr1+lt
+ |.if FPU
+ | lfd f2, FORL_STOP*8(RA)
+ |.else
+ | lwz CARG3, FORL_STOP*8(RA)
+ | lwz CARG4, FORL_STOP*8+4(RA)
+ |.endif
+ | bge ->vmeta_for
+ }
+ | cmpwi cr6, SAVE0, 0
+ if (op != BC_JFORL) {
+ | srwi RD, RD, 1
+ }
+ |.if FPU
+ | stfd f1, FORL_EXT*8(RA)
+ |.else
+ | stw CARG1, FORL_EXT*8(RA)
+ | stw CARG2, FORL_EXT*8+4(RA)
+ |.endif
+ if (op != BC_JFORL) {
+ | add RD, PC, RD
+ }
+ |.if FPU
+ | fcmpu cr0, f1, f2
+ |.else
+ | mr SAVE1, RD
+ | blex __ledf2
+ | cmpwi CRET1, 0
+ | mr RD, SAVE1
+ |.endif
+ if (op == BC_JFORI) {
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ }
+ | blt cr6, >5
+ if (op == BC_FORI) {
+ | bgt >3
+ } else if (op == BC_IFORL) {
+ |.if DUALNUM
+ | bgty <2
+ |.else
+ | bgt >2
+ |.endif
+ |1:
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_JFORI) {
+ | bley >7
+ } else {
+ | bley =>BC_JLOOP
+ }
+ |.if DUALNUM
+ | b <2
+ |.else
+ |2:
+ | ins_next
+ |.endif
+ |5: // Negative step.
+ if (op == BC_FORI) {
+ | bge <2
+ |3: // Used by integer loop, too.
+ | addis PC, RD, -(BCBIAS_J*4 >> 16)
+ } else if (op == BC_IFORL) {
+ | bgey <1
+ } else if (op == BC_JFORI) {
+ | bgey >7
+ } else {
+ | bgey =>BC_JLOOP
+ }
+ | b <2
+ if (op == BC_JFORI) {
+ |7:
+ | lwz INS, -4(PC)
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ }
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | // RA = base*8, RD = target
+ | lwzux TMP1, RA, BASE
+ | lwz TMP2, 4(RA)
+ | checknil TMP1; beq >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ | b =>BC_JLOOP
+ } else {
+ | branch_RD // Otherwise save control var + branch.
+ | stw TMP1, -8(RA)
+ | stw TMP2, -4(RA)
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows.
+ break;
+
+ case BC_ILOOP:
+ | // RA = base*8, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | // RA = base*8 (ignored), RD = traceno*8
+ | lwz TMP1, DISPATCH_J(trace)(DISPATCH)
+ | srwi RD, RD, 1
+ | // Traces on PPC don't store the trace number, so use 0.
+ | stw ZERO, DISPATCH_GL(vmstate)(DISPATCH)
+ | lwzx TRACE:TMP2, TMP1, RD
+ | clrso TMP1
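+ | // clrso resets the sticky XER[SO] bit (TMP1 is scratch) before entering the trace.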
+ | lp TMP2, TRACE:TMP2->mcode
+ | stw BASE, DISPATCH_GL(jit_base)(DISPATCH)
+ | mtctr TMP2
+ | addi JGL, DISPATCH, GG_DISP2G+32768
+ | stw L, DISPATCH_GL(tmpbuf.L)(DISPATCH)
+ | bctr
+ |.endif
+ break;
+
+ case BC_JMP:
+ | // RA = base*8 (only used by trace recorder), RD = target
+ | branch_RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | lbz TMP1, -4+PC2PROTO(numparams)(PC)
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw RA, TMP2
+ | slwi TMP1, TMP1, 3
+ | bgt ->vm_growstack_l
+ if (op != BC_JFUNCF) {
+ | ins_next1
+ }
+ |2:
+ | cmplw NARGS8:RC, TMP1 // Check for missing parameters.
+ | blt >3
+ if (op == BC_JFUNCF) {
+ | decode_RD8 RD, INS
+ | b =>BC_JLOOP
+ } else {
+ | ins_next2
+ }
+ |
+ |3: // Clear missing parameters.
+ | stwx TISNIL, BASE, NARGS8:RC
+ | addi NARGS8:RC, NARGS8:RC, 8
+ | b <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | NYI // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | // BASE = new base, RA = BASE+framesize*8, RB = LFUNC, RC = nargs*8
+ | lwz TMP2, L->maxstack
+ | add TMP1, BASE, RC
+ | add TMP0, RA, RC
+ | stw LFUNC:RB, 4(TMP1) // Store copy of LFUNC.
+ | addi TMP3, RC, 8+FRAME_VARG
+ | lwz KBASE, -4+PC2PROTO(k)(PC)
+ | cmplw TMP0, TMP2
+ | stw TMP3, 0(TMP1) // Store delta + FRAME_VARG.
+ | bge ->vm_growstack_l
+ | lbz TMP2, -4+PC2PROTO(numparams)(PC)
+ | mr RA, BASE
+ | mr RC, TMP1
+ | ins_next1
+ | cmpwi TMP2, 0
+ | addi BASE, TMP1, 8
+ | beq >3
+ |1:
+ | cmplw RA, RC // Less args than parameters?
+ | lwz TMP0, 0(RA)
+ | lwz TMP3, 4(RA)
+ | bge >4
+ | stw TISNIL, 0(RA) // Clear old fixarg slot (help the GC).
+ | addi RA, RA, 8
+ |2:
+ | addic. TMP2, TMP2, -1
+ | stw TMP0, 8(TMP1)
+ | stw TMP3, 12(TMP1)
+ | addi TMP1, TMP1, 8
+ | bne <1
+ |3:
+ | ins_next2
+ |
+ |4: // Clear missing parameters.
+ | li TMP0, LJ_TNIL
+ | b <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | // BASE = new base, RA = BASE+framesize*8, RB = CFUNC, RC = nargs*8
+ if (op == BC_FUNCC) {
+ | lp RD, CFUNC:RB->f
+ } else {
+ | lp RD, DISPATCH_GL(wrapf)(DISPATCH)
+ }
+ | add TMP1, RA, NARGS8:RC
+ | lwz TMP2, L->maxstack
+ | .toc lp TMP3, 0(RD)
+ | add RC, BASE, NARGS8:RC
+ | stp BASE, L->base
+ | cmplw TMP1, TMP2
+ | stp RC, L->top
+ | li_vmstate C
+ |.if TOC
+ | mtctr TMP3
+ |.else
+ | mtctr RD
+ |.endif
+ if (op == BC_FUNCCW) {
+ | lp CARG2, CFUNC:RB->f
+ }
+ | mr CARG1, L
+ | bgt ->vm_growstack_c // Need to grow stack.
+ | .toc lp TOCREG, TOC_OFS(RD)
+ | .tocenv lp ENVREG, ENV_OFS(RD)
+ | st_vmstate
+ | bctrl // (lua_State *L [, lua_CFunction f])
+ | // Returns nresults.
+ | lp BASE, L->base
+ | .toc ld TOCREG, SAVE_TOC
+ | slwi RD, CRET1, 3
+ | lp TMP1, L->top
+ | li_vmstate INTERP
+ | lwz PC, FRAME_PC(BASE) // Fetch PC of caller.
+ | stw L, DISPATCH_GL(cur_L)(DISPATCH)
+ | sub RA, TMP1, RD // RA = L->top - nresults*8
+ | st_vmstate
+ | b ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+
+ dasm_growpc(Dst, BC__MAX);
+
+ build_subroutines(ctx);
+
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ int i;
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
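+ /* Emit DW_CFA_offset (0x80+reg) for the callee-saved GPRs r14-r31 and FPRs f14-f31. */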
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE0:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_TARGET_PS3
+ "\t.long .lj_vm_ffi_call\n"
+#else
+ "\t.long lj_vm_ffi_call\n"
+#endif
+ "\t.long %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n"
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x5\n\t.uleb128 70\n\t.uleb128 55\n",
+ fcofs, CFRAME_SIZE);
+ for (i = 14; i <= 31; i++)
+ fprintf(ctx->fp,
+ "\t.byte %d\n\t.uleb128 %d\n"
+ "\t.byte %d\n\t.uleb128 %d\n",
+ 0x80+i, 37+(31-i), 0x80+32+i, 2+2*(31-i));
+ fprintf(ctx->fp,
+ "\t.align 2\n"
+ ".LEFDE2:\n\n");
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -4\n"
+ "\t.byte 65\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 1\n\t.uleb128 0\n"
+ "\t.align 2\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0x11\n\t.uleb128 65\n\t.sleb128 -1\n"
+ "\t.byte 0x8e\n\t.uleb128 2\n"
+ "\t.byte 0xd\n\t.uleb128 0xe\n"
+ "\t.align 2\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_x64.dasc b/libs/luajit-cmake/luajit/src/vm_x64.dasc
new file mode 100644
index 0000000..03d9655
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_x64.dasc
@@ -0,0 +1,4935 @@
+|// Low-level VM code for x64 CPUs in LJ_GC64 mode.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|
+|.arch x64
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|//-----------------------------------------------------------------------
+|
+|.if WIN
+|.define X64WIN, 1 // Windows/x64 calling conventions.
+|.endif
+|
+|// Fixed register assignments for the interpreter.
+|// This is very fragile and has many dependencies. Caveat emptor.
+|.define BASE, rdx // Not C callee-save, refetched anyway.
+|.if X64WIN
+|.define KBASE, rdi // Must be C callee-save.
+|.define PC, rsi // Must be C callee-save.
+|.define DISPATCH, rbx // Must be C callee-save.
+|.define KBASEd, edi
+|.define PCd, esi
+|.define DISPATCHd, ebx
+|.else
+|.define KBASE, r15 // Must be C callee-save.
+|.define PC, rbx // Must be C callee-save.
+|.define DISPATCH, r14 // Must be C callee-save.
+|.define KBASEd, r15d
+|.define PCd, ebx
+|.define DISPATCHd, r14d
+|.endif
+|
+|.define RA, rcx
+|.define RAd, ecx
+|.define RAH, ch
+|.define RAL, cl
+|.define RB, rbp // Must be rbp (C callee-save).
+|.define RBd, ebp
+|.define RC, rax // Must be rax.
+|.define RCd, eax
+|.define RCW, ax
+|.define RCH, ah
+|.define RCL, al
+|.define OP, RBd
+|.define RD, RC
+|.define RDd, RCd
+|.define RDW, RCW
+|.define RDL, RCL
+|.define TMPR, r10
+|.define TMPRd, r10d
+|.define ITYPE, r11
+|.define ITYPEd, r11d
+|
+|.if X64WIN
+|.define CARG1, rcx // x64/WIN64 C call arguments.
+|.define CARG2, rdx
+|.define CARG3, r8
+|.define CARG4, r9
+|.define CARG1d, ecx
+|.define CARG2d, edx
+|.define CARG3d, r8d
+|.define CARG4d, r9d
+|.else
+|.define CARG1, rdi // x64/POSIX C call arguments.
+|.define CARG2, rsi
+|.define CARG3, rdx
+|.define CARG4, rcx
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG1d, edi
+|.define CARG2d, esi
+|.define CARG3d, edx
+|.define CARG4d, ecx
+|.define CARG5d, r8d
+|.define CARG6d, r9d
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|//-----------------------------------------------------------------------
+|.if X64WIN // x64/Windows stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rdi; push rsi; push rbx
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+| pop rbx; pop rsi; pop rdi; pop rbp
+|.endmacro
+|
+|.define SAVE_CFRAME, aword [rsp+aword*13]
+|.define SAVE_PC, aword [rsp+aword*12]
+|.define SAVE_L, aword [rsp+aword*11]
+|.define SAVE_ERRF, dword [rsp+dword*21]
+|.define SAVE_NRES, dword [rsp+dword*20]
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.define ARG5, aword [rsp+aword*4]
+|.define CSAVE_4, aword [rsp+aword*3]
+|.define CSAVE_3, aword [rsp+aword*2]
+|.define CSAVE_2, aword [rsp+aword*1]
+|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
+|
+|.define ARG5d, dword [rsp+dword*8]
+|.define TMP1, ARG5 // TMP1 overlaps ARG5
+|.define TMP1d, ARG5d
+|.define TMP1hi, dword [rsp+dword*9]
+|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
+|
+|//-----------------------------------------------------------------------
+|.else // x64/POSIX stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rbx; push r15; push r14
+|.if NO_UNWIND
+| push r13; push r12
+|.endif
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+|.if NO_UNWIND
+| pop r12; pop r13
+|.endif
+| pop r14; pop r15; pop rbx; pop rbp
+|.endmacro
+|
+|//----- 16 byte aligned,
+|.if NO_UNWIND
+|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*10]
+|.define SAVE_R3, aword [rsp+aword*9]
+|.define SAVE_R2, aword [rsp+aword*8]
+|.define SAVE_R1, aword [rsp+aword*7]
+|.define SAVE_RU2, aword [rsp+aword*6]
+|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.else
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.endif
+|.define SAVE_CFRAME, aword [rsp+aword*4]
+|.define SAVE_PC, aword [rsp+aword*3]
+|.define SAVE_L, aword [rsp+aword*2]
+|.define SAVE_ERRF, dword [rsp+dword*3]
+|.define SAVE_NRES, dword [rsp+dword*2]
+|.define TMP1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned
+|
+|.define TMP1d, dword [rsp]
+|.define TMP1hi, dword [rsp+dword*1]
+|.define MULTRES, TMP1d // MULTRES overlaps TMP1d.
+|
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Instruction headers.
+|.macro ins_A; .endmacro
+|.macro ins_AD; .endmacro
+|.macro ins_AJ; .endmacro
+|.macro ins_ABC; movzx RBd, RCH; movzx RCd, RCL; .endmacro
+|.macro ins_AB_; movzx RBd, RCH; .endmacro
+|.macro ins_A_C; movzx RCd, RCL; .endmacro
+|.macro ins_AND; not RD; .endmacro
+|
+|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
+|.macro ins_NEXT
+| mov RCd, [PC]
+| movzx RAd, RCH
+| movzx OP, RCL
+| add PC, 4
+| shr RCd, 16
+| jmp aword [DISPATCH+OP*8]
+|.endmacro
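+|// (Each 32-bit instruction: OP in bits 0-7, A in bits 8-15, D (or C|B) in bits 16-31.)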
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| // Around 10%-30% slower on Core2, a lot slower on P4.
+| .macro ins_next
+| jmp ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-8] = PC
+| mov PC, LFUNC:RB->pc
+| mov RAd, [PC]
+| movzx OP, RAL
+| movzx RAd, RAH
+| add PC, 4
+| jmp aword [DISPATCH+OP*8]
+|.endmacro
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC, RD = nargs+1
+| mov [BASE-8], PC
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to clear or set tags.
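+|// (LJ_GC64 NaN-tagging: bits 47-63 of a TValue hold the tag, the payload is 47 bits.)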
+|.macro cleartp, reg; shl reg, 17; shr reg, 17; .endmacro
+|.macro settp, reg, tp
+| mov64 ITYPE, ((uint64_t)tp<<47)
+| or reg, ITYPE
+|.endmacro
+|.macro settp, dst, reg, tp
+| mov64 dst, ((uint64_t)tp<<47)
+| or dst, reg
+|.endmacro
+|.macro setint, reg
+| settp reg, LJ_TISNUM
+|.endmacro
+|.macro setint, dst, reg
+| settp dst, reg, LJ_TISNUM
+|.endmacro
+|
+|// Macros to test operand types.
+|.macro checktp_nc, reg, tp, target
+| mov ITYPE, reg
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checktp, reg, tp, target
+| mov ITYPE, reg
+| cleartp reg
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checktptp, src, tp, target
+| mov ITYPE, src
+| sar ITYPE, 47
+| cmp ITYPEd, tp
+| jne target
+|.endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR, target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB, target; .endmacro
+|.macro checkfunc, reg, target; checktp reg, LJ_TFUNC, target; .endmacro
+|
+|.macro checknumx, reg, target, jump
+| mov ITYPE, reg
+| sar ITYPE, 47
+| cmp ITYPEd, LJ_TISNUM
+| jump target
+|.endmacro
+|.macro checkint, reg, target; checknumx reg, target, jne; .endmacro
+|.macro checkinttp, src, target; checknumx src, target, jne; .endmacro
+|.macro checknum, reg, target; checknumx reg, target, jae; .endmacro
+|.macro checknumtp, src, target; checknumx src, target, jae; .endmacro
+|.macro checknumber, src, target; checknumx src, target, ja; .endmacro
+|
+|.macro mov_false, reg; mov64 reg, (int64_t)~((uint64_t)1<<47); .endmacro
+|.macro mov_true, reg; mov64 reg, (int64_t)~((uint64_t)2<<47); .endmacro
+|
+|// These operands must be used with movzx.
+|.define PC_OP, byte [PC-4]
+|.define PC_RA, byte [PC-3]
+|.define PC_RB, byte [PC-1]
+|.define PC_RC, byte [PC-2]
+|.define PC_RD, word [PC-2]
+|
+|.macro branchPC, reg
+| lea PC, [PC+reg*4-BCBIAS_J*4]
+|.endmacro
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|// Decrement hashed hotcount and trigger trace recorder if zero.
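+|// (PC/2 masked with HOTCOUNT_PCMASK indexes the counter table at GG_DISP2HOT; jb fires on underflow.)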
+|.macro hotloop, reg
+| mov reg, PCd
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
+| jb ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall, reg
+| mov reg, PCd
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
+| jb ->vm_hotcall
+|.endmacro
+|
+|// Set current VM state.
+|.macro set_vmstate, st
+| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
+|.endmacro
+|
+|.macro fpop1; fstp st1; .endmacro
+|
+|// Synthesize SSE FP constants.
+|.macro sseconst_abs, reg, tmp // Synthesize abs mask.
+| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
+|.endmacro
+|
+|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const.
+| mov64 tmp, U64x(val,00000000); movd reg, tmp
+|.endmacro
+|
+|.macro sseconst_sign, reg, tmp // Synthesize sign mask.
+| sseconst_hi reg, tmp, 80000000
+|.endmacro
+|.macro sseconst_1, reg, tmp // Synthesize 1.0.
+| sseconst_hi reg, tmp, 3ff00000
+|.endmacro
+|.macro sseconst_2p52, reg, tmp // Synthesize 2^52.
+| sseconst_hi reg, tmp, 43300000
+|.endmacro
+|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51.
+| sseconst_hi reg, tmp, 43380000
+|.endmacro
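+|// (Adding 2^52+2^51 to a double in int32 range leaves its integer bits in the low mantissa word.)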
+|
+|// Move table write barrier back. Overwrites reg.
+|.macro barrierback, tab, reg
+| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab)
+| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
+| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
+| mov tab->gclist, reg
+|.endmacro
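+|// (The table is turned gray again and queued on gc.grayagain for later re-traversal.)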
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_returnp:
+ | test PCd, FRAME_P
+ | jz ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | and PC, -8
+ | sub BASE, PC // Restore caller base.
+ | lea RA, [RA+PC-8] // Rebase RA and prepend one result.
+ | mov PC, [BASE-8] // Fetch PC of previous frame.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | mov_true ITYPE
+ | mov aword [BASE+RA], ITYPE // Prepend true to results.
+ |
+ |->vm_returnc:
+ | add RDd, 1 // RD = nresults+1
+ | jz ->vm_unwind_yield
+ | mov MULTRES, RDd
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
+ | xor PC, FRAME_C
+ | test PCd, FRAME_TYPE
+ | jnz ->vm_returnp
+ |
+ | // Return to C.
+ | set_vmstate C
+ | and PC, -8
+ | sub PC, BASE
+ | neg PC // Previous base = BASE - delta.
+ |
+ | sub RDd, 1
+ | jz >2
+ |1: // Move results down.
+ | mov RB, [BASE+RA]
+ | mov [BASE-16], RB
+ | add BASE, 8
+ | sub RDd, 1
+ | jnz <1
+ |2:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, PC
+ |3:
+ | mov RDd, MULTRES
+ | mov RAd, SAVE_NRES // RA = wanted nresults+1
+ |4:
+ | cmp RAd, RDd
+ | jne >6 // More/less results wanted?
+ |5:
+ | sub BASE, 16
+ | mov L:RB->top, BASE
+ |
+ |->vm_leave_cp:
+ | mov RA, SAVE_CFRAME // Restore previous C frame.
+ | mov L:RB->cframe, RA
+ | xor eax, eax // Ok return status for vm_pcall.
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret
+ |
+ |6:
+ | jb >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | cmp BASE, L:RB->maxstack
+ | ja >8
+ | mov aword [BASE-16], LJ_TNIL
+ | add BASE, 8
+ | add RDd, 1
+ | jmp <4
+ |
+ |7: // Less results wanted.
+ | test RAd, RAd
+ | jz <5 // But check for LUA_MULTRET+1.
+ | sub RA, RD // Negative result!
+ | lea BASE, [BASE+RA*8] // Correct top.
+ | jmp <5
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with (high) nresults adjustment.
+ | mov L:RB->top, BASE // Save current top held in BASE (yes).
+ | mov MULTRES, RDd // Need to fill only remainder with nil.
+ | mov CARG2d, RAd
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->top // Need the (realloced) L->top in BASE.
+ | jmp <3
+ |
+ |->vm_unwind_yield:
+ | mov al, LUA_YIELD
+ | jmp ->vm_unwind_c_eh
+ |
+ |->vm_unwind_c: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ | mov eax, CARG2d // Error return status for vm_pcall.
+ | mov rsp, CARG1
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov GL:RB, L:RB->glref
+ | mov dword GL:RB->vmstate, ~LJ_VMST_C
+ | jmp ->vm_leave_unw
+ |
+ |->vm_unwind_rethrow:
+ |.if not X64WIN
+ | mov CARG1, SAVE_L
+ | mov CARG2d, eax
+ | restoreregs
+ | jmp extern lj_err_throw // (lua_State *L, int errcode)
+ |.endif
+ |
+ |->vm_unwind_ff: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ | and CARG1, CFRAME_RAWMASK
+ | mov rsp, CARG1
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov RDd, 1+1 // Really 1+2 results, incr. later.
+ | mov BASE, L:RB->base
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov PC, [BASE-8] // Fetch PC of previous frame.
+ | mov_false RA
+ | mov RB, [BASE]
+ | mov [BASE-16], RA // Prepend false to error message.
+ | mov [BASE-8], RB
+ | mov RA, -16 // Results start at BASE+RA = BASE-16.
+ | set_vmstate INTERP
+ | jmp ->vm_returnc // Increments RD/MULTRES and returns.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | mov CARG2d, LUA_MINSTACK
+ | jmp >2
+ |
+ |->vm_growstack_v: // Grow stack for vararg Lua function.
+ | sub RD, 16 // LJ_FR2
+ | jmp >1
+ |
+ |->vm_growstack_f: // Grow stack for fixarg Lua function.
+ | // BASE = new base, RD = nargs+1, RB = L, PC = first PC
+ | lea RD, [BASE+NARGS:RD*8-8]
+ |1:
+ | movzx RAd, byte [PC-4+PC2PROTO(framesize)]
+ | add PC, 4 // Must point after first instruction.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov SAVE_PC, PC
+ | mov CARG2, RA
+ |2:
+ | // RB = L, L->base = new base, L->top = top
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | sub RD, BASE
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | // BASE = new base, RB = LFUNC, RD = nargs+1
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov RA, CARG2
+ | mov PCd, FRAME_CP
+ | xor RDd, RDd
+ | lea KBASE, [rsp+CFRAME_RESUME]
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov SAVE_PC, RD // Any value outside of bytecode is ok.
+ | mov SAVE_CFRAME, RD
+ | mov SAVE_NRES, RDd
+ | mov SAVE_ERRF, RDd
+ | mov L:RB->cframe, KBASE
+ | cmp byte L:RB->status, RDL
+ | je >2 // Initial resume (like a call).
+ |
+ | // Resume after yield (like a return).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov byte L:RB->status, RDL
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr RDd, 3
+ | add RDd, 1 // RD = nresults+1
+ | sub RA, BASE // RA = resultofs
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd
+ | test PCd, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PCd, FRAME_CP
+ | mov SAVE_ERRF, CARG4d
+ | jmp >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PCd, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ | mov SAVE_NRES, CARG3d
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov RA, CARG2
+ |
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ | add DISPATCH, GG_G2DISP
+ | mov L:RB->cframe, rsp
+ |
+ |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
+ | add PC, RA
+ | sub PC, BASE // PC = frame delta + frame type
+ |
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr NARGS:RDd, 3
+ | add NARGS:RDd, 1 // RD = nargs+1
+ |
+ |->vm_call_dispatch:
+ | mov LFUNC:RB, [RA-16]
+ | checkfunc LFUNC:RB, ->vmeta_call // Ensure KBASE defined and != BASE.
+ |
+ |->vm_call_dispatch_f:
+ | mov BASE, RA
+ | ins_call
+ | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ | mov L:RB, CARG1 // Caveat: CARG1 may be RA.
+ | mov SAVE_L, CARG1
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ |
+ | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
+ | sub KBASE, L:RB->top
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov SAVE_ERRF, 0 // No error function.
+ | mov SAVE_NRES, KBASEd // Neg. delta means cframe w/o frame.
+ | add DISPATCH, GG_G2DISP
+ | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
+ |
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov L:RB->cframe, rsp
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ |
+ | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ | // TValue * (new base) or NULL returned in eax (RC).
+ | test RC, RC
+ | jz ->vm_leave_cp // No base? Just remove C frame.
+ | mov RA, RC
+ | mov PCd, FRAME_CP
+ | jmp <2 // Else continue with the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
+ | add RA, BASE
+ | and PC, -8
+ | mov RB, BASE
+ | sub BASE, PC // Restore caller BASE.
+ | mov aword [RA+RD*8-8], LJ_TNIL // Ensure one valid arg.
+ | mov RC, RA // ... in [RC]
+ | mov PC, [RB-24] // Restore PC from [cont|PC].
+ | mov RA, qword [RB-32] // May be negative on WIN64 with debug.
+ |.if FFI
+ | cmp RA, 1
+ | jbe >1
+ |.endif
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | // BASE = base, RC = result, RB = meta base
+ | jmp RA // Jump to continuation.
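+ | // (The two slots below the meta base hold the [cont|PC] pair; cont 0/1 are FFI specials, see below.)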
+ |
+ |.if FFI
+ |1:
+ | je ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: Tail call from C function.
+ | sub RB, BASE
+ | shr RBd, 3
+ | lea RDd, [RBd-3]
+ | jmp ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // BASE = base, RC = result, RB = mbase
+ | movzx RAd, PC_RB
+ | sub RB, 32
+ | lea RA, [BASE+RA*8]
+ | sub RA, RB
+ | je ->cont_ra
+ | neg RA
+ | shr RAd, 3
+ |.if X64WIN
+ | mov CARG3d, RAd
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | mov RC, [RC]
+ | mov [RB], RC
+ | mov CARG2, RB
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | mov CARG3d, RAd
+ | mov RA, [RC]
+ | mov [RB], RA
+ | mov CARG2, RB
+ |.endif
+ | jmp ->BC_CAT_Z
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets:
+ | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
+ | mov TMP1, STR:RC
+ | lea RC, TMP1
+ | cmp PC_OP, BC_GGET
+ | jne >1
+ | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
+ | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RB], TAB:RA
+ | jmp >2
+ |
+ |->vmeta_tgetb:
+ | movzx RCd, PC_RC
+ |.if DUALNUM
+ | setint RC
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RCd
+ | movsd TMP1, xmm0
+ |.endif
+ | lea RC, TMP1
+ | jmp >1
+ |
+ |->vmeta_tgetv:
+ | movzx RCd, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RBd, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, RB
+ | mov CARG3, RC
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ |->cont_ra: // BASE = base, RC = result
+ | movzx RAd, PC_RA
+ | mov RB, [RC]
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | mov RA, L:RB->top
+ | mov [RA-24], PC // [cont|PC]
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
+ | mov NARGS:RDd, 2+1 // 2 args for func(t, k).
+ | cleartp LFUNC:RB
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | mov CARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, RCd // Caveat: CARG2 == BASE
+ | call extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RC).
+ | movzx RAd, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | test RC, RC
+ | jnz ->BC_TGETR_Z
+ | mov ITYPE, LJ_TNIL
+ | jmp ->BC_TGETR2_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets:
+ | settp STR:RC, LJ_TSTR // STR:RC = GCstr *
+ | mov TMP1, STR:RC
+ | lea RC, TMP1
+ | cmp PC_OP, BC_GSET
+ | jne >1
+ | settp TAB:RA, TAB:RB, LJ_TTAB // TAB:RB = GCtab *
+ | lea RB, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RB], TAB:RA
+ | jmp >2
+ |
+ |->vmeta_tsetb:
+ | movzx RCd, PC_RC
+ |.if DUALNUM
+ | setint RC
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RCd
+ | movsd TMP1, xmm0
+ |.endif
+ | lea RC, TMP1
+ | jmp >1
+ |
+ |->vmeta_tsetv:
+ | movzx RCd, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RBd, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, RB
+ | mov CARG3, RC
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | movzx RAd, PC_RA
+ | mov RB, [BASE+RA*8]
+ | mov [RC], RB
+ |->cont_nop: // BASE = base, (RC = result)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | mov RA, L:RB->top
+ | mov [RA-24], PC // [cont|PC]
+ | movzx RCd, PC_RA
+ | // Copy value to third argument.
+ | mov RB, [BASE+RC*8]
+ | mov [RA+16], RB
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-16] // Guaranteed to be a function here.
+ | mov NARGS:RDd, 3+1 // 3 args for func(t, k, v).
+ | cleartp LFUNC:RB
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ |.if X64WIN
+ | mov L:CARG1, SAVE_L
+ | mov CARG3d, RCd
+ | mov L:CARG1->base, BASE
+ | xchg CARG2, TAB:RB // Caveat: CARG2 == BASE.
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov CARG2, TAB:RB
+ | mov L:CARG1->base, BASE
+ | mov RB, BASE // Save BASE.
+ | mov CARG3d, RCd // Caveat: CARG3 == BASE.
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // TValue * returned in eax (RC).
+ | movzx RAd, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | jmp ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ | movzx RDd, PC_RD
+ | movzx RAd, PC_RA
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 == BASE.
+ |.if X64WIN
+ | lea CARG3, [BASE+RD*8]
+ | lea CARG2, [BASE+RA*8]
+ |.else
+ | lea CARG2, [BASE+RA*8]
+ | lea CARG3, [BASE+RD*8]
+ |.endif
+ | mov CARG1, L:RB // Caveat: CARG1/CARG4 == RA.
+ | movzx CARG4d, PC_OP
+ | mov SAVE_PC, PC
+ | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ |3:
+ | mov BASE, L:RB->base
+ | cmp RC, 1
+ | ja ->vmeta_binop
+ |4:
+ | lea PC, [PC+4]
+ | jb >6
+ |5:
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |6:
+ | ins_next
+ |
+ |->cont_condt: // BASE = base, RC = result
+ | add PC, 4
+ | mov ITYPE, [RC]
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is true.
+ | jb <5
+ | jmp <6
+ |
+ |->cont_condf: // BASE = base, RC = result
+ | mov ITYPE, [RC]
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND // Branch if result is false.
+ | jmp <4
+ |
+ |->vmeta_equal:
+ | cleartp TAB:RD
+ | sub PC, 4
+ |.if X64WIN
+ | mov CARG3, RD
+ | mov CARG4d, RBd
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
+ | mov CARG2, RA
+ | mov CARG1, L:RB // Caveat: CARG1 == RA.
+ |.else
+ | mov CARG2, RA
+ | mov CARG4d, RBd // Caveat: CARG4 == RA.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG3 == BASE.
+ | mov CARG3, RD
+ | mov CARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, 4
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG1, L:RB
+ | mov CARG2d, dword [PC-4]
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal_cd // (lua_State *L, BCIns ins)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |.endif
+ |
+ |->vmeta_istype:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2d, RAd
+ | mov CARG3d, RDd
+ | mov L:CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | mov BASE, L:RB->base
+ | jmp <6
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vno:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_vn:
+ | lea RC, [KBASE+RC*8]
+ | jmp >1
+ |
+ |->vmeta_arith_nvo:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_nv:
+ | lea TMPR, [KBASE+RC*8]
+ | lea RC, [BASE+RB*8]
+ | mov RB, TMPR
+ | jmp >2
+ |
+ |->vmeta_unm:
+ | lea RC, [BASE+RD*8]
+ | mov RB, RC
+ | jmp >2
+ |
+ |->vmeta_arith_vvo:
+ |.if DUALNUM
+ | movzx RBd, PC_RB
+ | movzx RCd, PC_RC
+ |.endif
+ |->vmeta_arith_vv:
+ | lea RC, [BASE+RC*8]
+ |1:
+ | lea RB, [BASE+RB*8]
+ |2:
+ | lea RA, [BASE+RA*8]
+ |.if X64WIN
+ | mov CARG3, RB
+ | mov CARG4, RC
+ | movzx RCd, PC_OP
+ | mov ARG5d, RCd
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2 == BASE.
+ | mov CARG2, RA
+ | mov CARG1, L:RB // Caveat: CARG1 == RA.
+ |.else
+ | movzx CARG5d, PC_OP
+ | mov CARG2, RA
+ | mov CARG4, RC // Caveat: CARG4 == RA.
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG3 == BASE.
+ | mov CARG3, RB
+ | mov L:RB, L:CARG1
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = base, RC = new base, stack = cont/func/o1/o2
+ | mov RA, RC
+ | sub RC, BASE
+ | mov [RA-24], PC // [cont|PC]
+ | lea PC, [RC+FRAME_CONT]
+ | mov NARGS:RDd, 2+1 // 2 args for func(o1, o2).
+ | jmp ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | movzx RDd, PC_RD
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | lea CARG2, [BASE+RD*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_len // (lua_State *L, TValue *o)
+ | // NULL (retry) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+#if LJ_52
+ | test RC, RC
+ | jne ->vmeta_binop // Binop call for compatibility.
+ | movzx RDd, PC_RD
+ | mov TAB:CARG1, [BASE+RD*8]
+ | cleartp TAB:CARG1
+ | jmp ->BC_LEN_Z
+#else
+ | jmp ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call_ra:
+ | lea RA, [BASE+RA*8+16]
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // BASE = old base, RA = new base, RC = nargs+1, PC = return
+ | mov TMP1d, NARGS:RDd // Save RA, RC for us.
+ | mov RB, RA
+ |.if X64WIN
+ | mov L:TMPR, SAVE_L
+ | mov L:TMPR->base, BASE // Caveat: CARG2 is BASE.
+ | lea CARG2, [RA-16]
+ | lea CARG3, [RA+NARGS:RD*8-8]
+ | mov CARG1, L:TMPR // Caveat: CARG1 is RA.
+ |.else
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG3 is BASE.
+ | lea CARG2, [RA-16]
+ | lea CARG3, [RA+NARGS:RD*8-8]
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | mov RA, RB
+ | mov L:RB, SAVE_L
+ | mov BASE, L:RB->base
+ | mov NARGS:RDd, TMP1d
+ | mov LFUNC:RB, [RA-16]
+ | add NARGS:RDd, 1
+ | // This is fragile. L->base must not move, KBASE must always be defined.
+ | cmp KBASE, BASE // Continue with CALLT if flag set.
+ | je ->BC_CALLT_Z
+ | cleartp LFUNC:RB
+ | mov BASE, RA
+ | ins_call // Otherwise call resolved metamethod.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, RA // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | mov SAVE_PC, PC
+ | call extern lj_meta_for // (lua_State *L, TValue *base)
+ | mov BASE, L:RB->base
+ | mov RCd, [PC-4]
+ | movzx RAd, RCH
+ | movzx OP, RCL
+ | shr RCd, 16
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmp NARGS:RDd, 1+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmp NARGS:RDd, 2+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_n, name, op
+ | .ffunc_1 name
+ | checknumtp [BASE], ->fff_fallback
+ | op xmm0, qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_n, name
+ | .ffunc_n name, movsd
+ |.endmacro
+ |
+ |.macro .ffunc_nn, name
+ | .ffunc_2 name
+ | checknumtp [BASE], ->fff_fallback
+ | checknumtp [BASE+8], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | movsd xmm1, qword [BASE+8]
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses label 1.
+ |.macro ffgccheck
+ | mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | jb >1
+ | call ->fff_gcstep
+ |1:
+ |.endmacro
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | mov ITYPE, [BASE]
+ | mov RB, ITYPE
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND; jae ->fff_fallback
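+ | // Truthy first arg: assert returns all its arguments, so copy them
+ | // down 16 bytes into the result slots.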
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd
+ | mov RB, [BASE]
+ | mov [BASE-16], RB
+ | sub RDd, 2
+ | jz >2
+ | mov RA, BASE
+ |1:
+ | add RA, 8
+ | mov RB, [RA]
+ | mov [RA-16], RB
+ | sub RDd, 1
+ | jnz <1
+ |2:
+ | mov RDd, MULTRES
+ | jmp ->fff_res_
+ |
+ |.ffunc_1 type
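+ | // sar 47 yields the (negative) NaN-tag; cmovb clamps all numbers to
+ | // LJ_TISNUM, and 'not' maps the tag to a small non-negative index into
+ | // the upvalues holding the type-name strings.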
+ | mov RC, [BASE]
+ | sar RC, 47
+ | mov RBd, LJ_TISNUM
+ | cmp RCd, RBd
+ | cmovb RCd, RBd
+ | not RCd
+ |2:
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
+ | mov PC, [BASE-8]
+ | settp STR:RC, LJ_TSTR
+ | mov [BASE-16], STR:RC
+ | jmp ->fff_res1
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | mov TAB:RB, [BASE]
+ | mov PC, [BASE-8]
+ | checktab TAB:RB, >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, TAB:RB->metatable
+ |2:
+ | test TAB:RB, TAB:RB
+ | mov aword [BASE-16], LJ_TNIL
+ | jz ->fff_res1
+ | settp TAB:RC, TAB:RB, LJ_TTAB
+ | mov [BASE-16], TAB:RC // Store metatable as default result.
+ | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+8*(GCROOT_MMNAME+MM_metatable)]
+ | mov RAd, TAB:RB->hmask
+ | and RAd, STR:RC->sid
+ | settp STR:RC, LJ_TSTR
+ | imul RAd, #NODE
+ | add NODE:RA, TAB:RB->node
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | cmp NODE:RA->key, STR:RC
+ | je >5
+ |4:
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <3
+ | jmp ->fff_res1 // Not found, keep default result.
+ |5:
+ | mov RB, NODE:RA->val
+ | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value.
+ | mov [BASE-16], RB // Return value of mt.__metatable.
+ | jmp ->fff_res1
+ |
+ |6:
+ | cmp ITYPEd, LJ_TUDATA; je <1
+ | cmp ITYPEd, LJ_TISNUM; ja >7
+ | mov ITYPEd, LJ_TISNUM
+ |7:
+ | not ITYPEd
+ | mov TAB:RB, [DISPATCH+ITYPE*8+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | jmp <2
+ |
+ |.ffunc_2 setmetatable
+ | mov TAB:RB, [BASE]
+ | mov TAB:TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+ | mov TAB:RA, [BASE+8]
+ | checktab TAB:RA, ->fff_fallback
+ | mov TAB:RB->metatable, TAB:RA
+ | mov PC, [BASE-8]
+ | mov [BASE-16], TAB:TMPR // Return original table.
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jz >1
+ | // Possible write barrier. Table is black, but skip iswhite(mt) check.
+ | barrierback TAB:RB, RC
+ |1:
+ | jmp ->fff_res1
+ |
+ |.ffunc_2 rawget
+ |.if X64WIN
+ | mov TAB:RA, [BASE]
+ | checktab TAB:RA, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ | lea CARG3, [BASE+8]
+ | mov CARG2, TAB:RA // Caveat: CARG2 == BASE.
+ | mov CARG1, SAVE_L
+ |.else
+ | mov TAB:CARG2, [BASE]
+ | checktab TAB:CARG2, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ | lea CARG3, [BASE+8] // Caveat: CARG3 == BASE.
+ | mov CARG1, SAVE_L
+ |.endif
+ | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // cTValue * returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | // Copy table slot.
+ | mov RB, [RD]
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
+ | mov RB, [BASE]
+ | checknumber RB, ->fff_fallback
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | mov PC, [BASE-8]
+ | mov STR:RB, [BASE]
+ | checktp_nc STR:RB, LJ_TSTR, >3
+ | // A __tostring method in the string base metatable is ignored.
+ |2:
+ | mov [BASE-16], STR:RB
+ | jmp ->fff_res1
+ |3: // Handle numbers inline, unless a number base metatable is present.
+ | cmp ITYPEd, LJ_TISNUM; ja ->fff_fallback_1
+ | cmp aword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
+ | jne ->fff_fallback
+ | ffgccheck // Caveat: uses label 1.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Add frame since C call can throw.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ |.if not X64WIN
+ | mov CARG2, BASE // Otherwise: CARG2 == BASE
+ |.endif
+ | mov L:CARG1, L:RB
+ |.if DUALNUM
+ | call extern lj_strfmt_number // (lua_State *L, cTValue *o)
+ |.else
+ | call extern lj_strfmt_num // (lua_State *L, lua_Number *np)
+ |.endif
+ | // GCstr returned in eax (RD).
+ | mov BASE, L:RB->base
+ | settp STR:RB, RD, LJ_TSTR
+ | jmp <2
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | je >2 // Missing 2nd arg?
+ |1:
+ | mov CARG1, [BASE]
+ | mov PC, [BASE-8]
+ | checktab CARG1, ->fff_fallback
+ | mov RB, BASE // Save BASE.
+ |.if X64WIN
+ | lea CARG3, [BASE-16]
+ | lea CARG2, [BASE+8] // Caveat: CARG2 == BASE.
+ |.else
+ | lea CARG2, [BASE+8]
+ | lea CARG3, [BASE-16] // Caveat: CARG3 == BASE.
+ |.endif
+ | call extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // 1=found, 0=end, -1=error returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | test RDd, RDd; jg ->fff_res2 // Found key/value.
+ | js ->fff_fallback_2 // Invalid key.
+ | // End of traversal: return nil.
+ | mov aword [BASE-16], LJ_TNIL
+ | jmp ->fff_res1
+ |2: // Set missing 2nd arg to nil.
+ | mov aword [BASE+8], LJ_TNIL
+ | jmp <1
+ |
+ |.ffunc_1 pairs
+ | mov TAB:RB, [BASE]
+ | mov TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+#if LJ_52
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | mov CFUNC:RD, CFUNC:RD->upvalue[0]
+ | settp CFUNC:RD, LJ_TFUNC
+ | mov PC, [BASE-8]
+ | mov [BASE-16], CFUNC:RD
+ | mov [BASE-8], TMPR
+ | mov aword [BASE], LJ_TNIL
+ | mov RDd, 1+3
+ | jmp ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | mov TAB:RB, [BASE]
+ | checktab TAB:RB, ->fff_fallback
+ |.if DUALNUM
+ | mov RA, [BASE+8]
+ | checkint RA, ->fff_fallback
+ |.else
+ | checknumtp [BASE+8], ->fff_fallback
+ | movsd xmm0, qword [BASE+8]
+ |.endif
+ | mov PC, [BASE-8]
+ |.if DUALNUM
+ | add RAd, 1
+ | setint ITYPE, RA
+ | mov [BASE-16], ITYPE
+ |.else
+ | sseconst_1 xmm1, TMPR
+ | addsd xmm0, xmm1
+ | cvttsd2si RAd, xmm0
+ | movsd qword [BASE-16], xmm0
+ |.endif
+ | cmp RAd, TAB:RB->asize; jae >2 // Not in array part?
+ | mov RD, TAB:RB->array
+ | lea RD, [RD+RA*8]
+ |1:
+ | cmp aword [RD], LJ_TNIL; je ->fff_res0
+ | // Copy array slot.
+ | mov RB, [RD]
+ | mov [BASE-8], RB
+ |->fff_res2:
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cmp dword TAB:RB->hmask, 0; je ->fff_res0
+ |.if X64WIN
+ | mov TMPR, BASE
+ | mov CARG2d, RAd
+ | mov CARG1, TAB:RB
+ | mov RB, TMPR
+ |.else
+ | mov CARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, RAd // Caveat: CARG2 == BASE
+ |.endif
+ | call extern lj_tab_getinth // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RD).
+ | mov BASE, RB
+ | test RD, RD
+ | jnz <1
+ |->fff_res0:
+ | mov RDd, 1+0
+ | jmp ->fff_res
+ |
+ |.ffunc_1 ipairs
+ | mov TAB:RB, [BASE]
+ | mov TMPR, TAB:RB
+ | checktab TAB:RB, ->fff_fallback
+#if LJ_52
+ | cmp aword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | mov CFUNC:RD, CFUNC:RD->upvalue[0]
+ | settp CFUNC:RD, LJ_TFUNC
+ | mov PC, [BASE-8]
+ | mov [BASE-16], CFUNC:RD
+ | mov [BASE-8], TMPR
+ |.if DUALNUM
+ | mov64 RD, ((uint64_t)LJ_TISNUM<<47)
+ | mov [BASE], RD
+ |.else
+ | mov qword [BASE], 0
+ |.endif
+ | mov RDd, 1+3
+ | jmp ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc_1 pcall
+ | lea RA, [BASE+16]
+ | sub NARGS:RDd, 1
+ | mov PCd, 16+FRAME_PCALL
+ |1:
+ | movzx RBd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | shr RB, HOOK_ACTIVE_SHIFT
+ | and RB, 1
+ | add PC, RB // Remember active hook before pcall.
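+ | // FRAME_PCALL+1 == FRAME_PCALLH: the hook bit is encoded in the frame
+ | // type, so unwinding can restore the hook state.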
+ | // Note: this does a (harmless) copy of the function to the PC slot, too.
+ | mov KBASE, RD
+ |2:
+ | mov RB, [RA+KBASE*8-24]
+ | mov [RA+KBASE*8-16], RB
+ | sub KBASE, 1
+ | ja <2
+ | jmp ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | mov LFUNC:RA, [BASE+8]
+ | checktp_nc LFUNC:RA, LJ_TFUNC, ->fff_fallback
+ | mov LFUNC:RB, [BASE] // Swap function and traceback.
+ | mov [BASE], LFUNC:RA
+ | mov [BASE+8], LFUNC:RB
+ | lea RA, [BASE+24]
+ | sub NARGS:RDd, 2
+ | mov PCd, 24+FRAME_PCALL
+ | jmp <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | mov L:RB, [BASE]
+ | cleartp L:RB
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov L:RB, CFUNC:RB->upvalue[0].gcr
+ | cleartp L:RB
+ |.endif
+ | mov PC, [BASE-8]
+ | mov SAVE_PC, PC
+ | mov TMP1, L:RB
+ |.if resume
+ | checktptp [BASE], LJ_TTHREAD, ->fff_fallback
+ |.endif
+ | cmp aword L:RB->cframe, 0; jne ->fff_fallback
+ | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback
+ | mov RA, L:RB->top
+ | je >1 // Status == LUA_YIELD: resume where it yielded.
+ | cmp RA, L:RB->base // Check for presence of initial func.
+ | je ->fff_fallback
+ | mov PC, [RA-8] // Move initial function up.
+ | mov [RA], PC
+ | add RA, 8
+ |1:
+ |.if resume
+ | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread).
+ |.else
+ | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1).
+ |.endif
+ | cmp PC, L:RB->maxstack; ja ->fff_fallback
+ | mov L:RB->top, PC
+ |
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if resume
+ | add BASE, 8 // Keep resumed thread in stack for GC.
+ |.endif
+ | mov L:RB->top, BASE
+ |.if resume
+ | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move.
+ |.else
+ | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move.
+ |.endif
+ | sub RB, PC // Relative to PC.
+ |
+ | cmp PC, RA
+ | je >3
+ |2: // Move args to coroutine.
+ | mov RC, [PC+RB]
+ | mov [PC-8], RC
+ | sub PC, 8
+ | cmp PC, RA
+ | jne <2
+ |3:
+ | mov CARG2, RA
+ | mov CARG1, TMP1
+ | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |
+ | mov L:RB, SAVE_L
+ | mov L:PC, TMP1
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ |
+ | cmp eax, LUA_YIELD
+ | ja >8
+ |4:
+ | mov RA, L:PC->base
+ | mov KBASE, L:PC->top
+ | mov L:PC->top, RA // Clear coroutine stack.
+ | mov PC, KBASE
+ | sub PC, RA
+ | je >6 // No results?
+ | lea RD, [BASE+PC]
+ | shr PCd, 3
+ | cmp RD, L:RB->maxstack
+ | ja >9 // Need to grow stack?
+ |
+ | mov RB, BASE
+ | sub RB, RA
+ |5: // Move results from coroutine.
+ | mov RD, [RA]
+ | mov [RA+RB], RD
+ | add RA, 8
+ | cmp RA, KBASE
+ | jne <5
+ |6:
+ |.if resume
+ | lea RDd, [PCd+2] // nresults+1 = 1 + true + results.
+ | mov_true ITYPE // Prepend true to results.
+ | mov [BASE-8], ITYPE
+ |.else
+ | lea RDd, [PCd+1] // nresults+1 = 1 + results.
+ |.endif
+ |7:
+ | mov PC, SAVE_PC
+ | mov MULTRES, RDd
+ |.if resume
+ | mov RA, -8
+ |.else
+ | xor RAd, RAd
+ |.endif
+ | test PCd, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | mov_false ITYPE // Prepend false to results.
+ | mov [BASE-8], ITYPE
+ | mov RA, L:PC->top
+ | sub RA, 8
+ | mov L:PC->top, RA // Clear error from coroutine stack.
+ | // Copy error message.
+ | mov RD, [RA]
+ | mov [BASE], RD
+ | mov RDd, 1+2 // nresults+1 = 1 + false + error.
+ | jmp <7
+ |.else
+ | mov CARG2, L:PC
+ | mov CARG1, L:RB
+ | call extern lj_ffh_coroutine_wrap_err // (lua_State *L, lua_State *co)
+ | // Error function does not return.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ | mov L:RA, TMP1
+ | mov L:RA->top, KBASE // Undo coroutine stack clearing.
+ | mov CARG2, PC
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov L:PC, TMP1
+ | mov BASE, L:RB->base
+ | jmp <4 // Retry the stack move.
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
+ |
+ |.ffunc coroutine_yield
+ | mov L:RB, SAVE_L
+ | test aword L:RB->cframe, CFRAME_RESUME
+ | jz ->fff_fallback
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->top, RD
+ | xor RDd, RDd
+ | mov aword L:RB->cframe, RD
+ | mov al, LUA_YIELD
+ | mov byte L:RB->status, al
+ | jmp ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ | .ffunc_1 math_abs
+ | mov RB, [BASE]
+ |.if DUALNUM
+ | checkint RB, >3
+ | cmp RBd, 0; jns ->fff_resi
+ | neg RBd; js >2
+ |->fff_resbit:
+ |->fff_resi:
+ | setint RB
+ |->fff_resRB:
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |2:
+ | mov64 RB, U64x(41e00000,00000000) // 2^31.
+ | jmp ->fff_resRB
+ |3:
+ | ja ->fff_fallback
+ |.else
+ | checknum RB, ->fff_fallback
+ |.endif
+ | shl RB, 1
+ | shr RB, 1
+ | mov PC, [BASE-8]
+ | mov [BASE-16], RB
+ | jmp ->fff_res1
+ |
+ |.ffunc_n math_sqrt, sqrtsd
+ |->fff_resxmm0:
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-16], xmm0
+ | // fallthrough
+ |
+ |->fff_res1:
+ | mov RDd, 1+1
+ |->fff_res:
+ | mov MULTRES, RDd
+ |->fff_res_:
+ | test PCd, FRAME_TYPE
+ | jnz >7
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | movzx RAd, PC_RA
+ | neg RA
+ | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | mov aword [BASE+RD*8-24], LJ_TNIL
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | mov RA, -16 // Results start at BASE+RA = BASE-16.
+ | jmp ->vm_return
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checknumx RB, ->fff_resRB, je
+ | ja ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]
+ | call ->vm_ .. func .. _sse
+ |.if DUALNUM
+ | cvttsd2si RBd, xmm0
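+ | // cvttsd2si returns 0x80000000 (integer indefinite) on overflow/NaN;
+ | // the cvtsi2sd/ucomisd round-trip below rejects that case.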
+ | cmp RBd, 0x80000000
+ | jne ->fff_resi
+ | cvtsi2sd xmm1, RBd
+ | ucomisd xmm0, xmm1
+ | jp ->fff_resxmm0
+ | je ->fff_resi
+ |.endif
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc math_log
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // Exactly one argument.
+ | checknumtp [BASE], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | mov RB, BASE
+ | call extern log
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |
+ |.macro math_extern, func
+ | .ffunc_n math_ .. func
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nn math_ .. func
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resxmm0
+ |.endmacro
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_2 math_ldexp
+ | checknumtp [BASE], ->fff_fallback
+ | checknumtp [BASE+8], ->fff_fallback
+ | fld qword [BASE+8]
+ | fld qword [BASE]
+ | fscale
+ | fpop1
+ | mov PC, [BASE-8]
+ | fstp qword [BASE-16]
+ | jmp ->fff_res1
+ |
+ |.ffunc_n math_frexp
+ | mov RB, BASE
+ |.if X64WIN
+ | lea CARG2, TMP1 // Caveat: CARG2 == BASE
+ |.else
+ | lea CARG1, TMP1
+ |.endif
+ | call extern frexp
+ | mov BASE, RB
+ | mov RBd, TMP1d
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-16], xmm0
+ |.if DUALNUM
+ | setint RB
+ | mov [BASE-8], RB
+ |.else
+ | cvtsi2sd xmm1, RBd
+ | movsd qword [BASE-8], xmm1
+ |.endif
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |
+ |.ffunc_n math_modf
+ | mov RB, BASE
+ |.if X64WIN
+ | lea CARG2, [BASE-16] // Caveat: CARG2 == BASE
+ |.else
+ | lea CARG1, [BASE-16]
+ |.endif
+ | call extern modf
+ | mov BASE, RB
+ | mov PC, [BASE-8]
+ | movsd qword [BASE-8], xmm0
+ | mov RDd, 1+2
+ | jmp ->fff_res
+ |
+ |.macro math_minmax, name, cmovop, sseop
+ | .ffunc_1 name
+ | mov RAd, 2
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, >4
+ |1: // Handle integers.
+ | cmp RAd, RDd; jae ->fff_resRB
+ | mov TMPR, [BASE+RA*8-8]
+ | checkint TMPR, >3
+ | cmp RBd, TMPRd
+ | cmovop RB, TMPR
+ | add RAd, 1
+ | jmp <1
+ |3:
+ | ja ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | cvtsi2sd xmm0, RBd
+ | jmp >6
+ |4:
+ | ja ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ |.endif
+ |
+ | movsd xmm0, qword [BASE]
+ |5: // Handle numbers or integers.
+ | cmp RAd, RDd; jae ->fff_resxmm0
+ |.if DUALNUM
+ | mov RB, [BASE+RA*8-8]
+ | checknumx RB, >6, jb
+ | ja ->fff_fallback
+ | cvtsi2sd xmm1, RBd
+ | jmp >7
+ |.else
+ | checknumtp [BASE+RA*8-8], ->fff_fallback
+ |.endif
+ |6:
+ | movsd xmm1, qword [BASE+RA*8-8]
+ |7:
+ | sseop xmm0, xmm1
+ | add RAd, 1
+ | jmp <5
+ |.endmacro
+ |
+ | math_minmax math_min, cmovg, minsd
+ | math_minmax math_max, cmovl, maxsd
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | checkstr STR:RB, ->fff_fallback
+ | mov PC, [BASE-8]
+ | cmp dword STR:RB->len, 1
+ | jb ->fff_res0 // Return no results for empty string.
+ | movzx RBd, byte STR:RB[1]
+ |.if DUALNUM
+ | jmp ->fff_resi
+ |.else
+ | cvtsi2sd xmm0, RBd; jmp ->fff_resxmm0
+ |.endif
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmp NARGS:RDd, 1+1; jne ->fff_fallback // *Exactly* 1 arg.
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, ->fff_fallback
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ | cvttsd2si RBd, qword [BASE]
+ |.endif
+ | cmp RBd, 255; ja ->fff_fallback
+ | mov TMP1d, RBd
+ | mov TMPRd, 1
+ | lea RD, TMP1 // Points to stack. Little-endian.
+ |->fff_newstr:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG3d, TMPRd // Zero-extended to size_t.
+ | mov CARG2, RD
+ | mov CARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // GCstr * returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov PC, [BASE-8]
+ | settp STR:RD, LJ_TSTR
+ | mov [BASE-16], STR:RD
+ | jmp ->fff_res1
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | mov TMPRd, -1
+ | cmp NARGS:RDd, 1+2; jb ->fff_fallback
+ | jna >1
+ |.if DUALNUM
+ | mov TMPR, [BASE+16]
+ | checkint TMPR, ->fff_fallback
+ |.else
+ | checknumtp [BASE+16], ->fff_fallback
+ | cvttsd2si TMPRd, qword [BASE+16]
+ |.endif
+ |1:
+ | mov STR:RB, [BASE]
+ | checkstr STR:RB, ->fff_fallback
+ |.if DUALNUM
+ | mov ITYPE, [BASE+8]
+ | mov RAd, ITYPEd // Must clear hiword for lea below.
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISNUM
+ | jne ->fff_fallback
+ |.else
+ | checknumtp [BASE+8], ->fff_fallback
+ | cvttsd2si RAd, qword [BASE+8]
+ |.endif
+ | mov RCd, STR:RB->len
+ | cmp RCd, TMPRd // len < end? (unsigned compare)
+ | jb >5
+ |2:
+ | test RAd, RAd // start <= 0?
+ | jle >7
+ |3:
+ | sub TMPRd, RAd // start > end?
+ | jl ->fff_emptystr
+ | lea RD, [STR:RB+RAd+#STR-1]
+ | add TMPRd, 1
+ |4:
+ | jmp ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | jl >6
+ | lea TMPRd, [TMPRd+RCd+1] // end = end+(len+1)
+ | jmp <2
+ |6: // Overflow.
+ | mov TMPRd, RCd // end = len
+ | jmp <2
+ |
+ |7: // Negative start or underflow.
+ | je >8
+ | add RAd, RCd // start = start+(len+1)
+ | add RAd, 1
+ | jg <3 // start > 0?
+ |8: // Underflow.
+ | mov RAd, 1 // start = 1
+ | jmp <3
+ |
+ |->fff_emptystr: // Range underflow.
+ | xor TMPRd, TMPRd // Zero length. Any ptr in RD is ok.
+ | jmp <4
+ |
+ |.macro ffstring_op, name
+ | .ffunc_1 string_ .. name
+ | ffgccheck
+ |.if X64WIN
+ | mov STR:TMPR, [BASE]
+ | checkstr STR:TMPR, ->fff_fallback
+ |.else
+ | mov STR:CARG2, [BASE]
+ | checkstr STR:CARG2, ->fff_fallback
+ |.endif
+ | mov L:RB, SAVE_L
+ | lea SBUF:CARG1, [DISPATCH+DISPATCH_GL(tmpbuf)]
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | mov STR:CARG2, STR:TMPR // Caveat: CARG2 == BASE
+ |.endif
+ | mov RC, SBUF:CARG1->b
+ | mov SBUF:CARG1->L, L:RB
+ | mov SBUF:CARG1->w, RC
+ | mov SAVE_PC, PC
+ | call extern lj_buf_putstr_ .. name
+ | mov CARG1, rax
+ | call extern lj_buf_tostr
+ | jmp ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
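+ |// All bit operations first coerce their arguments to int32: adding the
+ |// bias 2^52+2^51 (sseconst_tobit) leaves the wrapped integer value in
+ |// the low 32 bits of the double's mantissa.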
+ |.macro .ffunc_bit, name, kind, fdef
+ | fdef name
+ |.if kind == 2
+ | sseconst_tobit xmm1, RB
+ |.endif
+ |.if DUALNUM
+ | mov RB, [BASE]
+ | checkint RB, >1
+ |.if kind > 0
+ | jmp >2
+ |.else
+ | jmp ->fff_resbit
+ |.endif
+ |1:
+ | ja ->fff_fallback
+ | movd xmm0, RB
+ |.else
+ | checknumtp [BASE], ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ |.endif
+ |.if kind < 2
+ | sseconst_tobit xmm1, RB
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RBd, xmm0
+ |2:
+ |.endmacro
+ |
+ |.macro .ffunc_bit, name, kind
+ | .ffunc_bit name, kind, .ffunc_1
+ |.endmacro
+ |
+ |.ffunc_bit bit_tobit, 0
+ | jmp ->fff_resbit
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name, 2
+ | mov TMPRd, NARGS:RDd // Save for fallback.
+ | lea RD, [BASE+NARGS:RD*8-16]
+ |1:
+ | cmp RD, BASE
+ | jbe ->fff_resbit
+ |.if DUALNUM
+ | mov RA, [RD]
+ | checkint RA, >2
+ | ins RBd, RAd
+ | sub RD, 8
+ | jmp <1
+ |2:
+ | ja ->fff_fallback_bit_op
+ | movd xmm0, RA
+ |.else
+ | checknumtp [RD], ->fff_fallback_bit_op
+ | movsd xmm0, qword [RD]
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RAd, xmm0
+ | ins RBd, RAd
+ | sub RD, 8
+ | jmp <1
+ |.endmacro
+ |
+ |.ffunc_bit_op bit_band, and
+ |.ffunc_bit_op bit_bor, or
+ |.ffunc_bit_op bit_bxor, xor
+ |
+ |.ffunc_bit bit_bswap, 1
+ | bswap RBd
+ | jmp ->fff_resbit
+ |
+ |.ffunc_bit bit_bnot, 1
+ | not RBd
+ |.if DUALNUM
+ | jmp ->fff_resbit
+ |.else
+ |->fff_resbit:
+ | cvtsi2sd xmm0, RBd
+ | jmp ->fff_resxmm0
+ |.endif
+ |
+ |->fff_fallback_bit_op:
+ | mov NARGS:RDd, TMPRd // Restore for fallback.
+ | jmp ->fff_fallback
+ |
+ |.macro .ffunc_bit_sh, name, ins
+ |.if DUALNUM
+ | .ffunc_bit name, 1, .ffunc_2
+ | // Note: no inline conversion from number for 2nd argument!
+ | mov RA, [BASE+8]
+ | checkint RA, ->fff_fallback
+ |.else
+ | .ffunc_nn name
+ | sseconst_tobit xmm2, RB
+ | addsd xmm0, xmm2
+ | addsd xmm1, xmm2
+ | movd RBd, xmm0
+ | movd RAd, xmm1
+ |.endif
+ | ins RBd, cl // Assumes RA is ecx.
+ | jmp ->fff_resbit
+ |.endmacro
+ |
+ |.ffunc_bit_sh bit_lshift, shl
+ |.ffunc_bit_sh bit_rshift, shr
+ |.ffunc_bit_sh bit_arshift, sar
+ |.ffunc_bit_sh bit_rol, rol
+ |.ffunc_bit_sh bit_ror, ror
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback_2:
+ | mov NARGS:RDd, 1+2 // Other args are ignored, anyway.
+ | jmp ->fff_fallback
+ |->fff_fallback_1:
+ | mov NARGS:RDd, 1+1 // Other args are ignored, anyway.
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RD = nargs+1
+ | mov L:RB, SAVE_L
+ | mov PC, [BASE-8] // Fallback may overwrite PC.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler.
+ | mov L:RB->top, RD
+ | mov CFUNC:RD, [BASE-16]
+ | cleartp CFUNC:RD
+ | cmp RA, L:RB->maxstack
+ | ja >5 // Need to grow stack.
+ | mov CARG1, L:RB
+ | call aword CFUNC:RD->f // (lua_State *L)
+ | mov BASE, L:RB->base
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | test RDd, RDd; jg ->fff_res // Returned nresults+1?
+ |1:
+ | mov RA, L:RB->top
+ | sub RA, BASE
+ | shr RAd, 3
+ | test RDd, RDd
+ | lea NARGS:RDd, [RAd+1]
+ | mov LFUNC:RB, [BASE-16]
+ | jne ->vm_call_tail // Returned -1?
+ | cleartp LFUNC:RB
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | mov RA, BASE
+ | test PCd, FRAME_TYPE
+ | jnz >3
+ | movzx RBd, PC_RA
+ | neg RB
+ | lea BASE, [BASE+RB*8-16] // base = base - (RB+2)*8
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |3:
+ | mov RB, PC
+ | and RB, -8
+ | sub BASE, RB
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov CARG2d, LUA_MINSTACK
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | xor RDd, RDd // Simulate a return 0.
+ | jmp <1 // Dumb retry (goes through ff first).
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RD = nargs+1
+ | pop RB // Must keep stack at same level.
+ | mov TMP1, RB // Save return address
+ | mov L:RB, SAVE_L
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov CARG1, L:RB
+ | mov L:RB->top, RD
+ | call extern lj_gc_step // (lua_State *L)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | mov RB, TMP1
+ | push RB // Restore return address.
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_VMEVENT // No recording while in vmevent.
+ | jnz >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | test RDL, HOOK_ACTIVE
+ | jnz >1
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >1
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jmp >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ | jmp >1
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | movzx RDd, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ |
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >5
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jz >1
+ | test RDL, LUA_MASKLINE
+ | jz >5
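+ | // Reached when the instruction count hits zero or a line hook is
+ | // pending; lj_dispatch_ins determines which hooks actually fire.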
+ |1:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC // Caveat: CARG2 == BASE
+ | mov CARG1, L:RB
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call extern lj_dispatch_ins // (lua_State *L, const BCIns *pc)
+ |3:
+ | mov BASE, L:RB->base
+ |4:
+ | movzx RAd, PC_RA
+ |5:
+ | movzx OP, PC_OP
+ | movzx RDd, PC_RD
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |
+ |->cont_hook: // Continue from hook yield.
+ | add PC, 4
+ | mov RA, [RB-40]
+ | mov MULTRES, RAd // Restore MULTRES for *M ins.
+ | jmp <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | mov LFUNC:RB, [BASE-16] // Same as curr_topL(L).
+ | cleartp LFUNC:RB
+ | mov RB, LFUNC:RB->pc
+ | movzx RDd, byte [RB+PC2PROTO(framesize)]
+ | lea RD, [BASE+RD*8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov CARG2, PC
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_trace_hot // (jit_State *J, const BCIns *pc)
+ | jmp <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov SAVE_PC, PC
+ |.if JIT
+ | jmp >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | mov SAVE_PC, PC
+ | or PC, 1 // Marker for hot call.
+ |1:
+ |.endif
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov CARG2, PC
+ | mov CARG1, L:RB
+ | call extern lj_dispatch_call // (lua_State *L, const BCIns *pc)
+ | // ASMFunction returned in eax/rax (RD).
+ | mov SAVE_PC, 0 // Invalidate for subsequent line hook.
+ |.if JIT
+ | and PC, -2
+ |.endif
+ | mov BASE, L:RB->base
+ | mov RA, RD
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | mov RB, RA
+ | movzx RAd, PC_RA
+ | shr RDd, 3
+ | add NARGS:RDd, 1
+ | jmp RB
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // BASE = base, RC = result, RB = mbase
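+ | // Copy MULTRES-1 results down to the call base, then either jump to an
+ | // already-stitched trace (link != 0), return to the interpreter if the
+ | // link is blacklisted (link == traceno), or ask lj_dispatch_stitch to
+ | // record a new one.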
+ | mov TRACE:ITYPE, [RB-40] // Save previous trace.
+ | cleartp TRACE:ITYPE
+ | mov TMPRd, MULTRES
+ | movzx RAd, PC_RA
+ | lea RA, [BASE+RA*8] // Call base.
+ | sub TMPRd, 1
+ | jz >2
+ |1: // Move results down.
+ | mov RB, [RC]
+ | mov [RA], RB
+ | add RC, 8
+ | add RA, 8
+ | sub TMPRd, 1
+ | jnz <1
+ |2:
+ | movzx RCd, PC_RA
+ | movzx RBd, PC_RB
+ | add RC, RB
+ | lea RC, [BASE+RC*8-8]
+ |3:
+ | cmp RC, RA
+ | ja >9 // More results wanted?
+ |
+ | test TRACE:ITYPE, TRACE:ITYPE
+ | jz ->cont_nop
+ | movzx RBd, word TRACE:ITYPE->traceno
+ | movzx RDd, word TRACE:ITYPE->link
+ | cmp RDd, RBd
+ | je ->cont_nop // Blacklisted.
+ | test RDd, RDd
+ | jne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | mov [DISPATCH+DISPATCH_J(exitno)], RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | call extern lj_dispatch_stitch // (jit_State *J, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | jmp ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | mov aword [RA], LJ_TNIL
+ | add RA, 8
+ | jmp <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov CARG2, PC // Caveat: CARG2 == BASE
+ | mov CARG1, L:RB
+ | call extern lj_dispatch_profile // (lua_State *L, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | sub PC, 4
+ | jmp ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Called from an exit stub with the exit number on the stack.
+ |// The 16 bit exit number is stored with two (sign-extended) push imm8.
+ |->vm_exit_handler:
+ |.if JIT
+ | push r13; push r12
+ | push r11; push r10; push r9; push r8
+ | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
+ | push rbx; push rdx; push rcx; push rax
+ | movzx RCd, byte [rbp-8] // Reconstruct exit number.
+ | mov RCH, byte [rbp-16]
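+ | // Each sign-extended push imm8 occupied one 8-byte slot; their low
+ | // bytes hold the two halves of the exit number. Reassemble it in RCd,
+ | // then reuse those two slots to save r15/r14.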
+ | mov [rbp-8], r15; mov [rbp-16], r14
+ | // DISPATCH is preserved on-trace in LJ_GC64 mode.
+ | mov RAd, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number.
+ | set_vmstate EXIT
+ | mov [DISPATCH+DISPATCH_J(exitno)], RCd
+ | mov [DISPATCH+DISPATCH_J(parent)], RAd
+ |.if X64WIN
+ | sub rsp, 16*8+4*8 // Room for SSE regs + save area.
+ |.else
+ | sub rsp, 16*8 // Room for SSE regs.
+ |.endif
+ | add rbp, -128
+ | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14
+ | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12
+ | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10
+ | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8
+ | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6
+ | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4
+ | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2
+ | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0
+ | // Caveat: RB is rbp.
+ | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)]
+ | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RB
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | lea CARG2, [rsp+4*8]
+ |.else
+ | mov CARG2, rsp
+ |.endif
+ | lea CARG1, [DISPATCH+GG_DISP2J]
+ | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | call extern lj_trace_exit // (jit_State *J, ExitState *ex)
+ | // MULTRES or negated error code returned in eax (RD).
+ | mov RA, L:RB->cframe
+ | and RA, CFRAME_RAWMASK
+ | mov [RA+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield).
+ | mov BASE, L:RB->base
+ | mov PC, [RA+CFRAME_OFS_PC] // Get SAVE_PC.
+ | jmp >1
+ |.endif
+ |->vm_exit_interp:
+ | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
+ |.if JIT
+ | // Restore additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | lea RA, [rsp+10*16+4*8]
+ |1:
+ | movdqa xmm15, [RA-10*16]
+ | movdqa xmm14, [RA-9*16]
+ | movdqa xmm13, [RA-8*16]
+ | movdqa xmm12, [RA-7*16]
+ | movdqa xmm11, [RA-6*16]
+ | movdqa xmm10, [RA-5*16]
+ | movdqa xmm9, [RA-4*16]
+ | movdqa xmm8, [RA-3*16]
+ | movdqa xmm7, [RA-2*16]
+ | mov rsp, RA // Reposition stack to C frame.
+ | movdqa xmm6, [RA-1*16]
+ | mov r15, CSAVE_1
+ | mov r14, CSAVE_2
+ | mov r13, CSAVE_3
+ | mov r12, CSAVE_4
+ |.else
+ | lea RA, [rsp+16]
+ |1:
+ | mov r13, [RA-8]
+ | mov r12, [RA]
+ | mov rsp, RA // Reposition stack to C frame.
+ |.endif
+ | test RDd, RDd; js >9 // Check for error from exit.
+ | mov L:RB, SAVE_L
+ | mov MULTRES, RDd
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | mov L:RB->base, BASE
+ | mov qword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | set_vmstate INTERP
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | mov RCd, [PC]
+ | movzx RAd, RCH
+ | movzx OP, RCL
+ | add PC, 4
+ | shr RCd, 16
+ | cmp OP, BC_FUNCF // Function header?
+ | jb >3
+ | cmp OP, BC_FUNCC+2 // Fast function?
+ | jae >4
+ |2:
+ | mov RCd, MULTRES // RC/RD holds nres+1.
+ |3:
+ | jmp aword [DISPATCH+OP*8]
+ |
+ |4: // Check frame below fast function.
+ | mov RC, [BASE-8]
+ | test RCd, FRAME_TYPE
+ | jnz <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | movzx RCd, byte [RC-3]
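+ | // [RC-3] is the RA operand byte of the caller's bytecode instruction,
+ | // i.e. the slot delta back to the enclosing Lua frame.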
+ | neg RC
+ | mov LFUNC:KBASE, [BASE+RC*8-32]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <2
+ |
+ |9: // Rethrow error from the right C frame.
+ | mov CARG2d, RDd
+ | mov CARG1, L:RB
+ | neg CARG2d
+ | call extern lj_err_trace // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
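+ |// Rounding trick: for |x| < 2^52 the sum (|x| + 2^52) - 2^52 rounds x to
+ |// an integer in the current (round-to-nearest) mode, e.g. 3.7 -> 4.0.
+ |// A compare-and-correct step then turns this into floor/ceil/trunc, and
+ |// the saved sign bit is merged back so -0 stays -0.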
+ |.macro vm_round, name, mode, cond
+ |->name:
+ |->name .. _sse:
+ | sseconst_abs xmm2, RD
+ | sseconst_2p52 xmm3, RD
+ | movaps xmm1, xmm0
+ | andpd xmm1, xmm2 // |x|
+ | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ |.if mode == 2 // trunc(x)?
+ | movaps xmm0, xmm1
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | sseconst_1 xmm3, RD
+ | cmpsd xmm0, xmm1, 1 // |x| < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract -1.
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ |.else
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm3, RD
+ | .if mode == 1 // ceil(x)?
+ | cmpsd xmm0, xmm1, 6 // x > result?
+ | andpd xmm0, xmm3
+ | addsd xmm1, xmm0 // If yes, add 1.
+ | orpd xmm1, xmm2 // Merge sign bit back in (again).
+ | .else // floor(x)?
+ | cmpsd xmm0, xmm1, 1 // x < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract 1.
+ | .endif
+ |.endif
+ | movaps xmm0, xmm1
+ |1:
+ | ret
+ |.endmacro
+ |
+ | vm_round vm_floor, 0, 1
+ | vm_round vm_ceil, 1, JIT
+ | vm_round vm_trunc, 2, JIT
+ |
+ |// FP modulo x%y. Called by BC_MOD* and vm_arith.
+ |->vm_mod:
+ |// Args in xmm0/xmm1, return value in xmm0.
+ |// Caveat: xmm0-xmm5 and RC (eax) modified!
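+ |// Computes x - floor(x/y)*y with the same 2^52 trick as vm_round above.
+ |// Example: 5 % -3 -> floor(5/-3) = -2 -> 5 - (-2)*(-3) = -1, matching
+ |// Lua's rule that the result takes the sign of the divisor.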
+ | movaps xmm5, xmm0
+ | divsd xmm0, xmm1
+ | sseconst_abs xmm2, RD
+ | sseconst_2p52 xmm3, RD
+ | movaps xmm4, xmm0
+ | andpd xmm4, xmm2 // |x/y|
+ | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52
+ | subsd xmm4, xmm3
+ | orpd xmm4, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm2, RD
+ | cmpsd xmm0, xmm4, 1 // x/y < result?
+ | andpd xmm0, xmm2
+ | subsd xmm4, xmm0 // If yes, subtract 1.0.
+ | movaps xmm0, xmm5
+ | mulsd xmm1, xmm4
+ | subsd xmm0, xmm1
+ | ret
+ |1:
+ | mulsd xmm1, xmm0
+ | movaps xmm0, xmm5
+ | subsd xmm0, xmm1
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// int lj_vm_cpuid(uint32_t f, uint32_t res[4])
+ |->vm_cpuid:
+ | mov eax, CARG1d
+ | .if X64WIN; push rsi; mov rsi, CARG2; .endif
+ | push rbx
+ | xor ecx, ecx
+ | cpuid
+ | mov [rsi], eax
+ | mov [rsi+4], ebx
+ | mov [rsi+8], ecx
+ | mov [rsi+12], edx
+ | pop rbx
+ | .if X64WIN; pop rsi; .endif
+ | ret
+ |
+ |.define NEXT_TAB, TAB:CARG1
+ |.define NEXT_IDX, CARG2d
+ |.define NEXT_IDXa, CARG2
+ |.define NEXT_PTR, RC
+ |.define NEXT_PTRd, RCd
+ |.define NEXT_TMP, CARG3
+ |.define NEXT_ASIZE, CARG4d
+ |.macro NEXT_RES_IDXL, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.if X64WIN
+ |.define NEXT_RES_PTR, [rsp+aword*5]
+ |.macro NEXT_RES_IDX, op2; add NEXT_IDX, op2; .endmacro
+ |.else
+ |.define NEXT_RES_PTR, [rsp+aword*1]
+ |.macro NEXT_RES_IDX, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.endif
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in edx.
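+ |// Iteration protocol: idx < asize walks the array part; idx - asize
+ |// indexes the hash nodes. Array-part results are copied to scratch stack
+ |// slots (NEXT_RES_PTR); hash-part hits return a pointer into the node
+ |// itself. A nil key signals the end of traversal.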
+ |->vm_next:
+ |.if JIT
+ | mov NEXT_ASIZE, NEXT_TAB->asize
+ |1: // Traverse array part.
+ | cmp NEXT_IDX, NEXT_ASIZE; jae >5
+ | mov NEXT_TMP, NEXT_TAB->array
+ | mov NEXT_TMP, qword [NEXT_TMP+NEXT_IDX*8]
+ | cmp NEXT_TMP, LJ_TNIL; je >2
+ | lea NEXT_PTR, NEXT_RES_PTR
+ | mov qword [NEXT_PTR], NEXT_TMP
+ |.if DUALNUM
+ | setint NEXT_TMP, NEXT_IDXa
+ | mov qword [NEXT_PTR+qword*1], NEXT_TMP
+ |.else
+ | cvtsi2sd xmm0, NEXT_IDX
+ | movsd qword [NEXT_PTR+qword*1], xmm0
+ |.endif
+ | NEXT_RES_IDX 1
+ | ret
+ |2: // Skip holes in array part.
+ | add NEXT_IDX, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub NEXT_IDX, NEXT_ASIZE
+ |6:
+ | cmp NEXT_IDX, NEXT_TAB->hmask; ja >9
+ | imul NEXT_PTRd, NEXT_IDX, #NODE
+ | add NODE:NEXT_PTR, NEXT_TAB->node
+ | cmp qword NODE:NEXT_PTR->val, LJ_TNIL; je >7
+ | NEXT_RES_IDXL NEXT_ASIZE+1
+ | ret
+ |7: // Skip holes in hash part.
+ | add NEXT_IDX, 1
+ | jmp <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | NEXT_RES_IDX NEXT_ASIZE
+ | lea NEXT_PTR, NEXT_RES_PTR
+ | mov qword [NEXT_PTR+qword*1], LJ_TNIL
+ | ret
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Assertions ---------------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->assert_bad_for_arg_type:
+#ifdef LUA_USE_ASSERT
+ | int3
+#endif
+ | int3
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in ah/al.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ | saveregs_ // ebp/rbp already saved. ebp now holds global_State *.
+ | lea DISPATCH, [ebp+GG_G2DISP]
+ | mov CTSTATE, GL:ebp->ctype_state
+ | movzx eax, ax
+ | mov CTSTATE->cb.slot, eax
+ | mov CTSTATE->cb.gpr[0], CARG1
+ | mov CTSTATE->cb.gpr[1], CARG2
+ | mov CTSTATE->cb.gpr[2], CARG3
+ | mov CTSTATE->cb.gpr[3], CARG4
+ | movsd qword CTSTATE->cb.fpr[0], xmm0
+ | movsd qword CTSTATE->cb.fpr[1], xmm1
+ | movsd qword CTSTATE->cb.fpr[2], xmm2
+ | movsd qword CTSTATE->cb.fpr[3], xmm3
+ |.if X64WIN
+ | lea rax, [rsp+CFRAME_SIZE+4*8]
+ |.else
+ | lea rax, [rsp+CFRAME_SIZE]
+ | mov CTSTATE->cb.gpr[4], CARG5
+ | mov CTSTATE->cb.gpr[5], CARG6
+ | movsd qword CTSTATE->cb.fpr[4], xmm4
+ | movsd qword CTSTATE->cb.fpr[5], xmm5
+ | movsd qword CTSTATE->cb.fpr[6], xmm6
+ | movsd qword CTSTATE->cb.fpr[7], xmm7
+ |.endif
+ | mov CTSTATE->cb.stack, rax
+ | mov CARG2, rsp
+ | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok.
+ | mov CARG1, CTSTATE
+ | call extern lj_ccallback_enter // (CTState *cts, void *cf)
+ | // lua_State * returned in eax (RD).
+ | set_vmstate INTERP
+ | mov BASE, L:RD->base
+ | mov RD, L:RD->top
+ | sub RD, BASE
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | shr RD, 3
+ | add RD, 1
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | mov L:RA, SAVE_L
+ | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)]
+ | mov aword CTSTATE->L, L:RA
+ | mov L:RA->base, BASE
+ | mov L:RA->top, RB
+ | mov CARG1, CTSTATE
+ | mov CARG2, RC
+ | call extern lj_ccallback_leave // (CTState *cts, TValue *o)
+ | mov rax, CTSTATE->cb.gpr[0]
+ | movsd xmm0, qword CTSTATE->cb.fpr[0]
+ | jmp ->vm_leave_unw
+ |.endif
+ |
+ |->vm_ffi_call: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ | .type CCSTATE, CCallState, rbx
+ | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1
+ |
+ | // Readjust stack.
+ | mov eax, CCSTATE->spadj
+ | sub rsp, rax
+ |
+ | // Copy stack slots.
+ | movzx ecx, byte CCSTATE->nsp
+ | sub ecx, 1
+ | js >2
+ |1:
+ | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)]
+ | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax
+ | sub ecx, 1
+ | jns <1
+ |2:
+ |
+ | movzx eax, byte CCSTATE->nfpr
+ | mov CARG1, CCSTATE->gpr[0]
+ | mov CARG2, CCSTATE->gpr[1]
+ | mov CARG3, CCSTATE->gpr[2]
+ | mov CARG4, CCSTATE->gpr[3]
+ |.if not X64WIN
+ | mov CARG5, CCSTATE->gpr[4]
+ | mov CARG6, CCSTATE->gpr[5]
+ |.endif
+ | test eax, eax; jz >5
+ | movaps xmm0, CCSTATE->fpr[0]
+ | movaps xmm1, CCSTATE->fpr[1]
+ | movaps xmm2, CCSTATE->fpr[2]
+ | movaps xmm3, CCSTATE->fpr[3]
+ |.if not X64WIN
+ | cmp eax, 4; jbe >5
+ | movaps xmm4, CCSTATE->fpr[4]
+ | movaps xmm5, CCSTATE->fpr[5]
+ | movaps xmm6, CCSTATE->fpr[6]
+ | movaps xmm7, CCSTATE->fpr[7]
+ |.endif
+ |5:
+ |
+ | call aword CCSTATE->func
+ |
+ | mov CCSTATE->gpr[0], rax
+ | movaps CCSTATE->fpr[0], xmm0
+ |.if not X64WIN
+ | mov CCSTATE->gpr[1], rdx
+ | movaps CCSTATE->fpr[1], xmm1
+ |.endif
+ |
+ | mov rbx, [rbp-8]; leave; ret
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |// Note: aligning all instructions does not pay off.
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ |.macro jmp_comp, lt, ge, le, gt, target
+ ||switch (op) {
+ ||case BC_ISLT:
+ | lt target
+ ||break;
+ ||case BC_ISGE:
+ | ge target
+ ||break;
+ ||case BC_ISLE:
+ | le target
+ ||break;
+ ||case BC_ISGT:
+ | gt target
+ ||break;
+ ||default: break; /* Shut up GCC. */
+ ||}
+ |.endmacro
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RD = src2, JMP with RD = target
+ | ins_AD
+ | mov ITYPE, [BASE+RA*8]
+ | mov RB, [BASE+RD*8]
+ | mov RA, ITYPE
+ | mov RD, RB
+ | sar ITYPE, 47
+ | sar RB, 47
+ |.if DUALNUM
+ | cmp ITYPEd, LJ_TISNUM; jne >7
+ | cmp RBd, LJ_TISNUM; jne >8
+ | add PC, 4
+ | cmp RAd, RDd
+ | jmp_comp jge, jl, jg, jle, >9
+ |6:
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja ->vmeta_comp
+ | // RA is a number.
+ | cmp RBd, LJ_TISNUM; jb >1; jne ->vmeta_comp
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, RDd
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | ja ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ | cvtsi2sd xmm1, RAd
+ | movd xmm0, RD
+ | jmp >3
+ |.else
+ | cmp ITYPEd, LJ_TISNUM; jae ->vmeta_comp
+ | cmp RBd, LJ_TISNUM; jae ->vmeta_comp
+ |.endif
+ |1:
+ | movd xmm0, RD
+ |2:
+ | movd xmm1, RA
+ |3:
+ | add PC, 4
+ | ucomisd xmm0, xmm1
+ | // Unordered: all of ZF CF PF set, ordered: PF clear.
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
+ |.if DUALNUM
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ |.else
+ | jmp_comp jbe, ja, jb, jae, >1
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1:
+ | ins_next
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | ins_AD // RA = src1, RD = src2, JMP with RD = target
+ | mov RB, [BASE+RD*8]
+ | mov ITYPE, [BASE+RA*8]
+ | add PC, 4
+ | mov RD, RB
+ | mov RA, ITYPE
+ | sar RB, 47
+ | sar ITYPE, 47
+ |.if DUALNUM
+ | cmp RBd, LJ_TISNUM; jne >7
+ | cmp ITYPEd, LJ_TISNUM; jne >8
+ | cmp RDd, RAd
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RD is not an integer.
+ | ja >5
+ | // RD is a number.
+ | movd xmm1, RD
+ | cmp ITYPEd, LJ_TISNUM; jb >1; jne >5
+ | // RD is a number, RA is an integer.
+ | cvtsi2sd xmm0, RAd
+ | jmp >2
+ |
+ |8: // RD is an integer, RA is not an integer.
+ | ja >5
+ | // RD is an integer, RA is a number.
+ | cvtsi2sd xmm1, RDd
+ | jmp >1
+ |
+ |.else
+ | cmp RBd, LJ_TISNUM; jae >5
+ | cmp ITYPEd, LJ_TISNUM; jae >5
+ | movd xmm1, RD
+ |.endif
+ |1:
+ | movd xmm0, RA
+ |2:
+ | ucomisd xmm0, xmm1
+ |4:
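+ /* Shared tails: ISEQN/ISNEN re-enter at iseqne_fp, ISEQS/ISNES at iseqne_end. */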
+ iseqne_fp:
+ if (vk) {
+ | jp >2 // Unordered means not equal.
+ | jne >2
+ } else {
+ | jp >2 // Unordered means not equal.
+ | je >1
+ }
+ iseqne_end:
+ if (vk) {
+ |1: // EQ: Branch to the target.
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2: // NE: Fallthrough to next instruction.
+ |.if not FFI
+ |3:
+ |.endif
+ } else {
+ |.if not FFI
+ |3:
+ |.endif
+ |2: // NE: Branch to the target.
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1: // EQ: Fallthrough to next instruction.
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ | jmp <9
+ } else {
+ | ins_next
+ }
+ |
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ |5: // Either or both types are not numbers.
+ |.if FFI
+ | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
+ | cmp ITYPEd, LJ_TCDATA; je ->vmeta_equal_cd
+ |.endif
+ | cmp RA, RD
+ | je <1 // Same GCobjs or pvalues?
+ | cmp RBd, ITYPEd
+ | jne <2 // Not the same type?
+ | cmp RBd, LJ_TISTABUD
+ | ja <2 // Different objects and not table/ud?
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | cleartp TAB:RA
+ | mov TAB:RB, TAB:RA->metatable
+ | test TAB:RB, TAB:RB
+ | jz <2 // No metatable?
+ | test byte TAB:RB->nomm, 1<<MM_eq
+ | jnz <2 // Or 'no __eq' flag set?
+ if (vk) {
+ | xor RBd, RBd // ne = 0
+ } else {
+ | mov RBd, 1 // ne = 1
+ }
+ | jmp ->vmeta_equal // Handle __eq metamethod.
+ } else {
+ |.if FFI
+ |3:
+ | cmp ITYPEd, LJ_TCDATA
+ if (LJ_DUALNUM && vk) {
+ | jne <9
+ } else {
+ | jne <2
+ }
+ | jmp ->vmeta_equal_cd
+ |.endif
+ }
+ break;
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | ins_AND // RA = src, RD = str const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ | checkstr RB, >3
+ | cmp RB, [KBASE+RD*8]
+ iseqne_test:
+ if (vk) {
+ | jne >2
+ } else {
+ | je >1
+ }
+ goto iseqne_end;
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | ins_AD // RA = src, RD = num const, JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | add PC, 4
+ |.if DUALNUM
+ | checkint RB, >7
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >8
+ | cmp RBd, RDd
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja >3
+ | // RA is a number.
+ | mov RD, [KBASE+RD*8]
+ | checkint RD, >1
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, RDd
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | cvtsi2sd xmm0, RBd
+ | movd xmm1, RD
+ | ucomisd xmm0, xmm1
+ | jmp >4
+ |1:
+ | movd xmm0, RD
+ |.else
+ | checknum RB, >3
+ |1:
+ | movsd xmm0, qword [KBASE+RD*8]
+ |.endif
+ |2:
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |4:
+ goto iseqne_fp;
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add PC, 4
+ | cmp RBd, RDd
+ if (!LJ_HASFFI) goto iseqne_test;
+ if (vk) {
+ | jne >3
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ |3:
+ | cmp RBd, LJ_TCDATA; jne <2
+ | jmp ->vmeta_equal_cd
+ } else {
+ | je >2
+ | cmp RBd, LJ_TCDATA; je ->vmeta_equal_cd
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
+ | mov ITYPE, [BASE+RD*8]
+ | add PC, 4
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov RB, ITYPE
+ }
+ | sar ITYPE, 47
+ | cmp ITYPEd, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISTC) {
+ | jae >1
+ } else {
+ | jb >1
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov [BASE+RA*8], RB
+ }
+ | movzx RDd, PC_RD
+ | branchPC RD
+ |1: // Fallthrough to the next instruction.
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | ins_AD // RA = src, RD = -type
+ | mov RB, [BASE+RA*8]
+ | sar RB, 47
+ | add RBd, RDd
+ | jne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | ins_AD // RA = src, RD = -(TISNUM-1)
+ | checknumtp [BASE+RA*8], ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | mov [BASE+RA*8], RB
+ | ins_next_
+ break;
+ case BC_NOT:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ | sar RB, 47
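+ | // Branchless: CF = (tag < LJ_TISTRUECOND), i.e. set for truthy values,
+ | // so RC = 2-CF; shl 47 + not then forms the primitive TValue
+ | // (RC=1 -> false, RC=2 -> true).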
+ | mov RCd, 2
+ | cmp RB, LJ_TISTRUECOND
+ | sbb RCd, 0
+ | shl RC, 47
+ | not RC
+ | mov [BASE+RA*8], RC
+ | ins_next
+ break;
+ case BC_UNM:
+ | ins_AD // RA = dst, RD = src
+ | mov RB, [BASE+RD*8]
+ |.if DUALNUM
+ | checkint RB, >5
+ | neg RBd
+ | jo >4
+ | setint RB
+ |9:
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |4:
+ | mov64 RB, U64x(41e00000,00000000) // -(-2^31) overflowed: use the double 2^31.
+ | jmp <9
+ |5:
+ | ja ->vmeta_unm
+ |.else
+ | checknum RB, ->vmeta_unm
+ |.endif
+ | mov64 RD, U64x(80000000,00000000)
+ | xor RB, RD
+ |.if DUALNUM
+ | jmp <9
+ |.else
+ | mov [BASE+RA*8], RB
+ | ins_next
+ |.endif
+ break;
+ case BC_LEN:
+ | ins_AD // RA = dst, RD = src
+ | mov RD, [BASE+RD*8]
+ | checkstr RD, >2
+ |.if DUALNUM
+ | mov RDd, dword STR:RD->len
+ |1:
+ | setint RD
+ | mov [BASE+RA*8], RD
+ |.else
+ | xorps xmm0, xmm0
+ | cvtsi2sd xmm0, dword STR:RD->len
+ |1:
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ |2:
+ | cmp ITYPEd, LJ_TTAB; jne ->vmeta_len
+ | mov TAB:CARG1, TAB:RD
+#if LJ_52
+ | mov TAB:RB, TAB:RD->metatable
+ | cmp TAB:RB, 0
+ | jnz >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | mov RB, BASE // Save BASE.
+ | call extern lj_tab_len // (GCtab *t)
+ | // Length of table returned in eax (RD).
+ |.if DUALNUM
+ | // Nothing to do.
+ |.else
+ | cvtsi2sd xmm0, RDd
+ |.endif
+ | mov BASE, RB // Restore BASE.
+ | movzx RAd, PC_RA
+ | jmp <1
+#if LJ_52
+ |9: // Check for __len.
+ | test byte TAB:RB->nomm, 1<<MM_len
+ | jnz <3
+ | jmp ->vmeta_len // 'no __len' flag NOT set: check.
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
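+ | // vk is the operand mix derived from the opcode: 0 = VN (reg, num
+ | // const), 1 = NV (num const, reg; operands swapped), 2 = VV (reg, reg).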
+ |.macro ins_arithpre, sseins, ssereg
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_vn
+ | .if DUALNUM
+ | checknumtp [KBASE+RC*8], ->vmeta_arith_vn
+ | .endif
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [KBASE+RC*8]
+ || break;
+ ||case 1:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_nv
+ | .if DUALNUM
+ | checknumtp [KBASE+RC*8], ->vmeta_arith_nv
+ | .endif
+ | movsd xmm0, qword [KBASE+RC*8]
+ | sseins ssereg, qword [BASE+RB*8]
+ || break;
+ ||default:
+ | checknumtp [BASE+RB*8], ->vmeta_arith_vv
+ | checknumtp [BASE+RC*8], ->vmeta_arith_vv
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [BASE+RC*8]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [KBASE+RC*8]
+ | checkint RB, ->vmeta_arith_vno
+ | checkint RC, ->vmeta_arith_vno
+ | intins RBd, RCd; jo ->vmeta_arith_vno
+ || break;
+ ||case 1:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [KBASE+RC*8]
+ | checkint RB, ->vmeta_arith_nvo
+ | checkint RC, ->vmeta_arith_nvo
+ | intins RCd, RBd; jo ->vmeta_arith_nvo
+ || break;
+ ||default:
+ | mov RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checkint RB, ->vmeta_arith_vvo
+ | checkint RC, ->vmeta_arith_vvo
+ | intins RBd, RCd; jo ->vmeta_arith_vvo
+ || break;
+ ||}
+ ||if (vk == 1) {
+ | setint RC
+ | mov [BASE+RA*8], RC
+ ||} else {
+ | setint RB
+ | mov [BASE+RA*8], RB
+ ||}
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arithpost
+ | movsd qword [BASE+RA*8], xmm0
+ |.endmacro
+ |
+ |.macro ins_arith, sseins
+ | ins_arithpre sseins, xmm0
+ | ins_arithpost
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arith, intins, sseins
+ |.if DUALNUM
+ | ins_arithdn intins
+ |.else
+ | ins_arith, sseins
+ |.endif
+ |.endmacro
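+ | // Two ins_arith forms: the 1-op form is FP-only (div has no integer
+ | // fast path), the 2-op form picks integer vs. FP under DUALNUM.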
+
+ | // RA = dst, RB = src1 or num const, RC = src2 or num const
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add, addsd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub, subsd
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith imul, mulsd
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith divsd
+ break;
+ case BC_MODVN:
+ | ins_arithpre movsd, xmm1
+ |->BC_MODVN_Z:
+ | call ->vm_mod
+ | ins_arithpost
+ | ins_next
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre movsd, xmm1
+ | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | ins_arithpre movsd, xmm1
+ | mov RB, BASE
+ | call extern pow
+ | movzx RAd, PC_RA
+ | mov BASE, RB
+ | ins_arithpost
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | ins_ABC // RA = dst, RB = src_start, RC = src_end
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | lea CARG2, [BASE+RC*8]
+ | mov CARG3d, RCd
+ | sub CARG3d, RBd
+ |->BC_CAT_Z:
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jnz ->vmeta_binop
+ | movzx RBd, PC_RB // Copy result to Stk[RA] from Stk[RB].
+ | movzx RAd, PC_RA
+ | mov RC, [BASE+RB*8]
+ | mov [BASE+RA*8], RC
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov RD, [KBASE+RD*8]
+ | settp RD, LJ_TSTR
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | ins_AND // RA = dst, RD = cdata const (~)
+ | mov RD, [KBASE+RD*8]
+ | settp RD, LJ_TCDATA
+ | mov [BASE+RA*8], RD
+ | ins_next
+ |.endif
+ break;
+ case BC_KSHORT:
+ | ins_AD // RA = dst, RD = signed int16 literal
+ |.if DUALNUM
+ | movsx RDd, RDW
+ | setint RD
+ | mov [BASE+RA*8], RD
+ |.else
+ | movsx RDd, RDW // Sign-extend literal.
+ | cvtsi2sd xmm0, RDd
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ break;
+ case BC_KNUM:
+ | ins_AD // RA = dst, RD = num const
+ | movsd xmm0, qword [KBASE+RD*8]
+ | movsd qword [BASE+RA*8], xmm0
+ | ins_next
+ break;
+ case BC_KPRI:
+ | ins_AD // RA = dst, RD = primitive type (~)
+ | shl RD, 47
+ | not RD
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KNIL:
+ | ins_AD // RA = dst_start, RD = dst_end
+ | lea RA, [BASE+RA*8+8]
+ | lea RD, [BASE+RD*8]
+ | mov RB, LJ_TNIL
+ | mov [RA-8], RB // Sets minimum 2 slots.
+ |1:
+ | mov [RA], RB
+ | add RA, 8
+ | cmp RA, RD
+ | jbe <1
+ | ins_next
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | ins_AD // RA = dst, RD = upvalue #
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RD*8+offsetof(GCfuncL, uvptr)]
+ | mov RB, UPVAL:RB->v
+ | mov RD, [RB]
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
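+ /* Delta from the upvalue's value slot back to its 'marked' byte. */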
+ | ins_AD // RA = upvalue #, RD = src
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | cmp byte UPVAL:RB->closed, 0
+ | mov RB, UPVAL:RB->v
+ | mov RA, [BASE+RD*8]
+ | mov [RB], RA
+ | jz >1
+ | // Check barrier for closed upvalue.
+ | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Upvalue is black. Check if new value is collectable and white.
+ | mov RD, RA
+ | sar RD, 47
+ | sub RDd, LJ_TISGCV
+ | cmp RDd, LJ_TNUMX - LJ_TISGCV // tvisgcv(v)
+ | jbe <1
+ | cleartp GCOBJ:RA
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if not X64WIN
+ | mov CARG2, RB
+ | mov RB, BASE // Save BASE.
+ |.else
+ | xchg CARG2, RB // Save BASE (CARG2 == BASE).
+ |.endif
+ | lea GL:CARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ | ins_AND // RA = upvalue #, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | mov STR:RA, [KBASE+RD*8]
+ | mov RD, UPVAL:RB->v
+ | settp STR:ITYPE, STR:RA, LJ_TSTR
+ | mov [RD], STR:ITYPE
+ | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str)
+ | jz <1
+ | cmp byte UPVAL:RB->closed, 0
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov RB, BASE // Save BASE (CARG2 == BASE).
+ | mov CARG2, RD
+ | lea GL:CARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+ case BC_USETN:
+ | ins_AD // RA = upvalue #, RD = num const
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | movsd xmm0, qword [KBASE+RD*8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ | movsd qword [RA], xmm0
+ | ins_next
+ break;
+ case BC_USETP:
+ | ins_AD // RA = upvalue #, RD = primitive type (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov UPVAL:RB, [LFUNC:RB+RA*8+offsetof(GCfuncL, uvptr)]
+ | shl RD, 47
+ | not RD
+ | mov RA, UPVAL:RB->v
+ | mov [RA], RD
+ | ins_next
+ break;
+ case BC_UCLO:
+ | ins_AD // RA = level, RD = target
+ | branchPC RD // Do this first to free RD.
+ | mov L:RB, SAVE_L
+ | cmp aword L:RB->openupval, 0
+ | je >1
+ | mov L:RB->base, BASE
+ | lea CARG2, [BASE+RA*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | call extern lj_func_closeuv // (lua_State *L, TValue *level)
+ | mov BASE, L:RB->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | ins_AND // RA = dst, RD = proto const (~) (holding function prototype)
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG3, [BASE-16]
+ | cleartp CARG3
+ | mov CARG2, [KBASE+RD*8] // Fetch GCproto *.
+ | mov CARG1, L:RB
+ | mov SAVE_PC, PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call extern lj_func_newL_gc
+ | // GCfuncL * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp LFUNC:RC, LJ_TFUNC
+ | mov [BASE+RA*8], LFUNC:RC
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ | ins_AD // RA = dst, RD = hbits|asize
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov SAVE_PC, PC
+ | jae >5
+ |1:
+ | mov CARG3d, RDd
+ | and RDd, 0x7ff
+ | shr CARG3d, 11
+ | cmp RDd, 0x7ff
+ | je >3
+ |2:
+ | mov L:CARG1, L:RB
+ | mov CARG2d, RDd
+ | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp TAB:RC, LJ_TTAB
+ | mov [BASE+RA*8], TAB:RC
+ | ins_next
+ |3: // Turn 0x7ff into 0x801.
+ | mov RDd, 0x801
+ | jmp <2
+ |5:
+ | mov L:CARG1, L:RB
+ | call extern lj_gc_step_fixtop // (lua_State *L)
+ | movzx RDd, PC_RD
+ | jmp <1
+ break;
+ case BC_TDUP:
+ | ins_AND // RA = dst, RD = table const (~) (holding template table)
+ | mov L:RB, SAVE_L
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | mov SAVE_PC, PC
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov L:RB->base, BASE
+ | jae >3
+ |2:
+ | mov TAB:CARG2, [KBASE+RD*8] // Caveat: CARG2 == BASE
+ | mov L:CARG1, L:RB // Caveat: CARG1 == RA
+ | call extern lj_tab_dup // (lua_State *L, Table *kt)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA
+ | settp TAB:RC, LJ_TTAB
+ | mov [BASE+RA*8], TAB:RC
+ | ins_next
+ |3:
+ | mov L:CARG1, L:RB
+ | call extern lj_gc_step_fixtop // (lua_State *L)
+ | movzx RDd, PC_RD // Need to reload RD.
+ | not RD
+ | jmp <2
+ break;
+
+ case BC_GGET:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_GSET:
+ | ins_AND // RA = src, RD = str const (~)
+ | mov LFUNC:RB, [BASE-16]
+ | cleartp LFUNC:RB
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*8]
+ | jmp ->BC_TSETS_Z
+ break;
+
+ case BC_TGETV:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tgetv // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tgetv
+ | cleartp STR:RC
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tgets
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->sid
+ | imul TMPRd, #NODE
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >4
+ | // Get node value.
+ | mov ITYPE, NODE:TMPR->val
+ | cmp ITYPE, LJ_TNIL
+ | je >5 // Key found, but nil value?
+ |2:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |4: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ | mov ITYPE, LJ_TNIL
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <2 // No metatable: done.
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jnz <2 // 'no __index' flag set: done.
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tgetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ | mov ITYPE, [RC]
+ | cmp ITYPE, LJ_TNIL
+ | je >2
+ |1:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | jmp <1
+ break;
+ case BC_TGETR:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RCd, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tgetr // Not in array part? Use fallback.
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ |->BC_TGETR_Z:
+ | mov ITYPE, [RC]
+ |->BC_TGETR2_Z:
+ | mov [BASE+RA*8], ITYPE
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | mov RC, [BASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsetv
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movd xmm0, RC
+ | cvttsd2si RCd, xmm0
+ | cvtsi2sd xmm1, RCd
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RCd, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov RB, [BASE+RA*8]
+ | mov [RC], RB
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |5: // String key?
+ | cmp ITYPEd, LJ_TSTR; jne ->vmeta_tsetv
+ | cleartp STR:RC
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | mov TAB:RB, [BASE+RB*8]
+ | not RC
+ | mov STR:RC, [KBASE+RC*8]
+ | checktab TAB:RB, ->vmeta_tsets
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *
+ | mov TMPRd, TAB:RB->hmask
+ | and TMPRd, STR:RC->sid
+ | imul TMPRd, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
+ | add NODE:TMPR, TAB:RB->node
+ | settp ITYPE, STR:RC, LJ_TSTR
+ |1:
+ | cmp NODE:TMPR->key, ITYPE
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp aword [TMPR], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [TMPR], ITYPE
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | mov TAB:ITYPE, TAB:RB->metatable
+ | test TAB:ITYPE, TAB:ITYPE
+ | jz <2
+ | test byte TAB:ITYPE->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:TMPR, NODE:TMPR->next
+ | test NODE:TMPR, NODE:TMPR
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz >6 // No metatable: continue.
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, ITYPE
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2, TAB:RB
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
+ | mov L:CARG1, SAVE_L
+ | mov BASE, L:CARG1->base
+ | mov TMPR, rax
+ | movzx RAd, PC_RA
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, ITYPE
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | mov TAB:RB, [BASE+RB*8]
+ | checktab TAB:RB, ->vmeta_tsetb
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | cmp aword [RC], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | mov TAB:TMPR, TAB:RB->metatable
+ | test TAB:TMPR, TAB:TMPR
+ | jz <1
+ | test byte TAB:TMPR->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+ case BC_TSETR:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ | cleartp TAB:RB
+ |.if DUALNUM
+ | mov RC, [BASE+RC*8]
+ |.else
+ | cvttsd2si RCd, qword [BASE+RC*8]
+ |.endif
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | cmp RCd, TAB:RB->asize
+ | jae ->vmeta_tsetr
+ | shl RCd, 3
+ | add RC, TAB:RB->array
+ | // Set array slot.
+ |->BC_TSETR_Z:
+ | mov ITYPE, [BASE+RA*8]
+ | mov [RC], ITYPE
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, TMPR
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ |1:
+ | mov TMPRd, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | cleartp TAB:RB
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RDd, MULTRES
+ | sub RDd, 1
+ | jz >4 // Nothing to copy?
+ | add RDd, TMPRd // Compute needed size.
+ | cmp RDd, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RDd, TMPRd
+ | shl TMPRd, 3
+ | add TMPR, TAB:RB->array
+ |3: // Copy result slots to table.
+ | mov RB, [RA]
+ | add RA, 8
+ | mov [TMPR], RB
+ | add TMPR, 8
+ | sub RDd, 1
+ | jnz <3
+ |4:
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ | mov L:CARG1, SAVE_L
+ | mov L:CARG1->base, BASE // Caveat: CARG2/CARG3 may be BASE.
+ | mov CARG2, TAB:RB
+ | mov CARG3d, RDd
+ | mov L:RB, L:CARG1
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RAd, PC_RA // Restore RA.
+ | movzx RDd, PC_RD // Restore RD.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD
+ | jmp <2
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
+ if (op == BC_CALLM) {
+ | add NARGS:RDd, MULTRES
+ }
+ | mov LFUNC:RB, [BASE+RA*8]
+ | checkfunc LFUNC:RB, ->vmeta_call_ra
+ | lea BASE, [BASE+RA*8+16]
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | ins_AD // RA = base, RD = extra_nargs
+ | add NARGS:RDd, MULTRES
+ | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
+ break;
+ case BC_CALLT:
+ | ins_AD // RA = base, RD = nargs+1
+ | lea RA, [BASE+RA*8+16]
+ | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
+ | mov LFUNC:RB, [RA-16]
+ | checktp_nc LFUNC:RB, LJ_TFUNC, ->vmeta_call
+ |->BC_CALLT_Z:
+ | mov PC, [BASE-8]
+ | test PCd, FRAME_TYPE
+ | jnz >7
+ |1:
+ | mov [BASE-16], LFUNC:RB // Copy func+tag down, reloaded below.
+ | mov MULTRES, NARGS:RDd
+ | sub NARGS:RDd, 1
+ | jz >3
+ |2: // Move args down.
+ | mov RB, [RA]
+ | add RA, 8
+ | mov [KBASE], RB
+ | add KBASE, 8
+ | sub NARGS:RDd, 1
+ | jnz <2
+ |
+ | mov LFUNC:RB, [BASE-16]
+ |3:
+ | cleartp LFUNC:RB
+ | mov NARGS:RDd, MULTRES
+ | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function?
+ | ja >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function.
+ | test PCd, FRAME_TYPE // Lua frame below?
+ | jnz <4
+ | movzx RAd, PC_RA
+ | neg RA
+ | mov LFUNC:KBASE, [BASE+RA*8-32] // Need to prepare KBASE.
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <4
+ |
+ |7: // Tailcall from a vararg function.
+ | sub PC, FRAME_VARG
+ | test PCd, FRAME_TYPEP
+ | jnz >8 // Vararg frame below?
+ | sub BASE, PC // Need to relocate BASE/KBASE down.
+ | mov KBASE, BASE
+ | mov PC, [BASE-8]
+ | jmp <1
+ |8:
+ | add PCd, FRAME_VARG
+ | jmp <1
+ break;
+
+ case BC_ITERC:
+ | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1)
+ | lea RA, [BASE+RA*8+16] // fb = base+2
+ | mov RB, [RA-32] // Copy state. fb[0] = fb[-4].
+ | mov RC, [RA-24] // Copy control var. fb[1] = fb[-3].
+ | mov [RA], RB
+ | mov [RA+8], RC
+ | mov LFUNC:RB, [RA-40] // Copy callable. fb[-2] = fb[-5]
+ | mov [RA-16], LFUNC:RB
+ | mov NARGS:RDd, 2+1 // Handle like a regular 2-arg call.
+ | checkfunc LFUNC:RB, ->vmeta_call
+ | mov BASE, RA
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ |->vm_IITERN:
+ | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
+ | mov TAB:RB, [BASE+RA*8-16]
+ | cleartp TAB:RB
+ | mov RCd, [BASE+RA*8-8] // Get index from control var.
+ | mov TMPRd, TAB:RB->asize
+ | add PC, 4
+ | mov ITYPE, TAB:RB->array
+ |1: // Traverse array part.
+ | cmp RCd, TMPRd; jae >5 // Index points after array part?
+ | cmp aword [ITYPE+RC*8], LJ_TNIL; je >4
+ |.if not DUALNUM
+ | cvtsi2sd xmm0, RCd
+ |.endif
+ | // Copy array slot to returned value.
+ | mov RB, [ITYPE+RC*8]
+ | mov [BASE+RA*8+8], RB
+ | // Return array index as a numeric key.
+ |.if DUALNUM
+ | setint ITYPE, RC
+ | mov [BASE+RA*8], ITYPE
+ |.else
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | add RCd, 1
+ | mov [BASE+RA*8-8], RCd // Update control var.
+ |2:
+ | movzx RDd, PC_RD // Get target from ITERL.
+ | branchPC RD
+ |3:
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | add RCd, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub RCd, TMPRd
+ |6:
+ | cmp RCd, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1.
+ | imul ITYPEd, RCd, #NODE
+ | add NODE:ITYPE, TAB:RB->node
+ | cmp aword NODE:ITYPE->val, LJ_TNIL; je >7
+ | lea TMPRd, [RCd+TMPRd+1]
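+ | // New control var = asize + hash slot index + 1, so iteration resumes
+ | // at the following hash slot.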
+ | // Copy key and value from hash slot.
+ | mov RB, NODE:ITYPE->key
+ | mov RC, NODE:ITYPE->val
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+8], RC
+ | mov [BASE+RA*8-8], TMPRd
+ | jmp <2
+ |
+ |7: // Skip holes in hash part.
+ | add RCd, 1
+ | jmp <6
+ break;
+
+ case BC_ISNEXT:
+ | ins_AD // RA = base, RD = target (points to ITERN)
+ | mov CFUNC:RB, [BASE+RA*8-24]
+ | checkfunc CFUNC:RB, >5
+ | checktptp [BASE+RA*8-16], LJ_TTAB, >5
+ | cmp aword [BASE+RA*8-8], LJ_TNIL; jne >5
+ | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5
+ | branchPC RD
+ | mov64 TMPR, ((uint64_t)LJ_KEYINDEX << 32)
+ | mov [BASE+RA*8-8], TMPR // Initialize control var.
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov PC_OP, BC_JMP
+ | branchPC RD
+ |.if JIT
+ | cmp byte [PC], BC_ITERN
+ | jne >6
+ |.endif
+ | mov byte [PC], BC_ITERC
+ | jmp <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | movzx RCd, word [PC+2]
+ | mov TRACE:RA, [RA+RC*8]
+ | mov eax, TRACE:RA->startins
+ | mov al, BC_ITERC
+ | mov dword [PC], eax
+ | jmp <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | ins_ABC // RA = base, RB = nresults+1, RC = numparams
+ | lea TMPR, [BASE+RC*8+(16+FRAME_VARG)]
+ | lea RA, [BASE+RA*8]
+ | sub TMPR, [BASE-8]
+ | // Note: TMPR may now be even _above_ BASE if nargs was < numparams.
+ | test RB, RB
+ | jz >5 // Copy all varargs?
+ | lea RB, [RA+RB*8-8]
+ | cmp TMPR, BASE // No vararg slots?
+ | jnb >2
+ |1: // Copy vararg slots to destination slots.
+ | mov RC, [TMPR-16]
+ | add TMPR, 8
+ | mov [RA], RC
+ | add RA, 8
+ | cmp RA, RB // All destination slots filled?
+ | jnb >3
+ | cmp TMPR, BASE // No more vararg slots?
+ | jb <1
+ |2: // Fill up remainder with nil.
+ | mov aword [RA], LJ_TNIL
+ | add RA, 8
+ | cmp RA, RB
+ | jb <2
+ |3:
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | mov MULTRES, 1 // MULTRES = 0+1
+ | mov RC, BASE
+ | sub RC, TMPR
+ | jbe <3 // No vararg slots?
+ | mov RBd, RCd
+ | shr RBd, 3
+ | add RBd, 1
+ | mov MULTRES, RBd // MULTRES = #varargs+1
+ | mov L:RB, SAVE_L
+ | add RC, RA
+ | cmp RC, L:RB->maxstack
+ | ja >7 // Need to grow stack?
+ |6: // Copy all vararg slots.
+ | mov RC, [TMPR-16]
+ | add TMPR, 8
+ | mov [RA], RC
+ | add RA, 8
+ | cmp TMPR, BASE // No more vararg slots?
+ | jb <6
+ | jmp <3
+ |
+ |7: // Grow stack for varargs.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RA
+ | mov SAVE_PC, PC
+ | sub TMPR, BASE // Need delta, because BASE may change.
+ | mov TMP1hi, TMPRd
+ | mov CARG2d, MULTRES
+ | sub CARG2d, 1
+ | mov CARG1, L:RB
+ | call extern lj_state_growstack // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | movsxd TMPR, TMP1hi
+ | mov RA, L:RB->top
+ | add TMPR, BASE
+ | jmp <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | ins_AD // RA = results, RD = extra_nresults
+ | add RDd, MULTRES // MULTRES >=1, so RD >=1.
+ | // Fall through. Assumes BC_RET follows and ins_AD is a no-op.
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ | ins_AD // RA = results, RD = nresults+1
+ if (op != BC_RET0) {
+ | shl RAd, 3
+ }
+ |1:
+ | mov PC, [BASE-8]
+ | mov MULTRES, RDd // Save nresults+1.
+ | test PCd, FRAME_TYPE // Check frame type marker.
+ | jnz >7 // Not returning to a fixarg Lua func?
+ switch (op) {
+ case BC_RET:
+ |->BC_RET_Z:
+ | mov KBASE, BASE // Use KBASE for result move.
+ | sub RDd, 1
+ | jz >3
+ |2: // Move results down.
+ | mov RB, [KBASE+RA]
+ | mov [KBASE-16], RB
+ | add KBASE, 8
+ | sub RDd, 1
+ | jnz <2
+ |3:
+ | mov RDd, MULTRES // Note: MULTRES may be >255.
+ | movzx RBd, PC_RB // So cannot compare with RDL!
+ |5:
+ | cmp RBd, RDd // More results expected?
+ | ja >6
+ break;
+ case BC_RET1:
+ | mov RB, [BASE+RA]
+ | mov [BASE-16], RB
+ /* fallthrough */
+ case BC_RET0:
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ default:
+ break;
+ }
+ | movzx RAd, PC_RA
+ | neg RA
+ | lea BASE, [BASE+RA*8-16] // base = base - (RA+2)*8
+ | mov LFUNC:KBASE, [BASE-16]
+ | cleartp LFUNC:KBASE
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ if (op == BC_RET) {
+ | mov aword [KBASE-16], LJ_TNIL // Note: relies on shifted base.
+ | add KBASE, 8
+ } else {
+ | mov aword [BASE+RD*8-24], LJ_TNIL
+ }
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | lea RB, [PC-FRAME_VARG]
+ | test RBd, FRAME_TYPEP
+ | jnz ->vm_return
+ | // Return from vararg function: relocate BASE down and RA up.
+ | sub BASE, RB
+ if (op != BC_RET0) {
+ | add RA, RB
+ }
+ | jmp <1
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]
+ |.define FOR_STOP, [RA+8]
+ |.define FOR_STEP, [RA+16]
+ |.define FOR_EXT, [RA+24]
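+ | // Four consecutive slots: internal idx/stop/step plus EXT, the copy of
+ | // idx that the loop body sees.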
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
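+ /* vk != 0 for the iteration ops (IFORL/JFORL): add the step, then
+ ** recheck the limit. vk == 0 for loop entry (FORI/JFORI). */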
+ | ins_AJ // RA = base, RD = target (after end of loop or start of loop)
+ | lea RA, [BASE+RA*8]
+ if (LJ_DUALNUM) {
+ | mov RB, FOR_IDX
+ | checkint RB, >9
+ | mov TMPR, FOR_STOP
+ if (!vk) {
+ | checkint TMPR, ->vmeta_for
+ | mov ITYPE, FOR_STEP
+ | test ITYPEd, ITYPEd; js >5
+ | sar ITYPE, 47;
+ | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | checkinttp FOR_STOP, ->assert_bad_for_arg_type
+ | checkinttp FOR_STEP, ->assert_bad_for_arg_type
+#endif
+ | mov ITYPE, FOR_STEP
+ | test ITYPEd, ITYPEd; js >5
+ | add RBd, ITYPEd; jo >1
+ | setint RB
+ | mov FOR_IDX, RB
+ }
+ | cmp RBd, TMPRd
+ | mov FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jle >7
+ |1:
+ |6:
+ | branchPC RD
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ } else if (op == BC_IFORL) {
+ | jg >7
+ |6:
+ | branchPC RD
+ |1:
+ } else {
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ }
+ |7:
+ | ins_next
+ |
+ |5: // Invert check for negative step.
+ if (!vk) {
+ | sar ITYPE, 47;
+ | cmp ITYPEd, LJ_TISNUM; jne ->vmeta_for
+ } else {
+ | add RBd, ITYPEd; jo <1
+ | setint RB
+ | mov FOR_IDX, RB
+ }
+ | cmp RBd, TMPRd
+ | mov FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jge <7
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jge =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | jl <7
+ } else {
+ | jge =>BC_JLOOP
+ }
+ | jmp <6
+ |9: // Fallback to FP variant.
+ if (!vk) {
+ | jae ->vmeta_for
+ }
+ } else if (!vk) {
+ | checknumtp FOR_IDX, ->vmeta_for
+ }
+ if (!vk) {
+ | checknumtp FOR_STOP, ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | checknumtp FOR_STOP, ->assert_bad_for_arg_type
+ | checknumtp FOR_STEP, ->assert_bad_for_arg_type
+#endif
+ }
+ | mov RB, FOR_STEP
+ if (!vk) {
+ | checknum RB, ->vmeta_for
+ }
+ | movsd xmm0, qword FOR_IDX
+ | movsd xmm1, qword FOR_STOP
+ if (vk) {
+ | addsd xmm0, qword FOR_STEP
+ | movsd qword FOR_IDX, xmm0
+ | test RB, RB; js >3
+ } else {
+ | jl >3
+ }
+ | ucomisd xmm1, xmm0
+ |1:
+ | movsd qword FOR_EXT, xmm0
+ if (op == BC_FORI) {
+ |.if DUALNUM
+ | jnb <7
+ |.else
+ | jnb >2
+ | branchPC RD
+ |.endif
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RDd, PC_RD
+ | jnb =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ |.if DUALNUM
+ | jb <7
+ |.else
+ | jb >2
+ | branchPC RD
+ |.endif
+ } else {
+ | jnb =>BC_JLOOP
+ }
+ |.if DUALNUM
+ | jmp <6
+ |.else
+ |2:
+ | ins_next
+ |.endif
+ |
+ |3: // Invert comparison if step is negative.
+ | ucomisd xmm0, xmm1
+ | jmp <1
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | ins_AJ // RA = base, RD = target
+ | lea RA, [BASE+RA*8]
+ | mov RB, [RA]
+ | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | mov [RA-8], RB
+ | jmp =>BC_JLOOP
+ } else {
+ | branchPC RD // Otherwise save control var + branch.
+ | mov [RA-8], RB
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+ | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop RBd
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
+ break;
+
+ case BC_ILOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | ins_AD // RA = base (ignored), RD = traceno
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | mov TRACE:RD, [RA+RD*8]
+ | mov RD, TRACE:RD->mcode
+ | mov L:RB, SAVE_L
+ | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
+ | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB
+ | // Save additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | mov CSAVE_4, r12
+ | mov CSAVE_3, r13
+ | mov CSAVE_2, r14
+ | mov CSAVE_1, r15
+ | mov RA, rsp
+ | sub rsp, 10*16+4*8
+ | movdqa [RA-1*16], xmm6
+ | movdqa [RA-2*16], xmm7
+ | movdqa [RA-3*16], xmm8
+ | movdqa [RA-4*16], xmm9
+ | movdqa [RA-5*16], xmm10
+ | movdqa [RA-6*16], xmm11
+ | movdqa [RA-7*16], xmm12
+ | movdqa [RA-8*16], xmm13
+ | movdqa [RA-9*16], xmm14
+ | movdqa [RA-10*16], xmm15
+ |.else
+ | sub rsp, 16
+ | mov [rsp+8], r12
+ | mov [rsp], r13
+ |.endif
+ | jmp RD
+ |.endif
+ break;
+
+ case BC_JMP:
+ | ins_AJ // RA = unused, RD = target
+ | branchPC RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall RBd
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+RA*8] // Top of frame.
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_f
+ | movzx RAd, byte [PC-4+PC2PROTO(numparams)]
+ | cmp NARGS:RDd, RAd // Check for missing parameters.
+ | jbe >3
+ |2:
+ if (op == BC_JFUNCF) {
+ | movzx RDd, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov aword [BASE+NARGS:RD*8-8], LJ_TNIL
+ | add NARGS:RDd, 1
+ | cmp NARGS:RDd, RAd
+ | jbe <3
+ | jmp <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | int3 // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | lea RBd, [NARGS:RD*8+FRAME_VARG+8]
+ | lea RD, [BASE+NARGS:RD*8+8]
+ | mov LFUNC:KBASE, [BASE-16]
+ | mov [RD-8], RB // Store delta + FRAME_VARG.
+ | mov [RD-16], LFUNC:KBASE // Store copy of LFUNC.
+ | mov L:RB, SAVE_L
+ | lea RA, [RD+RA*8]
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_v // Need to grow stack.
+ | mov RA, BASE
+ | mov BASE, RD
+ | movzx RBd, byte [PC-4+PC2PROTO(numparams)]
+ | test RBd, RBd
+ | jz >2
+ | add RA, 8
+ |1: // Copy fixarg slots up to new frame.
+ | add RA, 8
+ | cmp RA, BASE
+ | jnb >3 // Less args than parameters?
+ | mov KBASE, [RA-16]
+ | mov [RD], KBASE
+ | add RD, 8
+ | mov aword [RA-16], LJ_TNIL // Clear old fixarg slot (help the GC).
+ | sub RBd, 1
+ | jnz <1
+ |2:
+ if (op == BC_JFUNCV) {
+ | movzx RDd, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov aword [RD], LJ_TNIL
+ | add RD, 8
+ | sub RBd, 1
+ | jnz <3
+ | jmp <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1
+ | mov CFUNC:RB, [BASE-16]
+ | cleartp CFUNC:RB
+ | mov KBASE, CFUNC:RB->f
+ | mov L:RB, SAVE_L
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->base, BASE
+ | lea RA, [RD+8*LUA_MINSTACK]
+ | cmp RA, L:RB->maxstack
+ | mov L:RB->top, RD
+ if (op == BC_FUNCC) {
+ | mov CARG1, L:RB // Caveat: CARG1 may be RA.
+ } else {
+ | mov CARG2, KBASE
+ | mov CARG1, L:RB // Caveat: CARG1 may be RA.
+ }
+ | ja ->vm_growstack_c // Need to grow stack.
+ | set_vmstate C
+ if (op == BC_FUNCC) {
+ | call KBASE // (lua_State *L)
+ } else {
+ | // (lua_State *L, lua_CFunction f)
+ | call aword [DISPATCH+DISPATCH_GL(wrapf)]
+ }
+ | // nresults returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | lea RA, [BASE+RD*8]
+ | neg RA
+ | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
+ | mov PC, [BASE-8] // Fetch PC of caller.
+ | jmp ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
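+/* Generate the code for the whole VM: subroutines plus all opcode handlers. */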
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ dasm_growpc(Dst, BC__MAX);
+ build_subroutines(ctx);
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+ switch (ctx->mode) {
+ case BUILD_elfasm:
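+ /* DWARF CFI: one CIE with the common frame setup, then FDEs covering
+ ** the interpreter core and (with FFI) lj_vm_ffi_call. */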
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#if LJ_NO_UNWIND
+ "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */
+ "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */
+#endif
+ "\t.align 8\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.align 8\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+#if LJ_TARGET_SOLARIS
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+ "\t.align 8\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -8\n"
+ "\t.byte 0x10\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 0x7\n\t.uleb128 8\n"
+ "\t.byte 0x80+0x10\n\t.uleb128 0x1\n"
+ "\t.align 8\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.align 8\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+#if !LJ_NO_UNWIND
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
+ "\t.byte 0x80+0x10\n\t.byte 0x1\n"
+ "\t.align 3\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+ "\t.align 3\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-8\n"
+ "\t.byte 0x10\n"
+ "\t.byte 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x7\n\t.byte 8\n"
+ "\t.byte 0x80+0x10\n\t.byte 0x1\n"
+ "\t.align 3\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.align 3\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+ fprintf(ctx->fp, ".subsections_via_symbols\n");
+ }
+ break;
+#endif
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/vm_x86.dasc b/libs/luajit-cmake/luajit/src/vm_x86.dasc
new file mode 100644
index 0000000..18ca87b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/vm_x86.dasc
@@ -0,0 +1,5825 @@
+|// Low-level VM code for x86 CPUs.
+|// Bytecode interpreter, fast functions and helper functions.
+|// Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+|
+|.if P64
+|.arch x64
+|.else
+|.arch x86
+|.endif
+|.section code_op, code_sub
+|
+|.actionlist build_actionlist
+|.globals GLOB_
+|.globalnames globnames
+|.externnames extnames
+|
+|//-----------------------------------------------------------------------
+|
+|.if P64
+|.define X64, 1
+|.if WIN
+|.define X64WIN, 1
+|.endif
+|.endif
+|
+|// Fixed register assignments for the interpreter.
+|// This is very fragile and has many dependencies. Caveat emptor.
+|.define BASE, edx // Not C callee-save, refetched anyway.
+|.if not X64
+|.define KBASE, edi // Must be C callee-save.
+|.define KBASEa, KBASE
+|.define PC, esi // Must be C callee-save.
+|.define PCa, PC
+|.define DISPATCH, ebx // Must be C callee-save.
+|.elif X64WIN
+|.define KBASE, edi // Must be C callee-save.
+|.define KBASEa, rdi
+|.define PC, esi // Must be C callee-save.
+|.define PCa, rsi
+|.define DISPATCH, ebx // Must be C callee-save.
+|.else
+|.define KBASE, r15d // Must be C callee-save.
+|.define KBASEa, r15
+|.define PC, ebx // Must be C callee-save.
+|.define PCa, rbx
+|.define DISPATCH, r14d // Must be C callee-save.
+|.endif
+|
+|.define RA, ecx
+|.define RAH, ch
+|.define RAL, cl
+|.define RB, ebp // Must be ebp (C callee-save).
+|.define RC, eax // Must be eax.
+|.define RCW, ax
+|.define RCH, ah
+|.define RCL, al
+|.define OP, RB
+|.define RD, RC
+|.define RDW, RCW
+|.define RDL, RCL
+|.if X64
+|.define RAa, rcx
+|.define RBa, rbp
+|.define RCa, rax
+|.define RDa, rax
+|.else
+|.define RAa, RA
+|.define RBa, RB
+|.define RCa, RC
+|.define RDa, RD
+|.endif
+|
+|.if not X64
+|.define FCARG1, ecx // x86 fastcall arguments.
+|.define FCARG2, edx
+|.elif X64WIN
+|.define CARG1, rcx // x64/WIN64 C call arguments.
+|.define CARG2, rdx
+|.define CARG3, r8
+|.define CARG4, r9
+|.define CARG1d, ecx
+|.define CARG2d, edx
+|.define CARG3d, r8d
+|.define CARG4d, r9d
+|.define FCARG1, CARG1d // Upwards compatible to x86 fastcall.
+|.define FCARG2, CARG2d
+|.else
+|.define CARG1, rdi // x64/POSIX C call arguments.
+|.define CARG2, rsi
+|.define CARG3, rdx
+|.define CARG4, rcx
+|.define CARG5, r8
+|.define CARG6, r9
+|.define CARG1d, edi
+|.define CARG2d, esi
+|.define CARG3d, edx
+|.define CARG4d, ecx
+|.define CARG5d, r8d
+|.define CARG6d, r9d
+|.define FCARG1, CARG1d // Simulate x86 fastcall.
+|.define FCARG2, CARG2d
+|.endif
+|
+|// Type definitions. Some of these are only used for documentation.
+|.type L, lua_State
+|.type GL, global_State
+|.type TVALUE, TValue
+|.type GCOBJ, GCobj
+|.type STR, GCstr
+|.type TAB, GCtab
+|.type LFUNC, GCfuncL
+|.type CFUNC, GCfuncC
+|.type PROTO, GCproto
+|.type UPVAL, GCupval
+|.type NODE, Node
+|.type NARGS, int
+|.type TRACE, GCtrace
+|.type SBUF, SBuf
+|
+|// Stack layout while in interpreter. Must match with lj_frame.h.
+|//-----------------------------------------------------------------------
+|.if not X64 // x86 stack layout.
+|
+|.if WIN
+|
+|.define CFRAME_SPACE, aword*9 // Delta for esp (see <--).
+|.macro saveregs_
+| push edi; push esi; push ebx
+| push extern lj_err_unwind_win
+| fs; push dword [0]
+| fs; mov [0], esp
+| sub esp, CFRAME_SPACE
+|.endmacro
+|.macro restoreregs
+| add esp, CFRAME_SPACE
+| fs; pop dword [0]
+| pop edi // Short for esp += 4.
+| pop ebx; pop esi; pop edi; pop ebp
+|.endmacro
+|
+|.else
+|
+|.define CFRAME_SPACE, aword*7 // Delta for esp (see <--).
+|.macro saveregs_
+| push edi; push esi; push ebx
+| sub esp, CFRAME_SPACE
+|.endmacro
+|.macro restoreregs
+| add esp, CFRAME_SPACE
+| pop ebx; pop esi; pop edi; pop ebp
+|.endmacro
+|
+|.endif
+|
+|.macro saveregs
+| push ebp; saveregs_
+|.endmacro
+|
+|.if WIN
+|.define SAVE_ERRF, aword [esp+aword*19] // vm_pcall/vm_cpcall only.
+|.define SAVE_NRES, aword [esp+aword*18]
+|.define SAVE_CFRAME, aword [esp+aword*17]
+|.define SAVE_L, aword [esp+aword*16]
+|//----- 16 byte aligned, ^^^ arguments from C caller
+|.define SAVE_RET, aword [esp+aword*15] //<-- esp entering interpreter.
+|.define SAVE_R4, aword [esp+aword*14]
+|.define SAVE_R3, aword [esp+aword*13]
+|.define SAVE_R2, aword [esp+aword*12]
+|//----- 16 byte aligned
+|.define SAVE_R1, aword [esp+aword*11]
+|.define SEH_FUNC, aword [esp+aword*10]
+|.define SEH_NEXT, aword [esp+aword*9] //<-- esp after register saves.
+|.define UNUSED2, aword [esp+aword*8]
+|//----- 16 byte aligned
+|.define UNUSED1, aword [esp+aword*7]
+|.define SAVE_PC, aword [esp+aword*6]
+|.define TMP2, aword [esp+aword*5]
+|.define TMP1, aword [esp+aword*4]
+|//----- 16 byte aligned
+|.define ARG4, aword [esp+aword*3]
+|.define ARG3, aword [esp+aword*2]
+|.define ARG2, aword [esp+aword*1]
+|.define ARG1, aword [esp] //<-- esp while in interpreter.
+|//----- 16 byte aligned, ^^^ arguments for C callee
+|.else
+|.define SAVE_ERRF, aword [esp+aword*15] // vm_pcall/vm_cpcall only.
+|.define SAVE_NRES, aword [esp+aword*14]
+|.define SAVE_CFRAME, aword [esp+aword*13]
+|.define SAVE_L, aword [esp+aword*12]
+|//----- 16 byte aligned, ^^^ arguments from C caller
+|.define SAVE_RET, aword [esp+aword*11] //<-- esp entering interpreter.
+|.define SAVE_R4, aword [esp+aword*10]
+|.define SAVE_R3, aword [esp+aword*9]
+|.define SAVE_R2, aword [esp+aword*8]
+|//----- 16 byte aligned
+|.define SAVE_R1, aword [esp+aword*7] //<-- esp after register saves.
+|.define SAVE_PC, aword [esp+aword*6]
+|.define TMP2, aword [esp+aword*5]
+|.define TMP1, aword [esp+aword*4]
+|//----- 16 byte aligned
+|.define ARG4, aword [esp+aword*3]
+|.define ARG3, aword [esp+aword*2]
+|.define ARG2, aword [esp+aword*1]
+|.define ARG1, aword [esp] //<-- esp while in interpreter.
+|//----- 16 byte aligned, ^^^ arguments for C callee
+|.endif
+|
+|// FPARGx overlaps ARGx and ARG(x+1) on x86.
+|.define FPARG3, qword [esp+qword*1]
+|.define FPARG1, qword [esp]
+|// TMPQ overlaps TMP1/TMP2. ARG5/MULTRES overlap TMP1/TMP2 (and TMPQ).
+|.define TMPQ, qword [esp+aword*4]
+|.define TMP3, ARG4
+|.define ARG5, TMP1
+|.define TMPa, TMP1
+|.define MULTRES, TMP2
+|
+|// Arguments for vm_call and vm_pcall.
+|.define INARG_BASE, SAVE_CFRAME // Overwritten by SAVE_CFRAME!
+|
+|// Arguments for vm_cpcall.
+|.define INARG_CP_CALL, SAVE_ERRF
+|.define INARG_CP_UD, SAVE_NRES
+|.define INARG_CP_FUNC, SAVE_CFRAME
+|
+|//-----------------------------------------------------------------------
+|.elif X64WIN // x64/Windows stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rdi; push rsi; push rbx
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+| pop rbx; pop rsi; pop rdi; pop rbp
+|.endmacro
+|
+|.define SAVE_CFRAME, aword [rsp+aword*13]
+|.define SAVE_PC, dword [rsp+dword*25]
+|.define SAVE_L, dword [rsp+dword*24]
+|.define SAVE_ERRF, dword [rsp+dword*23]
+|.define SAVE_NRES, dword [rsp+dword*22]
+|.define TMP2, dword [rsp+dword*21]
+|.define TMP1, dword [rsp+dword*20]
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by interpreter
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.define ARG5, aword [rsp+aword*4]
+|.define CSAVE_4, aword [rsp+aword*3]
+|.define CSAVE_3, aword [rsp+aword*2]
+|.define CSAVE_2, aword [rsp+aword*1]
+|.define CSAVE_1, aword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned, ^^^ 32 byte register save area, owned by callee
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ, qword [rsp+aword*10]
+|.define MULTRES, TMP2
+|.define TMPa, ARG5
+|.define ARG5d, dword [rsp+aword*4]
+|.define TMP3, ARG5d
+|
+|//-----------------------------------------------------------------------
+|.else // x64/POSIX stack layout
+|
+|.define CFRAME_SPACE, aword*5 // Delta for rsp (see <--).
+|.macro saveregs_
+| push rbx; push r15; push r14
+|.if NO_UNWIND
+| push r13; push r12
+|.endif
+| sub rsp, CFRAME_SPACE
+|.endmacro
+|.macro saveregs
+| push rbp; saveregs_
+|.endmacro
+|.macro restoreregs
+| add rsp, CFRAME_SPACE
+|.if NO_UNWIND
+| pop r12; pop r13
+|.endif
+| pop r14; pop r15; pop rbx; pop rbp
+|.endmacro
+|
+|//----- 16 byte aligned
+|.if NO_UNWIND
+|.define SAVE_RET, aword [rsp+aword*11] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*10]
+|.define SAVE_R3, aword [rsp+aword*9]
+|.define SAVE_R2, aword [rsp+aword*8]
+|.define SAVE_R1, aword [rsp+aword*7]
+|.define SAVE_RU2, aword [rsp+aword*6]
+|.define SAVE_RU1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.else
+|.define SAVE_RET, aword [rsp+aword*9] //<-- rsp entering interpreter.
+|.define SAVE_R4, aword [rsp+aword*8]
+|.define SAVE_R3, aword [rsp+aword*7]
+|.define SAVE_R2, aword [rsp+aword*6]
+|.define SAVE_R1, aword [rsp+aword*5] //<-- rsp after register saves.
+|.endif
+|.define SAVE_CFRAME, aword [rsp+aword*4]
+|.define SAVE_PC, dword [rsp+dword*7]
+|.define SAVE_L, dword [rsp+dword*6]
+|.define SAVE_ERRF, dword [rsp+dword*5]
+|.define SAVE_NRES, dword [rsp+dword*4]
+|.define TMPa, aword [rsp+aword*1]
+|.define TMP2, dword [rsp+dword*1]
+|.define TMP1, dword [rsp] //<-- rsp while in interpreter.
+|//----- 16 byte aligned
+|
+|// TMPQ overlaps TMP1/TMP2. MULTRES overlaps TMP2 (and TMPQ).
+|.define TMPQ, qword [rsp]
+|.define TMP3, dword [rsp+aword*1]
+|.define MULTRES, TMP2
+|
+|.endif
+|
+|//-----------------------------------------------------------------------
+|
+|// Instruction headers.
+|.macro ins_A; .endmacro
+|.macro ins_AD; .endmacro
+|.macro ins_AJ; .endmacro
+|.macro ins_ABC; movzx RB, RCH; movzx RC, RCL; .endmacro
+|.macro ins_AB_; movzx RB, RCH; .endmacro
+|.macro ins_A_C; movzx RC, RCL; .endmacro
+|.macro ins_AND; not RDa; .endmacro
+|
+|// Instruction decode+dispatch. Carefully tuned (nope, lodsd is not faster).
+|.macro ins_NEXT
+| mov RC, [PC]
+| movzx RA, RCH
+| movzx OP, RCL
+| add PC, 4
+| shr RC, 16
+|.if X64
+| jmp aword [DISPATCH+OP*8]
+|.else
+| jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
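+|// Each bytecode instruction is one 32-bit word: OP in the low byte, A in
+|// the next byte, and the 16-bit D operand on top (split as C in its low
+|// and B in its high byte for three-operand instructions). Roughly, in C:
+|//   BCIns i = *PC; PC += 4; OP = i & 0xff; RA = (i>>8) & 0xff; RC = i>>16;
+|//   goto *dispatch[OP];  // Entries are pointer-sized: *8 on x64, *4 on x86.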
+|
+|// Instruction footer.
+|.if 1
+| // Replicated dispatch. Less unpredictable branches, but higher I-Cache use.
+| .define ins_next, ins_NEXT
+| .define ins_next_, ins_NEXT
+|.else
+| // Common dispatch. Lower I-Cache use, only one (very) unpredictable branch.
+| // Affects only certain kinds of benchmarks (and only with -j off).
+| // Around 10%-30% slower on Core2, and a lot slower on P4.
+| .macro ins_next
+| jmp ->ins_next
+| .endmacro
+| .macro ins_next_
+| ->ins_next:
+| ins_NEXT
+| .endmacro
+|.endif
+|
+|// Call decode and dispatch.
+|.macro ins_callt
+| // BASE = new base, RB = LFUNC, RD = nargs+1, [BASE-4] = PC
+| mov PC, LFUNC:RB->pc
+| mov RA, [PC]
+| movzx OP, RAL
+| movzx RA, RAH
+| add PC, 4
+|.if X64
+| jmp aword [DISPATCH+OP*8]
+|.else
+| jmp aword [DISPATCH+OP*4]
+|.endif
+|.endmacro
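+|// Same decode as ins_NEXT, but PC is first (re)loaded from the callee's
+|// prototype, so dispatch lands on the function header bytecode (FUNCF,
+|// FUNCV etc.) which sets up the new frame.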
+|
+|.macro ins_call
+| // BASE = new base, RB = LFUNC, RD = nargs+1
+| mov [BASE-4], PC
+| ins_callt
+|.endmacro
+|
+|//-----------------------------------------------------------------------
+|
+|// Macros to test operand types.
+|.macro checktp, reg, tp; cmp dword [BASE+reg*8+4], tp; .endmacro
+|.macro checknum, reg, target; checktp reg, LJ_TISNUM; jae target; .endmacro
+|.macro checkint, reg, target; checktp reg, LJ_TISNUM; jne target; .endmacro
+|.macro checkstr, reg, target; checktp reg, LJ_TSTR; jne target; .endmacro
+|.macro checktab, reg, target; checktp reg, LJ_TTAB; jne target; .endmacro
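+|// NaN-tagged TValues: the high word of a number is part of the double,
+|// while other types store a tag there. Tags are allocated from the top of
+|// the unsigned range, so a single unsigned compare against LJ_TISNUM
+|// classifies a slot: below = double, equal = integer (DUALNUM builds),
+|// above = non-number. Hence jae in checknum vs. jne in checkint.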
+|
+|// These operands must be used with movzx.
+|.define PC_OP, byte [PC-4]
+|.define PC_RA, byte [PC-3]
+|.define PC_RB, byte [PC-1]
+|.define PC_RC, byte [PC-2]
+|.define PC_RD, word [PC-2]
+|
+|.macro branchPC, reg
+| lea PC, [PC+reg*4-BCBIAS_J*4]
+|.endmacro
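+|// Branch targets are stored biased: the 16-bit D operand holds the signed
+|// jump offset plus BCBIAS_J (0x8000), so the lea above subtracts the bias
+|// and scales by 4 bytes per instruction in a single step.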
+|
+|// Assumes DISPATCH is relative to GL.
+#define DISPATCH_GL(field) (GG_DISP2G + (int)offsetof(global_State, field))
+#define DISPATCH_J(field) (GG_DISP2J + (int)offsetof(jit_State, field))
+|
+#define PC2PROTO(field) ((int)offsetof(GCproto, field)-(int)sizeof(GCproto))
+|
+|// Decrement hashed hotcount and trigger trace recorder if zero.
+|.macro hotloop, reg
+| mov reg, PC
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_LOOP
+| jb ->vm_hotloop
+|.endmacro
+|
+|.macro hotcall, reg
+| mov reg, PC
+| shr reg, 1
+| and reg, HOTCOUNT_PCMASK
+| sub word [DISPATCH+reg+GG_DISP2HOT], HOTCOUNT_CALL
+| jb ->vm_hotcall
+|.endmacro
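+|// Hotcounts are a small hash of 16-bit counters in the dispatch area,
+|// indexed by (PC>>1) & HOTCOUNT_PCMASK and seeded with the hot threshold.
+|// The sub wraps below zero once a site gets hot, so the carry flag (jb)
+|// triggers the recorder. Hash collisions merely blur the profile a bit.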
+|
+|// Set current VM state.
+|.macro set_vmstate, st
+| mov dword [DISPATCH+DISPATCH_GL(vmstate)], ~LJ_VMST_..st
+|.endmacro
+|
+|// x87 compares.
+|.macro fcomparepp // Compare and pop st0 >< st1.
+| fucomip st1
+| fpop
+|.endmacro
+|
+|.macro fpop1; fstp st1; .endmacro
+|
+|// Synthesize SSE FP constants.
+|.macro sseconst_abs, reg, tmp // Synthesize abs mask.
+|.if X64
+| mov64 tmp, U64x(7fffffff,ffffffff); movd reg, tmp
+|.else
+| pxor reg, reg; pcmpeqd reg, reg; psrlq reg, 1
+|.endif
+|.endmacro
+|
+|.macro sseconst_hi, reg, tmp, val // Synthesize hi-32 bit const.
+|.if X64
+| mov64 tmp, U64x(val,00000000); movd reg, tmp
+|.else
+| mov tmp, 0x .. val; movd reg, tmp; pshufd reg, reg, 0x51
+|.endif
+|.endmacro
+|
+|.macro sseconst_sign, reg, tmp // Synthesize sign mask.
+| sseconst_hi reg, tmp, 80000000
+|.endmacro
+|.macro sseconst_1, reg, tmp // Synthesize 1.0.
+| sseconst_hi reg, tmp, 3ff00000
+|.endmacro
+|.macro sseconst_2p52, reg, tmp // Synthesize 2^52.
+| sseconst_hi reg, tmp, 43300000
+|.endmacro
+|.macro sseconst_tobit, reg, tmp // Synthesize 2^52 + 2^51.
+| sseconst_hi reg, tmp, 43380000
+|.endmacro
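+|// Constants synthesized above (high:low words of an IEEE-754 double):
+|// abs mask = 0x7fffffff:ffffffff, sign = 0x80000000:0, 1.0 = 0x3ff00000:0,
+|// 2^52 = 0x43300000:0. The tobit constant 2^52+2^51 (0x43380000:0) turns
+|// double->int32 into one addsd: after adding it, the low 32 mantissa bits
+|// hold the (wrapped) integer value, which a movd can extract directly.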
+|
+|// Move table write barrier back. Overwrites reg.
+|.macro barrierback, tab, reg
+| and byte tab->marked, (uint8_t)~LJ_GC_BLACK // black2gray(tab)
+| mov reg, [DISPATCH+DISPATCH_GL(gc.grayagain)]
+| mov [DISPATCH+DISPATCH_GL(gc.grayagain)], tab
+| mov tab->gclist, reg
+|.endmacro
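+|// Backward barrier of the incremental GC: instead of marking the newly
+|// stored value, the black table is flipped back to gray and chained onto
+|// gc.grayagain for a rescan. This keeps repeated stores into one table cheap.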
+|
+|//-----------------------------------------------------------------------
+
+/* Generate subroutines used by opcodes and other parts of the VM. */
+/* The .code_sub section should be last to help static branch prediction. */
+static void build_subroutines(BuildCtx *ctx)
+{
+ |.code_sub
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Return handling ----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
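+ |// Frame linkage lives in the saved-PC slot [BASE-4]: Lua frames store the
+ |// caller's PC there, all other frame types store the byte delta to the
+ |// previous base in the upper bits plus FRAME_* type flags in the low bits
+ |// (which `and PC, -8` strips).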
+ |->vm_returnp:
+ | test PC, FRAME_P
+ | jz ->cont_dispatch
+ |
+ | // Return from pcall or xpcall fast func.
+ | and PC, -8
+ | sub BASE, PC // Restore caller base.
+ | lea RAa, [RA+PC-8] // Rebase RA and prepend one result.
+ | mov PC, [BASE-4] // Fetch PC of previous frame.
+ | // Prepending may overwrite the pcall frame, so do it at the end.
+ | mov dword [BASE+RA+4], LJ_TTRUE // Prepend true to results.
+ |
+ |->vm_returnc:
+ | add RD, 1 // RD = nresults+1
+ | jz ->vm_unwind_yield
+ | mov MULTRES, RD
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z // Handle regular return to Lua.
+ |
+ |->vm_return:
+ | // BASE = base, RA = resultofs, RD = nresults+1 (= MULTRES), PC = return
+ | xor PC, FRAME_C
+ | test PC, FRAME_TYPE
+ | jnz ->vm_returnp
+ |
+ | // Return to C.
+ | set_vmstate C
+ | and PC, -8
+ | sub PC, BASE
+ | neg PC // Previous base = BASE - delta.
+ |
+ | sub RD, 1
+ | jz >2
+ |1: // Move results down.
+ |.if X64
+ | mov RBa, [BASE+RA]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [BASE+RA]
+ | mov [BASE-8], RB
+ | mov RB, [BASE+RA+4]
+ | mov [BASE-4], RB
+ |.endif
+ | add BASE, 8
+ | sub RD, 1
+ | jnz <1
+ |2:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, PC
+ |3:
+ | mov RD, MULTRES
+ | mov RA, SAVE_NRES // RA = wanted nresults+1
+ |4:
+ | cmp RA, RD
+ | jne >6 // More/less results wanted?
+ |5:
+ | sub BASE, 8
+ | mov L:RB->top, BASE
+ |
+ |->vm_leave_cp:
+ | mov RAa, SAVE_CFRAME // Restore previous C frame.
+ | mov L:RB->cframe, RAa
+ | xor eax, eax // Ok return status for vm_pcall.
+ |
+ |->vm_leave_unw:
+ | restoreregs
+ | ret
+ |
+ |6:
+ | jb >7 // Less results wanted?
+ | // More results wanted. Check stack size and fill up results with nil.
+ | cmp BASE, L:RB->maxstack
+ | ja >8
+ | mov dword [BASE-4], LJ_TNIL
+ | add BASE, 8
+ | add RD, 1
+ | jmp <4
+ |
+ |7: // Less results wanted.
+ | test RA, RA
+ | jz <5 // But check for LUA_MULTRET+1.
+ | sub RA, RD // Negative result!
+ | lea BASE, [BASE+RA*8] // Correct top.
+ | jmp <5
+ |
+ |8: // Corner case: need to grow stack for filling up results.
+ | // This can happen if:
+ | // - A C function grows the stack (a lot).
+ | // - The GC shrinks the stack in between.
+ | // - A return from a lua_call() with a (high) nresults adjustment.
+ | mov L:RB->top, BASE // Save current top held in BASE (yes).
+ | mov MULTRES, RD // Need to fill only remainder with nil.
+ | mov FCARG2, RA
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->top // Need the (realloced) L->top in BASE.
+ | jmp <3
+ |
+ |->vm_unwind_yield:
+ | mov al, LUA_YIELD
+ | jmp ->vm_unwind_c_eh
+ |
+ |->vm_unwind_c@8: // Unwind C stack, return from vm_pcall.
+ | // (void *cframe, int errcode)
+ |.if X64
+ | mov eax, CARG2d // Error return status for vm_pcall.
+ | mov rsp, CARG1
+ |.else
+ | mov eax, FCARG2 // Error return status for vm_pcall.
+ | mov esp, FCARG1
+ |.if WIN
+ | lea FCARG1, SEH_NEXT
+ | fs; mov [0], FCARG1
+ |.endif
+ |.endif
+ |->vm_unwind_c_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov GL:RB, L:RB->glref
+ | mov dword GL:RB->vmstate, ~LJ_VMST_C
+ | jmp ->vm_leave_unw
+ |
+ |->vm_unwind_rethrow:
+ |.if X64 and not X64WIN
+ | mov FCARG1, SAVE_L
+ | mov FCARG2, eax
+ | restoreregs
+ | jmp extern lj_err_throw@8 // (lua_State *L, int errcode)
+ |.endif
+ |
+ |->vm_unwind_ff@4: // Unwind C stack, return from ff pcall.
+ | // (void *cframe)
+ |.if X64
+ | and CARG1, CFRAME_RAWMASK
+ | mov rsp, CARG1
+ |.else
+ | and FCARG1, CFRAME_RAWMASK
+ | mov esp, FCARG1
+ |.if WIN
+ | lea FCARG1, SEH_NEXT
+ | fs; mov [0], FCARG1
+ |.endif
+ |.endif
+ |->vm_unwind_ff_eh: // Landing pad for external unwinder.
+ | mov L:RB, SAVE_L
+ | mov RAa, -8 // Results start at BASE+RA = BASE-8.
+ | mov RD, 1+1 // Really 1+2 results, incr. later.
+ | mov BASE, L:RB->base
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov PC, [BASE-4] // Fetch PC of previous frame.
+ | mov dword [BASE-4], LJ_TFALSE // Prepend false to error message.
+ | set_vmstate INTERP
+ | jmp ->vm_returnc // Increments RD/MULTRES and returns.
+ |
+ |.if WIN and not X64
+ |->vm_rtlunwind@16: // Thin layer around RtlUnwind.
+ | // (void *cframe, void *excptrec, void *unwinder, int errcode)
+ | mov [esp], FCARG1 // Return value for RtlUnwind.
+ | push FCARG2 // Exception record for RtlUnwind.
+ | push 0 // Ignored by RtlUnwind.
+ | push dword [FCARG1+CFRAME_OFS_SEH]
+ | call extern RtlUnwind@16 // Violates ABI (clobbers too much).
+ | mov FCARG1, eax
+ | mov FCARG2, [esp+4] // errcode (for vm_unwind_c).
+ | ret // Jump to unwinder.
+ |.endif
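+ |// x86/Windows keeps the head of the SEH registration chain at fs:[0]; the
+ |// unwind paths above restore it from the frame's SEH_NEXT slot, and
+ |// vm_rtlunwind is a thin shim so RtlUnwind can pop intervening SEH frames
+ |// before control reaches LuaJIT's own unwinder.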
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Grow stack for calls -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_growstack_c: // Grow stack for C function.
+ | mov FCARG2, LUA_MINSTACK
+ | jmp >2
+ |
+ |->vm_growstack_v: // Grow stack for vararg Lua function.
+ | sub RD, 8
+ | jmp >1
+ |
+ |->vm_growstack_f: // Grow stack for fixarg Lua function.
+ | // BASE = new base, RD = nargs+1, RB = L, PC = first PC
+ | lea RD, [BASE+NARGS:RD*8-8]
+ |1:
+ | movzx RA, byte [PC-4+PC2PROTO(framesize)]
+ | add PC, 4 // Must point after first instruction.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov SAVE_PC, PC
+ | mov FCARG2, RA
+ |2:
+ | // RB = L, L->base = new base, L->top = top
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | mov LFUNC:RB, [BASE-8]
+ | sub RD, BASE
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | // BASE = new base, RB = LFUNC, RD = nargs+1
+ | ins_callt // Just retry the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Entry points into the assembler VM ---------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_resume: // Setup C frame and resume thread.
+ | // (lua_State *L, TValue *base, int nres1 = 0, ptrdiff_t ef = 0)
+ | saveregs
+ |.if X64
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ | mov RA, CARG2d
+ |.else
+ | mov L:RB, SAVE_L
+ | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME!
+ |.endif
+ | mov PC, FRAME_CP
+ | xor RD, RD
+ | lea KBASEa, [esp+CFRAME_RESUME]
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | add DISPATCH, GG_G2DISP
+ | mov SAVE_PC, RD // Any value outside of bytecode is ok.
+ | mov SAVE_CFRAME, RDa
+ |.if X64
+ | mov SAVE_NRES, RD
+ | mov SAVE_ERRF, RD
+ |.endif
+ | mov L:RB->cframe, KBASEa
+ | cmp byte L:RB->status, RDL
+ | je >2 // Initial resume (like a call).
+ |
+ | // Resume after yield (like a return).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov byte L:RB->status, RDL
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr RD, 3
+ | add RD, 1 // RD = nresults+1
+ | sub RA, BASE // RA = resultofs
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
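+ |// Note: L->cframe was set to esp+CFRAME_RESUME above. That low-bit tag
+ |// marks the frame as resumable; coroutine.yield tests exactly this bit
+ |// (see `test aword L:RB->cframe, CFRAME_RESUME` further down).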
+ |
+ |->vm_pcall: // Setup protected C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1, ptrdiff_t ef)
+ | saveregs
+ | mov PC, FRAME_CP
+ |.if X64
+ | mov SAVE_ERRF, CARG4d
+ |.endif
+ | jmp >1
+ |
+ |->vm_call: // Setup C frame and enter VM.
+ | // (lua_State *L, TValue *base, int nres1)
+ | saveregs
+ | mov PC, FRAME_C
+ |
+ |1: // Entry point for vm_pcall above (PC = ftype).
+ |.if X64
+ | mov SAVE_NRES, CARG3d
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ | mov RA, CARG2d
+ |.else
+ | mov L:RB, SAVE_L
+ | mov RA, INARG_BASE // Caveat: overlaps SAVE_CFRAME!
+ |.endif
+ |
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASEa
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ | add DISPATCH, GG_G2DISP
+ |.if X64
+ | mov L:RB->cframe, rsp
+ |.else
+ | mov L:RB->cframe, esp
+ |.endif
+ |
+ |2: // Entry point for vm_resume/vm_cpcall (RA = base, RB = L, PC = ftype).
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | mov BASE, L:RB->base // BASE = old base (used in vmeta_call).
+ | add PC, RA
+ | sub PC, BASE // PC = frame delta + frame type
+ |
+ | mov RD, L:RB->top
+ | sub RD, RA
+ | shr NARGS:RD, 3
+ | add NARGS:RD, 1 // RD = nargs+1
+ |
+ |->vm_call_dispatch:
+ | mov LFUNC:RB, [RA-8]
+ | cmp dword [RA-4], LJ_TFUNC
+ | jne ->vmeta_call // Ensure KBASE defined and != BASE.
+ |
+ |->vm_call_dispatch_f:
+ | mov BASE, RA
+ | ins_call
+ | // BASE = new base, RB = func, RD = nargs+1, PC = caller PC
+ |
+ |->vm_cpcall: // Setup protected C frame, call C.
+ | // (lua_State *L, lua_CFunction func, void *ud, lua_CPFunction cp)
+ | saveregs
+ |.if X64
+ | mov L:RB, CARG1d // Caveat: CARG1d may be RA.
+ | mov SAVE_L, CARG1d
+ |.else
+ | mov L:RB, SAVE_L
+ | // Caveat: INARG_CP_* and SAVE_CFRAME/SAVE_NRES/SAVE_ERRF overlap!
+ | mov RC, INARG_CP_UD // Get args before they are overwritten.
+ | mov RA, INARG_CP_FUNC
+ | mov BASE, INARG_CP_CALL
+ |.endif
+ | mov SAVE_PC, L:RB // Any value outside of bytecode is ok.
+ |
+ | mov KBASE, L:RB->stack // Compute -savestack(L, L->top).
+ | sub KBASE, L:RB->top
+ | mov DISPATCH, L:RB->glref // Setup pointer to dispatch table.
+ | mov SAVE_ERRF, 0 // No error function.
+ | mov SAVE_NRES, KBASE // Neg. delta means cframe w/o frame.
+ | add DISPATCH, GG_G2DISP
+ | // Handler may change cframe_nres(L->cframe) or cframe_errfunc(L->cframe).
+ |
+ |.if X64
+ | mov KBASEa, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASEa
+ | mov L:RB->cframe, rsp
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ |
+ | call CARG4 // (lua_State *L, lua_CFunction func, void *ud)
+ |.else
+ | mov ARG3, RC // Have to copy args downwards.
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ |
+ | mov KBASE, L:RB->cframe // Add our C frame to cframe chain.
+ | mov SAVE_CFRAME, KBASE
+ | mov L:RB->cframe, esp
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ |
+ | call BASE // (lua_State *L, lua_CFunction func, void *ud)
+ |.endif
+ | // TValue * (new base) or NULL returned in eax (RC).
+ | test RC, RC
+ | jz ->vm_leave_cp // No base? Just remove C frame.
+ | mov RA, RC
+ | mov PC, FRAME_CP
+ | jmp <2 // Else continue with the call.
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Metamethod handling ------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |//-- Continuation dispatch ----------------------------------------------
+ |
+ |->cont_dispatch:
+ | // BASE = meta base, RA = resultofs, RD = nresults+1 (also in MULTRES)
+ | add RA, BASE
+ | and PC, -8
+ | mov RB, BASE
+ | sub BASE, PC // Restore caller BASE.
+ | mov dword [RA+RD*8-4], LJ_TNIL // Ensure one valid arg.
+ | mov RC, RA // ... in [RC]
+ | mov PC, [RB-12] // Restore PC from [cont|PC].
+ |.if X64
+ | movsxd RAa, dword [RB-16] // May be negative on WIN64 with debug.
+ |.if FFI
+ | cmp RA, 1
+ | jbe >1
+ |.endif
+ | lea KBASEa, qword [=>0]
+ | add RAa, KBASEa
+ |.else
+ | mov RA, dword [RB-16]
+ |.if FFI
+ | cmp RA, 1
+ | jbe >1
+ |.endif
+ |.endif
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | // BASE = base, RC = result, RB = meta base
+ | jmp RAa // Jump to continuation.
+ |
+ |.if FFI
+ |1:
+ | je ->cont_ffi_callback // cont = 1: return from FFI callback.
+ | // cont = 0: Tail call from C function.
+ | sub RB, BASE
+ | shr RB, 3
+ | lea RD, [RB-1]
+ | jmp ->vm_call_tail
+ |.endif
+ |
+ |->cont_cat: // BASE = base, RC = result, RB = mbase
+ | movzx RA, PC_RB
+ | sub RB, 16
+ | lea RA, [BASE+RA*8]
+ | sub RA, RB
+ | je ->cont_ra
+ | neg RA
+ | shr RA, 3
+ |.if X64WIN
+ | mov CARG3d, RA
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | mov RCa, [RC]
+ | mov [RB], RCa
+ | mov CARG2d, RB
+ |.elif X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | mov CARG3d, RA
+ | mov RAa, [RC]
+ | mov [RB], RAa
+ | mov CARG2d, RB
+ |.else
+ | mov ARG3, RA
+ | mov RA, [RC+4]
+ | mov RC, [RC]
+ | mov [RB+4], RA
+ | mov [RB], RC
+ | mov ARG2, RB
+ |.endif
+ | jmp ->BC_CAT_Z
+ |
+ |//-- Table indexing metamethods -----------------------------------------
+ |
+ |->vmeta_tgets:
+ | mov TMP1, RC // RC = GCstr *
+ | mov TMP2, LJ_TSTR
+ | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | cmp PC_OP, BC_GGET
+ | jne >1
+ | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RA], TAB:RB // RB = GCtab *
+ | mov dword [RA+4], LJ_TTAB
+ | mov RB, RA
+ | jmp >2
+ |
+ |->vmeta_tgetb:
+ | movzx RC, PC_RC
+ |.if DUALNUM
+ | mov TMP2, LJ_TISNUM
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RC
+ | movsd TMPQ, xmm0
+ |.endif
+ | lea RCa, TMPQ // Store temp. TValue in TMPQ.
+ | jmp >1
+ |
+ |->vmeta_tgetv:
+ | movzx RC, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RB, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RB
+ | mov CARG3, RCa // May be 64 bit ptr to stack.
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tget // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ |->cont_ra: // BASE = base, RC = result
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC+4]
+ | mov RC, [RC]
+ | mov [BASE+RA*8+4], RB
+ | mov [BASE+RA*8], RC
+ |.endif
+ | ins_next
+ |
+ |3: // Call __index metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k
+ | mov RA, L:RB->top
+ | mov [RA-12], PC // [cont|PC]
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here.
+ | mov NARGS:RD, 2+1 // 2 args for func(t, k).
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tgetr:
+ | mov FCARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov FCARG2, RC // Caveat: FCARG2 == BASE
+ | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RC).
+ | movzx RA, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | test RC, RC
+ | jnz ->BC_TGETR_Z
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp ->BC_TGETR2_Z
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->vmeta_tsets:
+ | mov TMP1, RC // RC = GCstr *
+ | mov TMP2, LJ_TSTR
+ | lea RCa, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | cmp PC_OP, BC_GSET
+ | jne >1
+ | lea RA, [DISPATCH+DISPATCH_GL(tmptv)] // Store fn->l.env in g->tmptv.
+ | mov [RA], TAB:RB // RB = GCtab *
+ | mov dword [RA+4], LJ_TTAB
+ | mov RB, RA
+ | jmp >2
+ |
+ |->vmeta_tsetb:
+ | movzx RC, PC_RC
+ |.if DUALNUM
+ | mov TMP2, LJ_TISNUM
+ | mov TMP1, RC
+ |.else
+ | cvtsi2sd xmm0, RC
+ | movsd TMPQ, xmm0
+ |.endif
+ | lea RCa, TMPQ // Store temp. TValue in TMPQ.
+ | jmp >1
+ |
+ |->vmeta_tsetv:
+ | movzx RC, PC_RC // Reload TValue *k from RC.
+ | lea RC, [BASE+RC*8]
+ |1:
+ | movzx RB, PC_RB // Reload TValue *t from RB.
+ | lea RB, [BASE+RB*8]
+ |2:
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RB
+ | mov CARG3, RCa // May be 64 bit ptr to stack.
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_tset // (lua_State *L, TValue *o, TValue *k)
+ | // TValue * (finished) or NULL (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz >3
+ | // NOBARRIER: lj_meta_tset ensures the table is not black.
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ |->cont_nop: // BASE = base, (RC = result)
+ | ins_next
+ |
+ |3: // Call __newindex metamethod.
+ | // BASE = base, L->top = new base, stack = cont/func/t/k/(v)
+ | mov RA, L:RB->top
+ | mov [RA-12], PC // [cont|PC]
+ | movzx RC, PC_RA
+ | // Copy value to third argument.
+ |.if X64
+ | mov RBa, [BASE+RC*8]
+ | mov [RA+16], RBa
+ |.else
+ | mov RB, [BASE+RC*8+4]
+ | mov RC, [BASE+RC*8]
+ | mov [RA+20], RB
+ | mov [RA+16], RC
+ |.endif
+ | lea PC, [RA+FRAME_CONT]
+ | sub PC, BASE
+ | mov LFUNC:RB, [RA-8] // Guaranteed to be a function here.
+ | mov NARGS:RD, 3+1 // 3 args for func(t, k, v).
+ | jmp ->vm_call_dispatch_f
+ |
+ |->vmeta_tsetr:
+ |.if X64WIN
+ | mov L:CARG1d, SAVE_L
+ | mov CARG3d, RC
+ | mov L:CARG1d->base, BASE
+ | xchg CARG2d, TAB:RB // Caveat: CARG2d == BASE.
+ |.elif X64
+ | mov L:CARG1d, SAVE_L
+ | mov CARG2d, TAB:RB
+ | mov L:CARG1d->base, BASE
+ | mov RB, BASE // Save BASE.
+ | mov CARG3d, RC // Caveat: CARG3d == BASE.
+ |.else
+ | mov L:RA, SAVE_L
+ | mov ARG2, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov ARG3, RC
+ | mov ARG1, L:RA
+ | mov L:RA->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_setinth // (lua_State *L, GCtab *t, int32_t key)
+ | // TValue * returned in eax (RC).
+ | movzx RA, PC_RA
+ | mov BASE, RB // Restore BASE.
+ | jmp ->BC_TSETR_Z
+ |
+ |//-- Comparison metamethods ---------------------------------------------
+ |
+ |->vmeta_comp:
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d == BASE.
+ |.if X64WIN
+ | lea CARG3d, [BASE+RD*8]
+ | lea CARG2d, [BASE+RA*8]
+ |.else
+ | lea CARG2d, [BASE+RA*8]
+ | lea CARG3d, [BASE+RD*8]
+ |.endif
+ | mov CARG1d, L:RB // Caveat: CARG1d/CARG4d == RA.
+ | movzx CARG4d, PC_OP
+ |.else
+ | movzx RB, PC_OP
+ | lea RD, [BASE+RD*8]
+ | lea RA, [BASE+RA*8]
+ | mov ARG4, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RD
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_comp // (lua_State *L, TValue *o1, *o2, int op)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ |3:
+ | mov BASE, L:RB->base
+ | cmp RC, 1
+ | ja ->vmeta_binop
+ |4:
+ | lea PC, [PC+4]
+ | jb >6
+ |5:
+ | movzx RD, PC_RD
+ | branchPC RD
+ |6:
+ | ins_next
+ |
+ |->cont_condt: // BASE = base, RC = result
+ | add PC, 4
+ | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is true.
+ | jb <5
+ | jmp <6
+ |
+ |->cont_condf: // BASE = base, RC = result
+ | cmp dword [RC+4], LJ_TISTRUECOND // Branch if result is false.
+ | jmp <4
+ |
+ |->vmeta_equal:
+ | sub PC, 4
+ |.if X64WIN
+ | mov CARG3d, RD
+ | mov CARG4d, RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d == BASE.
+ | mov CARG2d, RA
+ | mov CARG1d, L:RB // Caveat: CARG1d == RA.
+ |.elif X64
+ | mov CARG2d, RA
+ | mov CARG4d, RB // Caveat: CARG4d == RA.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG3d == BASE.
+ | mov CARG3d, RD
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG4, RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RD
+ | mov ARG2, RA
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal // (lua_State *L, GCobj *o1, *o2, int ne)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |
+ |->vmeta_equal_cd:
+ |.if FFI
+ | sub PC, 4
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG1, L:RB
+ | mov FCARG2, dword [PC-4]
+ | mov SAVE_PC, PC
+ | call extern lj_meta_equal_cd@8 // (lua_State *L, BCIns ins)
+ | // 0/1 or TValue * (metamethod) returned in eax (RC).
+ | jmp <3
+ |.endif
+ |
+ |->vmeta_istype:
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RA
+ | movzx CARG3d, PC_RD
+ | mov L:CARG1d, L:RB
+ |.else
+ | movzx RD, PC_RD
+ | mov ARG2, RA
+ | mov L:RB, SAVE_L
+ | mov ARG3, RD
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_istype // (lua_State *L, BCReg ra, BCReg tp)
+ | mov BASE, L:RB->base
+ | jmp <6
+ |
+ |//-- Arithmetic metamethods ---------------------------------------------
+ |
+ |->vmeta_arith_vno:
+ |.if DUALNUM
+ | movzx RB, PC_RB
+ |.endif
+ |->vmeta_arith_vn:
+ | lea RC, [KBASE+RC*8]
+ | jmp >1
+ |
+ |->vmeta_arith_nvo:
+ |.if DUALNUM
+ | movzx RC, PC_RC
+ |.endif
+ |->vmeta_arith_nv:
+ | lea RC, [KBASE+RC*8]
+ | lea RB, [BASE+RB*8]
+ | xchg RB, RC
+ | jmp >2
+ |
+ |->vmeta_unm:
+ | lea RC, [BASE+RD*8]
+ | mov RB, RC
+ | jmp >2
+ |
+ |->vmeta_arith_vvo:
+ |.if DUALNUM
+ | movzx RB, PC_RB
+ |.endif
+ |->vmeta_arith_vv:
+ | lea RC, [BASE+RC*8]
+ |1:
+ | lea RB, [BASE+RB*8]
+ |2:
+ | lea RA, [BASE+RA*8]
+ |.if X64WIN
+ | mov CARG3d, RB
+ | mov CARG4d, RC
+ | movzx RC, PC_OP
+ | mov ARG5d, RC
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d == BASE.
+ | mov CARG2d, RA
+ | mov CARG1d, L:RB // Caveat: CARG1d == RA.
+ |.elif X64
+ | movzx CARG5d, PC_OP
+ | mov CARG2d, RA
+ | mov CARG4d, RC // Caveat: CARG4d == RA.
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG3d == BASE.
+ | mov CARG3d, RB
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG3, RB
+ | mov L:RB, SAVE_L
+ | mov ARG4, RC
+ | movzx RC, PC_OP
+ | mov ARG2, RA
+ | mov ARG5, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_arith // (lua_State *L, TValue *ra,*rb,*rc, BCReg op)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jz ->cont_nop
+ |
+ | // Call metamethod for binary op.
+ |->vmeta_binop:
+ | // BASE = base, RC = new base, stack = cont/func/o1/o2
+ | mov RA, RC
+ | sub RC, BASE
+ | mov [RA-12], PC // [cont|PC]
+ | lea PC, [RC+FRAME_CONT]
+ | mov NARGS:RD, 2+1 // 2 args for func(o1, o2).
+ | jmp ->vm_call_dispatch
+ |
+ |->vmeta_len:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | lea FCARG2, [BASE+RD*8] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB
+ | mov SAVE_PC, PC
+ | call extern lj_meta_len@8 // (lua_State *L, TValue *o)
+ | // NULL (retry) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+#if LJ_52
+ | test RC, RC
+ | jne ->vmeta_binop // Binop call for compatibility.
+ | movzx RD, PC_RD
+ | mov TAB:FCARG1, [BASE+RD*8]
+ | jmp ->BC_LEN_Z
+#else
+ | jmp ->vmeta_binop // Binop call for compatibility.
+#endif
+ |
+ |//-- Call metamethod ----------------------------------------------------
+ |
+ |->vmeta_call_ra:
+ | lea RA, [BASE+RA*8+8]
+ |->vmeta_call: // Resolve and call __call metamethod.
+ | // BASE = old base, RA = new base, RC = nargs+1, PC = return
+ | mov TMP2, RA // Save RA, RC for us.
+ | mov TMP1, NARGS:RD
+ | sub RA, 8
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, RA
+ | lea CARG3d, [RA+NARGS:RD*8]
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | lea RC, [RA+NARGS:RD*8]
+ | mov L:RB, SAVE_L
+ | mov ARG2, RA
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE // This is the callers base!
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_call // (lua_State *L, TValue *func, TValue *top)
+ | mov BASE, L:RB->base
+ | mov RA, TMP2
+ | mov NARGS:RD, TMP1
+ | mov LFUNC:RB, [RA-8]
+ | add NARGS:RD, 1
+ | // This is fragile. L->base must not move, KBASE must always be defined.
+ |.if X64
+ | cmp KBASEa, rdx // Continue with CALLT if flag set.
+ |.else
+ | cmp KBASE, BASE // Continue with CALLT if flag set.
+ |.endif
+ | je ->BC_CALLT_Z
+ | mov BASE, RA
+ | ins_call // Otherwise call resolved metamethod.
+ |
+ |//-- Argument coercion for 'for' statement ------------------------------
+ |
+ |->vmeta_for:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, RA // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | mov SAVE_PC, PC
+ | call extern lj_meta_for@8 // (lua_State *L, TValue *base)
+ | mov BASE, L:RB->base
+ | mov RC, [PC-4]
+ | movzx RA, RCH
+ | movzx OP, RCL
+ | shr RC, 16
+ |.if X64
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |.else
+ | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Retry FORI or JFORI.
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Fast functions -----------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |.macro .ffunc, name
+ |->ff_ .. name:
+ |.endmacro
+ |
+ |.macro .ffunc_1, name
+ |->ff_ .. name:
+ | cmp NARGS:RD, 1+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_2, name
+ |->ff_ .. name:
+ | cmp NARGS:RD, 2+1; jb ->fff_fallback
+ |.endmacro
+ |
+ |.macro .ffunc_nsse, name, op
+ | .ffunc_1 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | op xmm0, qword [BASE]
+ |.endmacro
+ |
+ |.macro .ffunc_nsse, name
+ | .ffunc_nsse name, movsd
+ |.endmacro
+ |
+ |.macro .ffunc_nnsse, name
+ | .ffunc_2 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ | movsd xmm1, qword [BASE+8]
+ |.endmacro
+ |
+ |.macro .ffunc_nnr, name
+ | .ffunc_2 name
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM; jae ->fff_fallback
+ | fld qword [BASE+8]
+ | fld qword [BASE]
+ |.endmacro
+ |
+ |// Inlined GC threshold check. Caveat: uses label 1.
+ |.macro ffgccheck
+ | mov RB, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RB, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | jb >1
+ | call ->fff_gcstep
+ |1:
+ |.endmacro
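+ |// This mirrors the C-side check `if (gc.total >= gc.threshold) lj_gc_step()`:
+ |// two loads and one well-predicted branch on the common no-GC path, taken
+ |// before any fast function that may allocate.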
+ |
+ |//-- Base library: checks -----------------------------------------------
+ |
+ |.ffunc_1 assert
+ | mov RB, [BASE+4]
+ | cmp RB, LJ_TISTRUECOND; jae ->fff_fallback
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD
+ | mov [BASE-4], RB
+ | mov RB, [BASE]
+ | mov [BASE-8], RB
+ | sub RD, 2
+ | jz >2
+ | mov RA, BASE
+ |1:
+ | add RA, 8
+ |.if X64
+ | mov RBa, [RA]
+ | mov [RA-8], RBa
+ |.else
+ | mov RB, [RA+4]
+ | mov [RA-4], RB
+ | mov RB, [RA]
+ | mov [RA-8], RB
+ |.endif
+ | sub RD, 1
+ | jnz <1
+ |2:
+ | mov RD, MULTRES
+ | jmp ->fff_res_
+ |
+ |.ffunc_1 type
+ | mov RB, [BASE+4]
+ |.if X64
+ | mov RA, RB
+ | sar RA, 15
+ | cmp RA, -2
+ | je >3
+ |.endif
+ | mov RC, ~LJ_TNUMX
+ | not RB
+ | cmp RC, RB
+ | cmova RC, RB
+ |2:
+ | mov CFUNC:RB, [BASE-8]
+ | mov STR:RC, [CFUNC:RB+RC*8+((char *)(&((GCfuncC *)0)->upvalue))]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RC
+ | jmp ->fff_res1
+ |.if X64
+ |3:
+ | mov RC, ~LJ_TLIGHTUD
+ | jmp <2
+ |.endif
+ |
+ |//-- Base library: getters and setters ---------------------------------
+ |
+ |.ffunc_1 getmetatable
+ | mov RB, [BASE+4]
+ | mov PC, [BASE-4]
+ | cmp RB, LJ_TTAB; jne >6
+ |1: // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, [BASE]
+ | mov TAB:RB, TAB:RB->metatable
+ |2:
+ | test TAB:RB, TAB:RB
+ | mov dword [BASE-4], LJ_TNIL
+ | jz ->fff_res1
+ | mov STR:RC, [DISPATCH+DISPATCH_GL(gcroot)+4*(GCROOT_MMNAME+MM_metatable)]
+ | mov dword [BASE-4], LJ_TTAB // Store metatable as default result.
+ | mov [BASE-8], TAB:RB
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->sid
+ | imul RA, #NODE
+ | add NODE:RA, TAB:RB->node
+ |3: // Rearranged logic, because we expect _not_ to find the key.
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >4
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | je >5
+ |4:
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <3
+ | jmp ->fff_res1 // Not found, keep default result.
+ |5:
+ | mov RB, [RA+4]
+ | cmp RB, LJ_TNIL; je ->fff_res1 // Ditto for nil value.
+ | mov RC, [RA]
+ | mov [BASE-4], RB // Return value of mt.__metatable.
+ | mov [BASE-8], RC
+ | jmp ->fff_res1
+ |
+ |6:
+ | cmp RB, LJ_TUDATA; je <1
+ |.if X64
+ | cmp RB, LJ_TNUMX; ja >8
+ | cmp RB, LJ_TISNUM; jbe >7
+ | mov RB, LJ_TLIGHTUD
+ | jmp >8
+ |7:
+ |.else
+ | cmp RB, LJ_TISNUM; ja >8
+ |.endif
+ | mov RB, LJ_TNUMX
+ |8:
+ | not RB
+ | mov TAB:RB, [DISPATCH+RB*4+DISPATCH_GL(gcroot[GCROOT_BASEMT])]
+ | jmp <2
+ |
+ |.ffunc_2 setmetatable
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | // Fast path: no mt for table yet and not clearing the mt.
+ | mov TAB:RB, [BASE]
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TTAB; jne ->fff_fallback
+ | mov TAB:RC, [BASE+8]
+ | mov TAB:RB->metatable, TAB:RC
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TTAB // Return original table.
+ | mov [BASE-8], TAB:RB
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jz >1
+ | // Possible write barrier. Table is black, but skip iswhite(mt) check.
+ | barrierback TAB:RB, RC
+ |1:
+ | jmp ->fff_res1
+ |
+ |.ffunc_2 rawget
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ |.if X64WIN
+ | mov RB, BASE // Save BASE.
+ | lea CARG3d, [BASE+8]
+ | mov CARG2d, [BASE] // Caveat: CARG2d == BASE.
+ | mov CARG1d, SAVE_L
+ |.elif X64
+ | mov RB, BASE // Save BASE.
+ | mov CARG2d, [BASE]
+ | lea CARG3d, [BASE+8] // Caveat: CARG3d == BASE.
+ | mov CARG1d, SAVE_L
+ |.else
+ | mov TAB:RD, [BASE]
+ | mov L:RB, SAVE_L
+ | mov ARG2, TAB:RD
+ | mov ARG1, L:RB
+ | mov RB, BASE // Save BASE.
+ | add BASE, 8
+ | mov ARG3, BASE
+ |.endif
+ | call extern lj_tab_get // (lua_State *L, GCtab *t, cTValue *key)
+ | // cTValue * returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | // Copy table slot.
+ |.if X64
+ | mov RBa, [RD]
+ | mov PC, [BASE-4]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [RD]
+ | mov RD, [RD+4]
+ | mov PC, [BASE-4]
+ | mov [BASE-8], RB
+ | mov [BASE-4], RD
+ |.endif
+ | jmp ->fff_res1
+ |
+ |//-- Base library: conversions ------------------------------------------
+ |
+ |.ffunc tonumber
+ | // Only handles the number case inline (without a base argument).
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument.
+ | cmp dword [BASE+4], LJ_TISNUM
+ |.if DUALNUM
+ | jne >1
+ | mov RB, dword [BASE]; jmp ->fff_resi
+ |1:
+ | ja ->fff_fallback
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]; jmp ->fff_resxmm0
+ |
+ |.ffunc_1 tostring
+ | // Only handles the string or number case inline.
+ | mov PC, [BASE-4]
+ | cmp dword [BASE+4], LJ_TSTR; jne >3
+ | // A __tostring method in the string base metatable is ignored.
+ | mov STR:RD, [BASE]
+ |2:
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RD
+ | jmp ->fff_res1
+ |3: // Handle numbers inline, unless a number base metatable is present.
+ | cmp dword [BASE+4], LJ_TISNUM; ja ->fff_fallback
+ | cmp dword [DISPATCH+DISPATCH_GL(gcroot[GCROOT_BASEMT_NUM])], 0
+ | jne ->fff_fallback
+ | ffgccheck // Caveat: uses label 1.
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Add frame since C call can throw.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ |.if X64 and not X64WIN
+ | mov FCARG2, BASE // Otherwise: FCARG2 == BASE
+ |.endif
+ | mov L:FCARG1, L:RB
+ |.if DUALNUM
+ | call extern lj_strfmt_number@8 // (lua_State *L, cTValue *o)
+ |.else
+ | call extern lj_strfmt_num@8 // (lua_State *L, lua_Number *np)
+ |.endif
+ | // GCstr returned in eax (RD).
+ | mov BASE, L:RB->base
+ | jmp <2
+ |
+ |//-- Base library: iterators -------------------------------------------
+ |
+ |.ffunc_1 next
+ | je >2 // Missing 2nd arg?
+ |1:
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | mov PC, [BASE-4]
+ | mov RB, BASE // Save BASE.
+ |.if X64WIN
+ | mov CARG1d, [BASE]
+ | lea CARG3d, [BASE-8]
+ | lea CARG2d, [BASE+8] // Caveat: CARG2d == BASE.
+ |.elif X64
+ | mov CARG1d, [BASE]
+ | lea CARG2d, [BASE+8]
+ | lea CARG3d, [BASE-8] // Caveat: CARG3d == BASE.
+ |.else
+ | mov TAB:RD, [BASE]
+ | mov ARG1, TAB:RD
+ | add BASE, 8
+ | mov ARG2, BASE
+ | sub BASE, 8+8
+ | mov ARG3, BASE
+ |.endif
+ | call extern lj_tab_next // (GCtab *t, cTValue *key, TValue *o)
+ | // 1=found, 0=end, -1=error returned in eax (RD).
+ | mov BASE, RB // Restore BASE.
+ | test RD, RD; jg ->fff_res2 // Found key/value.
+ | js ->fff_fallback_2 // Invalid key.
+ | // End of traversal: return nil.
+ | mov dword [BASE-4], LJ_TNIL
+ | jmp ->fff_res1
+ |2: // Set missing 2nd arg to nil.
+ | mov dword [BASE+12], LJ_TNIL
+ | jmp <1
+ |
+ |.ffunc_1 pairs
+ | mov TAB:RB, [BASE]
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+#if LJ_52
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RB, [BASE-8]
+ | mov CFUNC:RD, CFUNC:RB->upvalue[0]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TFUNC
+ | mov [BASE-8], CFUNC:RD
+ | mov dword [BASE+12], LJ_TNIL
+ | mov RD, 1+3
+ | jmp ->fff_res
+ |
+ |.ffunc_2 ipairs_aux
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM
+ |.if DUALNUM
+ | jne ->fff_fallback
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ | mov PC, [BASE-4]
+ |.if DUALNUM
+ | mov RD, dword [BASE+8]
+ | add RD, 1
+ | mov dword [BASE-4], LJ_TISNUM
+ | mov dword [BASE-8], RD
+ |.else
+ | movsd xmm0, qword [BASE+8]
+ | sseconst_1 xmm1, RBa
+ | addsd xmm0, xmm1
+ | cvttsd2si RD, xmm0
+ | movsd qword [BASE-8], xmm0
+ |.endif
+ | mov TAB:RB, [BASE]
+ | cmp RD, TAB:RB->asize; jae >2 // Not in array part?
+ | shl RD, 3
+ | add RD, TAB:RB->array
+ |1:
+ | cmp dword [RD+4], LJ_TNIL; je ->fff_res0
+ | // Copy array slot.
+ |.if X64
+ | mov RBa, [RD]
+ | mov [BASE], RBa
+ |.else
+ | mov RB, [RD]
+ | mov RD, [RD+4]
+ | mov [BASE], RB
+ | mov [BASE+4], RD
+ |.endif
+ |->fff_res2:
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |2: // Check for empty hash part first. Otherwise call C function.
+ | cmp dword TAB:RB->hmask, 0; je ->fff_res0
+ | mov FCARG1, TAB:RB
+ | mov RB, BASE // Save BASE.
+ | mov FCARG2, RD // Caveat: FCARG2 == BASE
+ | call extern lj_tab_getinth@8 // (GCtab *t, int32_t key)
+ | // cTValue * or NULL returned in eax (RD).
+ | mov BASE, RB
+ | test RD, RD
+ | jnz <1
+ |->fff_res0:
+ | mov RD, 1+0
+ | jmp ->fff_res
+ |
+ |.ffunc_1 ipairs
+ | mov TAB:RB, [BASE]
+ | cmp dword [BASE+4], LJ_TTAB; jne ->fff_fallback
+#if LJ_52
+ | cmp dword TAB:RB->metatable, 0; jne ->fff_fallback
+#endif
+ | mov CFUNC:RB, [BASE-8]
+ | mov CFUNC:RD, CFUNC:RB->upvalue[0]
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TFUNC
+ | mov [BASE-8], CFUNC:RD
+ |.if DUALNUM
+ | mov dword [BASE+12], LJ_TISNUM
+ | mov dword [BASE+8], 0
+ |.else
+ | xorps xmm0, xmm0
+ | movsd qword [BASE+8], xmm0
+ |.endif
+ | mov RD, 1+3
+ | jmp ->fff_res
+ |
+ |//-- Base library: catch errors ----------------------------------------
+ |
+ |.ffunc_1 pcall
+ | lea RA, [BASE+8]
+ | sub NARGS:RD, 1
+ | mov PC, 8+FRAME_PCALL
+ |1:
+ | movzx RB, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | shr RB, HOOK_ACTIVE_SHIFT
+ | and RB, 1
+ | add PC, RB // Remember active hook before pcall.
+ | jmp ->vm_call_dispatch
+ |
+ |.ffunc_2 xpcall
+ | cmp dword [BASE+12], LJ_TFUNC; jne ->fff_fallback
+ | mov RB, [BASE+4] // Swap function and traceback.
+ | mov [BASE+12], RB
+ | mov dword [BASE+4], LJ_TFUNC
+ | mov LFUNC:RB, [BASE]
+ | mov PC, [BASE+8]
+ | mov [BASE+8], LFUNC:RB
+ | mov [BASE], PC
+ | lea RA, [BASE+16]
+ | sub NARGS:RD, 2
+ | mov PC, 16+FRAME_PCALL
+ | jmp <1
+ |
+ |//-- Coroutine library --------------------------------------------------
+ |
+ |.macro coroutine_resume_wrap, resume
+ |.if resume
+ |.ffunc_1 coroutine_resume
+ | mov L:RB, [BASE]
+ |.else
+ |.ffunc coroutine_wrap_aux
+ | mov CFUNC:RB, [BASE-8]
+ | mov L:RB, CFUNC:RB->upvalue[0].gcr
+ |.endif
+ | mov PC, [BASE-4]
+ | mov SAVE_PC, PC
+ |.if X64
+ | mov TMP1, L:RB
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ |.if resume
+ | cmp dword [BASE+4], LJ_TTHREAD; jne ->fff_fallback
+ |.endif
+ | cmp aword L:RB->cframe, 0; jne ->fff_fallback
+ | cmp byte L:RB->status, LUA_YIELD; ja ->fff_fallback
+ | mov RA, L:RB->top
+ | je >1 // Skip init-func check if status == LUA_YIELD.
+ | cmp RA, L:RB->base // Check for presence of initial func.
+ | je ->fff_fallback
+ |1:
+ |.if resume
+ | lea PC, [RA+NARGS:RD*8-16] // Check stack space (-1-thread).
+ |.else
+ | lea PC, [RA+NARGS:RD*8-8] // Check stack space (-1).
+ |.endif
+ | cmp PC, L:RB->maxstack; ja ->fff_fallback
+ | mov L:RB->top, PC
+ |
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if resume
+ | add BASE, 8 // Keep resumed thread in stack for GC.
+ |.endif
+ | mov L:RB->top, BASE
+ |.if resume
+ | lea RB, [BASE+NARGS:RD*8-24] // RB = end of source for stack move.
+ |.else
+ | lea RB, [BASE+NARGS:RD*8-16] // RB = end of source for stack move.
+ |.endif
+ | sub RBa, PCa // Relative to PC.
+ |
+ | cmp PC, RA
+ | je >3
+ |2: // Move args to coroutine.
+ |.if X64
+ | mov RCa, [PC+RB]
+ | mov [PC-8], RCa
+ |.else
+ | mov RC, [PC+RB+4]
+ | mov [PC-4], RC
+ | mov RC, [PC+RB]
+ | mov [PC-8], RC
+ |.endif
+ | sub PC, 8
+ | cmp PC, RA
+ | jne <2
+ |3:
+ |.if X64
+ | mov CARG2d, RA
+ | mov CARG1d, TMP1
+ |.else
+ | mov ARG2, RA
+ | xor RA, RA
+ | mov ARG4, RA
+ | mov ARG3, RA
+ |.endif
+ | call ->vm_resume // (lua_State *L, TValue *base, 0, 0)
+ |
+ | mov L:RB, SAVE_L
+ |.if X64
+ | mov L:PC, TMP1
+ |.else
+ | mov L:PC, ARG1 // The callee doesn't modify SAVE_L.
+ |.endif
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ |
+ | cmp eax, LUA_YIELD
+ | ja >8
+ |4:
+ | mov RA, L:PC->base
+ | mov KBASE, L:PC->top
+ | mov L:PC->top, RA // Clear coroutine stack.
+ | mov PC, KBASE
+ | sub PC, RA
+ | je >6 // No results?
+ | lea RD, [BASE+PC]
+ | shr PC, 3
+ | cmp RD, L:RB->maxstack
+ | ja >9 // Need to grow stack?
+ |
+ | mov RB, BASE
+ | sub RBa, RAa
+ |5: // Move results from coroutine.
+ |.if X64
+ | mov RDa, [RA]
+ | mov [RA+RB], RDa
+ |.else
+ | mov RD, [RA]
+ | mov [RA+RB], RD
+ | mov RD, [RA+4]
+ | mov [RA+RB+4], RD
+ |.endif
+ | add RA, 8
+ | cmp RA, KBASE
+ | jne <5
+ |6:
+ |.if resume
+ | lea RD, [PC+2] // nresults+1 = 1 + true + results.
+ | mov dword [BASE-4], LJ_TTRUE // Prepend true to results.
+ |.else
+ | lea RD, [PC+1] // nresults+1 = 1 + results.
+ |.endif
+ |7:
+ | mov PC, SAVE_PC
+ | mov MULTRES, RD
+ |.if resume
+ | mov RAa, -8
+ |.else
+ | xor RA, RA
+ |.endif
+ | test PC, FRAME_TYPE
+ | jz ->BC_RET_Z
+ | jmp ->vm_return
+ |
+ |8: // Coroutine returned with error (at co->top-1).
+ |.if resume
+ | mov dword [BASE-4], LJ_TFALSE // Prepend false to results.
+ | mov RA, L:PC->top
+ | sub RA, 8
+ | mov L:PC->top, RA // Clear error from coroutine stack.
+ | // Copy error message.
+ |.if X64
+ | mov RDa, [RA]
+ | mov [BASE], RDa
+ |.else
+ | mov RD, [RA]
+ | mov [BASE], RD
+ | mov RD, [RA+4]
+ | mov [BASE+4], RD
+ |.endif
+ | mov RD, 1+2 // nresults+1 = 1 + false + error.
+ | jmp <7
+ |.else
+ | mov FCARG2, L:PC
+ | mov FCARG1, L:RB
+ | call extern lj_ffh_coroutine_wrap_err@8 // (lua_State *L, lua_State *co)
+ | // Error function does not return.
+ |.endif
+ |
+ |9: // Handle stack expansion on return from yield.
+ |.if X64
+ | mov L:RA, TMP1
+ |.else
+ | mov L:RA, ARG1 // The callee doesn't modify SAVE_L.
+ |.endif
+ | mov L:RA->top, KBASE // Undo coroutine stack clearing.
+ | mov FCARG2, PC
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ |.if X64
+ | mov L:PC, TMP1
+ |.else
+ | mov L:PC, ARG1
+ |.endif
+ | mov BASE, L:RB->base
+ | jmp <4 // Retry the stack move.
+ |.endmacro
+ |
+ | coroutine_resume_wrap 1 // coroutine.resume
+ | coroutine_resume_wrap 0 // coroutine.wrap
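+ |// The macro stamps out two near-identical fast functions: resume takes the
+ |// coroutine as its first argument, wrap_aux pulls it from the closure
+ |// upvalue and rethrows errors instead of returning false plus the message.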
+ |
+ |.ffunc coroutine_yield
+ | mov L:RB, SAVE_L
+ | test aword L:RB->cframe, CFRAME_RESUME
+ | jz ->fff_fallback
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->top, RD
+ | xor RD, RD
+ | mov aword L:RB->cframe, RDa
+ | mov al, LUA_YIELD
+ | mov byte L:RB->status, al
+ | jmp ->vm_leave_unw
+ |
+ |//-- Math library -------------------------------------------------------
+ |
+ |.if not DUALNUM
+ |->fff_resi: // Dummy.
+ |.endif
+ |
+ |->fff_resn:
+ | mov PC, [BASE-4]
+ | fstp qword [BASE-8]
+ | jmp ->fff_res1
+ |
+ | .ffunc_1 math_abs
+ |.if DUALNUM
+ | cmp dword [BASE+4], LJ_TISNUM; jne >2
+ | mov RB, dword [BASE]
+ | cmp RB, 0; jns ->fff_resi
+ | neg RB; js >1
+ |->fff_resbit:
+ |->fff_resi:
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TISNUM
+ | mov dword [BASE-8], RB
+ | jmp ->fff_res1
+ |1:
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], 0x41e00000 // 2^31.
+ | mov dword [BASE-8], 0
+ | jmp ->fff_res1
+ |2:
+ | ja ->fff_fallback
+ |.else
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]
+ | sseconst_abs xmm1, RDa
+ | andps xmm0, xmm1
+ |->fff_resxmm0:
+ | mov PC, [BASE-4]
+ | movsd qword [BASE-8], xmm0
+ | // fallthrough
+ |
+ |->fff_res1:
+ | mov RD, 1+1
+ |->fff_res:
+ | mov MULTRES, RD
+ |->fff_res_:
+ | test PC, FRAME_TYPE
+ | jnz >7
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ | // Adjust BASE. KBASE is assumed to be set for the calling frame.
+ | movzx RA, PC_RA
+ | not RAa // Note: ~RA = -(RA+1)
+ | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ | mov dword [BASE+RD*8-12], LJ_TNIL
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | mov RAa, -8 // Results start at BASE+RA = BASE-8.
+ | jmp ->vm_return
+ |
+ |.if X64
+ |.define fff_resfp, fff_resxmm0
+ |.else
+ |.define fff_resfp, fff_resn
+ |.endif
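+ |// External C math calls return doubles in st0 on x86 but in xmm0 on x64;
+ |// fff_resfp picks the matching store-result-and-return helper per build.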
+ |
+ |.macro math_round, func
+ | .ffunc math_ .. func
+ |.if DUALNUM
+ | cmp dword [BASE+4], LJ_TISNUM; jne >1
+ | mov RB, dword [BASE]; jmp ->fff_resi
+ |1:
+ | ja ->fff_fallback
+ |.else
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]
+ | call ->vm_ .. func .. _sse
+ |.if DUALNUM
+ | cvttsd2si RB, xmm0
+ | cmp RB, 0x80000000
+ | jne ->fff_resi
+ | cvtsi2sd xmm1, RB
+ | ucomisd xmm0, xmm1
+ | jp ->fff_resxmm0
+ | je ->fff_resi
+ |.endif
+ | jmp ->fff_resxmm0
+ |.endmacro
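+ |// Narrowing check: cvttsd2si returns 0x80000000 for any out-of-range or
+ |// NaN input as well as for a genuine -2^31. The round-trip compare sorts
+ |// it out: jp (unordered) means NaN -> return the number; je means it
+ |// really was -2^31 -> return the integer; else fall back to a number.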
+ |
+ | math_round floor
+ | math_round ceil
+ |
+ |.ffunc_nsse math_sqrt, sqrtsd; jmp ->fff_resxmm0
+ |
+ |.ffunc math_log
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback // Exactly one argument.
+ | cmp dword [BASE+4], LJ_TISNUM; jae ->fff_fallback
+ | movsd xmm0, qword [BASE]
+ |.if not X64
+ | movsd FPARG1, xmm0
+ |.endif
+ | mov RB, BASE
+ | call extern log
+ | mov BASE, RB
+ | jmp ->fff_resfp
+ |
+ |.macro math_extern, func
+ | .ffunc_nsse math_ .. func
+ |.if not X64
+ | movsd FPARG1, xmm0
+ |.endif
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resfp
+ |.endmacro
+ |
+ |.macro math_extern2, func
+ | .ffunc_nnsse math_ .. func
+ |.if not X64
+ | movsd FPARG1, xmm0
+ | movsd FPARG3, xmm1
+ |.endif
+ | mov RB, BASE
+ | call extern func
+ | mov BASE, RB
+ | jmp ->fff_resfp
+ |.endmacro
+ |
+ | math_extern log10
+ | math_extern exp
+ | math_extern sin
+ | math_extern cos
+ | math_extern tan
+ | math_extern asin
+ | math_extern acos
+ | math_extern atan
+ | math_extern sinh
+ | math_extern cosh
+ | math_extern tanh
+ | math_extern2 pow
+ | math_extern2 atan2
+ | math_extern2 fmod
+ |
+ |.ffunc_nnr math_ldexp; fscale; fpop1; jmp ->fff_resn
+ |
+ |.ffunc_1 math_frexp
+ | mov RB, [BASE+4]
+ | cmp RB, LJ_TISNUM; jae ->fff_fallback
+ | mov PC, [BASE-4]
+ | mov RC, [BASE]
+ | mov [BASE-4], RB; mov [BASE-8], RC
+ | shl RB, 1; cmp RB, 0xffe00000; jae >3
+ | or RC, RB; jz >3
+ | mov RC, 1022
+ | cmp RB, 0x00200000; jb >4
+ |1:
+ | shr RB, 21; sub RB, RC // Extract and unbias exponent.
+ | cvtsi2sd xmm0, RB
+ | mov RB, [BASE-4]
+ | and RB, 0x800fffff // Mask off exponent.
+ | or RB, 0x3fe00000 // Put mantissa in range [0.5,1) or 0.
+ | mov [BASE-4], RB
+ |2:
+ | movsd qword [BASE], xmm0
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |3: // Return +-0, +-Inf, NaN unmodified and an exponent of 0.
+ | xorps xmm0, xmm0; jmp <2
+ |4: // Handle denormals by multiplying with 2^54 and adjusting the bias.
+ | movsd xmm0, qword [BASE]
+ | sseconst_hi xmm1, RBa, 43500000 // 2^54.
+ | mulsd xmm0, xmm1
+ | movsd qword [BASE-8], xmm0
+ | mov RB, [BASE-4]; mov RC, 1076; shl RB, 1; jmp <1
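+ |// frexp by bit surgery: after the shl by 1, the 11-bit biased exponent is
+ |// in bits 21..31. Unbiasing with 1022 (not 1023) yields a mantissa in
+ |// [0.5,1). Denormals are prescaled by 2^54, so their bias is 1022+54 = 1076.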
+ |
+ |.ffunc_nsse math_modf
+ | mov RB, [BASE+4]
+ | mov PC, [BASE-4]
+ | shl RB, 1; cmp RB, 0xffe00000; je >4 // +-Inf?
+ | movaps xmm4, xmm0
+ | call ->vm_trunc_sse
+ | subsd xmm4, xmm0
+ |1:
+ | movsd qword [BASE-8], xmm0
+ | movsd qword [BASE], xmm4
+ | mov RC, [BASE-4]; mov RB, [BASE+4]
+ | xor RC, RB; js >3 // Need to adjust sign?
+ |2:
+ | mov RD, 1+2
+ | jmp ->fff_res
+ |3:
+ | xor RB, 0x80000000; mov [BASE+4], RB // Flip sign of fraction.
+ | jmp <2
+ |4:
+ | xorps xmm4, xmm4; jmp <1 // Return +-Inf and +-0.
+ |
+ |.macro math_minmax, name, cmovop, sseop
+ | .ffunc_1 name
+ | mov RA, 2
+ | cmp dword [BASE+4], LJ_TISNUM
+ |.if DUALNUM
+ | jne >4
+ | mov RB, dword [BASE]
+ |1: // Handle integers.
+ | cmp RA, RD; jae ->fff_resi
+ | cmp dword [BASE+RA*8-4], LJ_TISNUM; jne >3
+ | cmp RB, dword [BASE+RA*8-8]
+ | cmovop RB, dword [BASE+RA*8-8]
+ | add RA, 1
+ | jmp <1
+ |3:
+ | ja ->fff_fallback
+ | // Convert intermediate result to number and continue below.
+ | cvtsi2sd xmm0, RB
+ | jmp >6
+ |4:
+ | ja ->fff_fallback
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ |
+ | movsd xmm0, qword [BASE]
+ |5: // Handle numbers or integers.
+ | cmp RA, RD; jae ->fff_resxmm0
+ | cmp dword [BASE+RA*8-4], LJ_TISNUM
+ |.if DUALNUM
+ | jb >6
+ | ja ->fff_fallback
+ | cvtsi2sd xmm1, dword [BASE+RA*8-8]
+ | jmp >7
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ |6:
+ | movsd xmm1, qword [BASE+RA*8-8]
+ |7:
+ | sseop xmm0, xmm1
+ | add RA, 1
+ | jmp <5
+ |.endmacro
+ |
+ | math_minmax math_min, cmovg, minsd
+ | math_minmax math_max, cmovl, maxsd
+ |
+ |//-- String library -----------------------------------------------------
+ |
+ |.ffunc string_byte // Only handle the 1-arg case here.
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov STR:RB, [BASE]
+ | mov PC, [BASE-4]
+ | cmp dword STR:RB->len, 1
+ | jb ->fff_res0 // Return no results for empty string.
+ | movzx RB, byte STR:RB[1]
+ |.if DUALNUM
+ | jmp ->fff_resi
+ |.else
+ | cvtsi2sd xmm0, RB; jmp ->fff_resxmm0
+ |.endif
+ |
+ |.ffunc string_char // Only handle the 1-arg case here.
+ | ffgccheck
+ | cmp NARGS:RD, 1+1; jne ->fff_fallback // *Exactly* 1 arg.
+ | cmp dword [BASE+4], LJ_TISNUM
+ |.if DUALNUM
+ | jne ->fff_fallback
+ | mov RB, dword [BASE]
+ | cmp RB, 255; ja ->fff_fallback
+ | mov TMP2, RB
+ |.else
+ | jae ->fff_fallback
+ | cvttsd2si RB, qword [BASE]
+ | cmp RB, 255; ja ->fff_fallback
+ | mov TMP2, RB
+ |.endif
+ |.if X64
+ | mov TMP3, 1
+ |.else
+ | mov ARG3, 1
+ |.endif
+ | lea RDa, TMP2 // Points to stack. Little-endian.
+ |->fff_newstr:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ |.if X64
+ | mov CARG3d, TMP3 // Zero-extended to size_t.
+ | mov CARG2, RDa // May be 64 bit ptr to stack.
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG2, RD
+ | mov ARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_str_new // (lua_State *L, char *str, size_t l)
+ |->fff_resstr:
+ | // GCstr * returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov PC, [BASE-4]
+ | mov dword [BASE-4], LJ_TSTR
+ | mov [BASE-8], STR:RD
+ | jmp ->fff_res1
+ |
+ |.ffunc string_sub
+ | ffgccheck
+ | mov TMP2, -1
+ | cmp NARGS:RD, 1+2; jb ->fff_fallback
+ | jna >1
+ | cmp dword [BASE+20], LJ_TISNUM
+ |.if DUALNUM
+ | jne ->fff_fallback
+ | mov RB, dword [BASE+16]
+ | mov TMP2, RB
+ |.else
+ | jae ->fff_fallback
+ | cvttsd2si RB, qword [BASE+16]
+ | mov TMP2, RB
+ |.endif
+ |1:
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | cmp dword [BASE+12], LJ_TISNUM
+ |.if DUALNUM
+ | jne ->fff_fallback
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ | mov STR:RB, [BASE]
+ | mov TMP3, STR:RB
+ | mov RB, STR:RB->len
+ |.if DUALNUM
+ | mov RA, dword [BASE+8]
+ |.else
+ | cvttsd2si RA, qword [BASE+8]
+ |.endif
+ | mov RC, TMP2
+ | cmp RB, RC // len < end? (unsigned compare)
+ | jb >5
+ |2:
+ | test RA, RA // start <= 0?
+ | jle >7
+ |3:
+ | mov STR:RB, TMP3
+ | sub RC, RA // start > end?
+ | jl ->fff_emptystr
+ | lea RB, [STR:RB+RA+#STR-1]
+ | add RC, 1
+ |4:
+ |.if X64
+ | mov TMP3, RC
+ |.else
+ | mov ARG3, RC
+ |.endif
+ | mov RD, RB
+ | jmp ->fff_newstr
+ |
+ |5: // Negative end or overflow.
+ | jl >6
+ | lea RC, [RC+RB+1] // end = end+(len+1)
+ | jmp <2
+ |6: // Overflow.
+ | mov RC, RB // end = len
+ | jmp <2
+ |
+ |7: // Negative start or underflow.
+ | je >8
+ | add RA, RB // start = start+(len+1)
+ | add RA, 1
+ | jg <3 // start > 0?
+ |8: // Underflow.
+ | mov RA, 1 // start = 1
+ | jmp <3
+ |
+ |->fff_emptystr: // Range underflow.
+ | xor RC, RC // Zero length. Any ptr in RB is ok.
+ | jmp <4
+ |
+ |.macro ffstring_op, name
+ | .ffunc_1 string_ .. name
+ | ffgccheck
+ | cmp dword [BASE+4], LJ_TSTR; jne ->fff_fallback
+ | mov L:RB, SAVE_L
+ | lea SBUF:FCARG1, [DISPATCH+DISPATCH_GL(tmpbuf)]
+ | mov L:RB->base, BASE
+ | mov STR:FCARG2, [BASE] // Caveat: FCARG2 == BASE
+ | mov RCa, SBUF:FCARG1->b
+ | mov SBUF:FCARG1->L, L:RB
+ | mov SBUF:FCARG1->w, RCa
+ | mov SAVE_PC, PC
+ | call extern lj_buf_putstr_ .. name .. @8
+ | mov FCARG1, eax
+ | call extern lj_buf_tostr@4
+ | jmp ->fff_resstr
+ |.endmacro
+ |
+ |ffstring_op reverse
+ |ffstring_op lower
+ |ffstring_op upper
+ |
+ |//-- Bit library --------------------------------------------------------
+ |
+ |.macro .ffunc_bit, name, kind, fdef
+ | fdef name
+ |.if kind == 2
+ | sseconst_tobit xmm1, RBa
+ |.endif
+ | cmp dword [BASE+4], LJ_TISNUM
+ |.if DUALNUM
+ | jne >1
+ | mov RB, dword [BASE]
+ |.if kind > 0
+ | jmp >2
+ |.else
+ | jmp ->fff_resbit
+ |.endif
+ |1:
+ | ja ->fff_fallback
+ |.else
+ | jae ->fff_fallback
+ |.endif
+ | movsd xmm0, qword [BASE]
+ |.if kind < 2
+ | sseconst_tobit xmm1, RBa
+ |.endif
+ | addsd xmm0, xmm1
+ | movd RB, xmm0
+ |2:
+ |.endmacro
+ |
+ |.macro .ffunc_bit, name, kind
+ | .ffunc_bit name, kind, .ffunc_1
+ |.endmacro
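+ |// The `kind` parameter controls conversion: 0 = the tobit conversion is
+ |// itself the result, 1 = one integer operand wanted in RB, 2 = also keep
+ |// the tobit constant live in xmm1 so .ffunc_bit_op can cheaply convert
+ |// every further argument. The 2-arg form defaults the definition macro
+ |// to .ffunc_1.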
+ |
+ |.ffunc_bit bit_tobit, 0
+ | jmp ->fff_resbit
+ |
+ |.macro .ffunc_bit_op, name, ins
+ | .ffunc_bit name, 2
+ | mov TMP2, NARGS:RD // Save for fallback.
+ | lea RD, [BASE+NARGS:RD*8-16]
+ |1:
+ | cmp RD, BASE
+ | jbe ->fff_resbit
+ | cmp dword [RD+4], LJ_TISNUM
+ |.if DUALNUM
+ | jne >2
+ | ins RB, dword [RD]
+ | sub RD, 8
+ | jmp <1
+ |2:
+ | ja ->fff_fallback_bit_op
+ |.else
+ | jae ->fff_fallback_bit_op
+ |.endif
+ | movsd xmm0, qword [RD]
+ | addsd xmm0, xmm1
+ | movd RA, xmm0
+ | ins RB, RA
+ | sub RD, 8
+ | jmp <1
+ |.endmacro
+ |
+ |.ffunc_bit_op bit_band, and
+ |.ffunc_bit_op bit_bor, or
+ |.ffunc_bit_op bit_bxor, xor
+ |
+ |.ffunc_bit bit_bswap, 1
+ | bswap RB
+ | jmp ->fff_resbit
+ |
+ |.ffunc_bit bit_bnot, 1
+ | not RB
+ |.if DUALNUM
+ | jmp ->fff_resbit
+ |.else
+ |->fff_resbit:
+ | cvtsi2sd xmm0, RB
+ | jmp ->fff_resxmm0
+ |.endif
+ |
+ |->fff_fallback_bit_op:
+ | mov NARGS:RD, TMP2 // Restore for fallback
+ | jmp ->fff_fallback
+ |
+ |.macro .ffunc_bit_sh, name, ins
+ |.if DUALNUM
+ | .ffunc_bit name, 1, .ffunc_2
+ | // Note: no inline conversion from number for 2nd argument!
+ | cmp dword [BASE+12], LJ_TISNUM; jne ->fff_fallback
+ | mov RA, dword [BASE+8]
+ |.else
+ | .ffunc_nnsse name
+ | sseconst_tobit xmm2, RBa
+ | addsd xmm0, xmm2
+ | addsd xmm1, xmm2
+ | movd RB, xmm0
+ | movd RA, xmm1
+ |.endif
+ | ins RB, cl // Assumes RA is ecx.
+ | jmp ->fff_resbit
+ |.endmacro
+ |
+ |.ffunc_bit_sh bit_lshift, shl
+ |.ffunc_bit_sh bit_rshift, shr
+ |.ffunc_bit_sh bit_arshift, sar
+ |.ffunc_bit_sh bit_rol, rol
+ |.ffunc_bit_sh bit_ror, ror
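+ |// x86 variable shifts and rotates take their count in cl, and the fixed
+ |// register map has RA = ecx, so the count is already in place. The CPU
+ |// masks the count mod 32, which matches BitOp's documented semantics of
+ |// using only the lower 5 bits, so no explicit masking is needed.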
+ |
+ |//-----------------------------------------------------------------------
+ |
+ |->fff_fallback_2:
+ | mov NARGS:RD, 1+2 // Other args are ignored, anyway.
+ | jmp ->fff_fallback
+ |->fff_fallback_1:
+ | mov NARGS:RD, 1+1 // Other args are ignored, anyway.
+ |->fff_fallback: // Call fast function fallback handler.
+ | // BASE = new base, RD = nargs+1
+ | mov L:RB, SAVE_L
+ | mov PC, [BASE-4] // Fallback may overwrite PC.
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | lea RA, [RD+8*LUA_MINSTACK] // Ensure enough space for handler.
+ | mov L:RB->top, RD
+ | mov CFUNC:RD, [BASE-8]
+ | cmp RA, L:RB->maxstack
+ | ja >5 // Need to grow stack.
+ |.if X64
+ | mov CARG1d, L:RB
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ | call aword CFUNC:RD->f // (lua_State *L)
+ | mov BASE, L:RB->base
+ | // Either throws an error, or recovers and returns -1, 0 or nresults+1.
+ | test RD, RD; jg ->fff_res // Returned nresults+1?
+ |1:
+ | mov RA, L:RB->top
+ | sub RA, BASE
+ | shr RA, 3
+ | test RD, RD
+ | lea NARGS:RD, [RA+1]
+ | mov LFUNC:RB, [BASE-8]
+ | jne ->vm_call_tail // Returned -1?
+ | ins_callt // Returned 0: retry fast path.
+ |
+ |// Reconstruct previous base for vmeta_call during tailcall.
+ |->vm_call_tail:
+ | mov RA, BASE
+ | test PC, FRAME_TYPE
+ | jnz >3
+ | movzx RB, PC_RA
+ | not RBa // Note: ~RB = -(RB+1)
+ | lea BASE, [BASE+RB*8] // base = base - (RB+1)*8
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |3:
+ | mov RB, PC
+ | and RB, -8
+ | sub BASE, RB
+ | jmp ->vm_call_dispatch // Resolve again for tailcall.
+ |
+ |5: // Grow stack for fallback handler.
+ | mov FCARG2, LUA_MINSTACK
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | xor RD, RD // Simulate a return 0.
+ | jmp <1 // Dumb retry (goes through ff first).
+ |
+ |->fff_gcstep: // Call GC step function.
+ | // BASE = new base, RD = nargs+1
+ | pop RBa // Must keep stack at same level.
+ | mov TMPa, RBa // Save return address
+ | mov L:RB, SAVE_L
+ | mov SAVE_PC, PC // Redundant (but a defined value).
+ | mov L:RB->base, BASE
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov FCARG1, L:RB
+ | mov L:RB->top, RD
+ | call extern lj_gc_step@4 // (lua_State *L)
+ | mov BASE, L:RB->base
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | mov RBa, TMPa
+ | push RBa // Restore return address.
+ | ret
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Special dispatch targets -------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->vm_record: // Dispatch target for recording phase.
+ |.if JIT
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_VMEVENT // No recording while in vmevent.
+ | jnz >5
+ | // Decrement the hookcount for consistency, but always do the call.
+ | test RDL, HOOK_ACTIVE
+ | jnz >1
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >1
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jmp >1
+ |.endif
+ |
+ |->vm_rethook: // Dispatch target for return hooks.
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ | jmp >1
+ |
+ |->vm_inshook: // Dispatch target for instr/line hooks.
+ | movzx RD, byte [DISPATCH+DISPATCH_GL(hookmask)]
+ | test RDL, HOOK_ACTIVE // Hook already active?
+ | jnz >5
+ |
+ | test RDL, LUA_MASKLINE|LUA_MASKCOUNT
+ | jz >5
+ | dec dword [DISPATCH+DISPATCH_GL(hookcount)]
+ | jz >1
+ | test RDL, LUA_MASKLINE
+ | jz >5
+ |1:
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, PC // Caveat: FCARG2 == BASE
+ | mov FCARG1, L:RB
+ | // SAVE_PC must hold the _previous_ PC. The callee updates it with PC.
+ | call extern lj_dispatch_ins@8 // (lua_State *L, const BCIns *pc)
+ |3:
+ | mov BASE, L:RB->base
+ |4:
+ | movzx RA, PC_RA
+ |5:
+ | movzx OP, PC_OP
+ | movzx RD, PC_RD
+ |.if X64
+ | jmp aword [DISPATCH+OP*8+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |.else
+ | jmp aword [DISPATCH+OP*4+GG_DISP2STATIC] // Re-dispatch to static ins.
+ |.endif
+ |
+ |->cont_hook: // Continue from hook yield.
+ | add PC, 4
+ | mov RA, [RB-24]
+ | mov MULTRES, RA // Restore MULTRES for *M ins.
+ | jmp <4
+ |
+ |->vm_hotloop: // Hot loop counter underflow.
+ |.if JIT
+ | mov LFUNC:RB, [BASE-8] // Same as curr_topL(L).
+ | mov RB, LFUNC:RB->pc
+ | movzx RD, byte [RB+PC2PROTO(framesize)]
+ | lea RD, [BASE+RD*8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov FCARG2, PC
+ | lea FCARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+ | mov SAVE_PC, PC
+ | call extern lj_trace_hot@8 // (jit_State *J, const BCIns *pc)
+ | jmp <3
+ |.endif
+ |
+ |->vm_callhook: // Dispatch target for call hooks.
+ | mov SAVE_PC, PC
+ |.if JIT
+ | jmp >1
+ |.endif
+ |
+ |->vm_hotcall: // Hot call counter underflow.
+ |.if JIT
+ | mov SAVE_PC, PC
+ | or PC, 1 // Marker for hot call.
+ |1:
+ |.endif
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RD
+ | mov FCARG2, PC
+ | mov FCARG1, L:RB
+ | call extern lj_dispatch_call@8 // (lua_State *L, const BCIns *pc)
+ | // ASMFunction returned in eax/rax (RDa).
+ | mov SAVE_PC, 0 // Invalidate for subsequent line hook.
+ |.if JIT
+ | and PC, -2
+ |.endif
+ | mov BASE, L:RB->base
+ | mov RAa, RDa
+ | mov RD, L:RB->top
+ | sub RD, BASE
+ | mov RBa, RAa
+ | movzx RA, PC_RA
+ | shr RD, 3
+ | add NARGS:RD, 1
+ | jmp RBa
+ |
+ |->cont_stitch: // Trace stitching.
+ |.if JIT
+ | // BASE = base, RC = result, RB = mbase
+ | mov TRACE:RA, [RB-24] // Save previous trace.
+ | mov TMP1, TRACE:RA
+ | mov TMP3, DISPATCH // Need one more register.
+ | mov DISPATCH, MULTRES
+ | movzx RA, PC_RA
+ | lea RA, [BASE+RA*8] // Call base.
+ | sub DISPATCH, 1
+ | jz >2
+ |1: // Move results down.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [RA], RBa
+ |.else
+ | mov RB, [RC]
+ | mov [RA], RB
+ | mov RB, [RC+4]
+ | mov [RA+4], RB
+ |.endif
+ | add RC, 8
+ | add RA, 8
+ | sub DISPATCH, 1
+ | jnz <1
+ |2:
+ | movzx RC, PC_RA
+ | movzx RB, PC_RB
+ | add RC, RB
+ | lea RC, [BASE+RC*8-8]
+ |3:
+ | cmp RC, RA
+ | ja >9 // More results wanted?
+ |
+ | mov DISPATCH, TMP3
+ | mov TRACE:RD, TMP1 // Get previous trace.
+ | movzx RB, word TRACE:RD->traceno
+ | movzx RD, word TRACE:RD->link
+ | cmp RD, RB
+ | je ->cont_nop // Blacklisted.
+ | test RD, RD
+ | jne =>BC_JLOOP // Jump to stitched trace.
+ |
+ | // Stitch a new trace to the previous trace.
+ | mov [DISPATCH+DISPATCH_J(exitno)], RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, PC
+ | lea FCARG1, [DISPATCH+GG_DISP2J]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+ | call extern lj_dispatch_stitch@8 // (jit_State *J, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | jmp ->cont_nop
+ |
+ |9: // Fill up results with nil.
+ | mov dword [RA+4], LJ_TNIL
+ | add RA, 8
+ | jmp <3
+ |.endif
+ |
+ |->vm_profhook: // Dispatch target for profiler hook.
+#if LJ_HASPROFILE
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov FCARG2, PC // Caveat: FCARG2 == BASE
+ | mov FCARG1, L:RB
+ | call extern lj_dispatch_profile@8 // (lua_State *L, const BCIns *pc)
+ | mov BASE, L:RB->base
+ | // HOOK_PROFILE is off again, so re-dispatch to dynamic instruction.
+ | sub PC, 4
+ | jmp ->cont_nop
+#endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Trace exit handler -------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Called from an exit stub with the exit number on the stack.
+ |// The 16 bit exit number is stored with two (sign-extended) push imm8.
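+ |// Each stub pushes its low byte and the stub group's trailer pushes the
+ |// shared high byte before jumping here. Since push imm8 stores a whole
+ |// sign-extended word, only the low byte of each of the two stack slots
+ |// is meaningful, which is why RC and RCH are loaded separately below.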
+ |->vm_exit_handler:
+ |.if JIT
+ |.if X64
+ | push r13; push r12
+ | push r11; push r10; push r9; push r8
+ | push rdi; push rsi; push rbp; lea rbp, [rsp+88]; push rbp
+ | push rbx; push rdx; push rcx; push rax
+ | movzx RC, byte [rbp-8] // Reconstruct exit number.
+ | mov RCH, byte [rbp-16]
+ | mov [rbp-8], r15; mov [rbp-16], r14
+ |.else
+ | push ebp; lea ebp, [esp+12]; push ebp
+ | push ebx; push edx; push ecx; push eax
+ | movzx RC, byte [ebp-4] // Reconstruct exit number.
+ | mov RCH, byte [ebp-8]
+ | mov [ebp-4], edi; mov [ebp-8], esi
+ |.endif
+ | // Caveat: DISPATCH is ebx.
+ | mov DISPATCH, [ebp]
+ | mov RA, [DISPATCH+DISPATCH_GL(vmstate)] // Get trace number.
+ | set_vmstate EXIT
+ | mov [DISPATCH+DISPATCH_J(exitno)], RC
+ | mov [DISPATCH+DISPATCH_J(parent)], RA
+ |.if X64
+ |.if X64WIN
+ | sub rsp, 16*8+4*8 // Room for SSE regs + save area.
+ |.else
+ | sub rsp, 16*8 // Room for SSE regs.
+ |.endif
+ | add rbp, -128
+ | movsd qword [rbp-8], xmm15; movsd qword [rbp-16], xmm14
+ | movsd qword [rbp-24], xmm13; movsd qword [rbp-32], xmm12
+ | movsd qword [rbp-40], xmm11; movsd qword [rbp-48], xmm10
+ | movsd qword [rbp-56], xmm9; movsd qword [rbp-64], xmm8
+ | movsd qword [rbp-72], xmm7; movsd qword [rbp-80], xmm6
+ | movsd qword [rbp-88], xmm5; movsd qword [rbp-96], xmm4
+ | movsd qword [rbp-104], xmm3; movsd qword [rbp-112], xmm2
+ | movsd qword [rbp-120], xmm1; movsd qword [rbp-128], xmm0
+ |.else
+ | sub esp, 8*8+16 // Room for SSE regs + args.
+ | movsd qword [ebp-40], xmm7; movsd qword [ebp-48], xmm6
+ | movsd qword [ebp-56], xmm5; movsd qword [ebp-64], xmm4
+ | movsd qword [ebp-72], xmm3; movsd qword [ebp-80], xmm2
+ | movsd qword [ebp-88], xmm1; movsd qword [ebp-96], xmm0
+ |.endif
+ | // Caveat: RB is ebp.
+ | mov L:RB, [DISPATCH+DISPATCH_GL(cur_L)]
+ | mov BASE, [DISPATCH+DISPATCH_GL(jit_base)]
+ | mov aword [DISPATCH+DISPATCH_J(L)], L:RBa
+ | mov L:RB->base, BASE
+ |.if X64WIN
+ | lea CARG2, [rsp+4*8]
+ |.elif X64
+ | mov CARG2, rsp
+ |.else
+ | lea FCARG2, [esp+16]
+ |.endif
+ | lea FCARG1, [DISPATCH+GG_DISP2J]
+ | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | call extern lj_trace_exit@8 // (jit_State *J, ExitState *ex)
+ | // MULTRES or negated error code returned in eax (RD).
+ | mov RAa, L:RB->cframe
+ | and RAa, CFRAME_RAWMASK
+ |.if X64WIN
+ | // Reposition stack later.
+ |.elif X64
+ | mov rsp, RAa // Reposition stack to C frame.
+ |.else
+ | mov esp, RAa // Reposition stack to C frame.
+ |.endif
+ | mov [RAa+CFRAME_OFS_L], L:RB // Set SAVE_L (on-trace resume/yield).
+ | mov BASE, L:RB->base
+ | mov PC, [RAa+CFRAME_OFS_PC] // Get SAVE_PC.
+ |.if X64
+ | jmp >1
+ |.endif
+ |.endif
+ |->vm_exit_interp:
+ | // RD = MULTRES or negated error code, BASE, PC and DISPATCH set.
+ |.if JIT
+ |.if X64
+ | // Restore additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | lea RAa, [rsp+9*16+4*8]
+ |1:
+ | movdqa xmm15, [RAa-9*16]
+ | movdqa xmm14, [RAa-8*16]
+ | movdqa xmm13, [RAa-7*16]
+ | movdqa xmm12, [RAa-6*16]
+ | movdqa xmm11, [RAa-5*16]
+ | movdqa xmm10, [RAa-4*16]
+ | movdqa xmm9, [RAa-3*16]
+ | movdqa xmm8, [RAa-2*16]
+ | movdqa xmm7, [RAa-1*16]
+ | mov rsp, RAa // Reposition stack to C frame.
+ | movdqa xmm6, [RAa]
+ | mov r15, CSAVE_3
+ | mov r14, CSAVE_4
+ |.else
+ | add rsp, 16 // Reposition stack to C frame.
+ |1:
+ |.endif
+ | mov r13, TMPa
+ | mov r12, TMPQ
+ |.endif
+ | test RD, RD; js >9 // Check for error from exit.
+ | mov L:RB, SAVE_L
+ | mov MULTRES, RD
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | mov L:RB->base, BASE
+ | mov dword [DISPATCH+DISPATCH_GL(jit_base)], 0
+ | set_vmstate INTERP
+ | // Modified copy of ins_next which handles function header dispatch, too.
+ | mov RC, [PC]
+ | movzx RA, RCH
+ | movzx OP, RCL
+ | add PC, 4
+ | shr RC, 16
+ | cmp OP, BC_FUNCF // Function header?
+ | jb >3
+ | cmp OP, BC_FUNCC+2 // Fast function?
+ | jae >4
+ |2:
+ | mov RC, MULTRES // RC/RD holds nres+1.
+ |3:
+ |.if X64
+ | jmp aword [DISPATCH+OP*8]
+ |.else
+ | jmp aword [DISPATCH+OP*4]
+ |.endif
+ |
+ |4: // Check frame below fast function.
+ | mov RC, [BASE-4]
+ | test RC, FRAME_TYPE
+ | jnz <2 // Trace stitching continuation?
+ | // Otherwise set KBASE for Lua function below fast function.
+ | movzx RC, byte [RC-3]
+ | not RCa
+ | mov LFUNC:KBASE, [BASE+RC*8-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <2
+ |
+ |9: // Rethrow error from the right C frame.
+ | mov FCARG2, RD
+ | mov FCARG1, L:RB
+ | neg FCARG2
+ | call extern lj_err_trace@8 // (lua_State *L, int errcode)
+ |.endif
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Math helper functions ----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// FP value rounding. Called by math.floor/math.ceil fast functions
+ |// and from JIT code. arg/ret is xmm0. xmm0-xmm3 and RD (eax) modified.
+ |.macro vm_round, name, mode, cond
+ |->name:
+ |.if not X64 and cond
+ | movsd xmm0, qword [esp+4]
+ | call ->name .. _sse
+ | movsd qword [esp+4], xmm0 // Overwrite callee-owned arg.
+ | fld qword [esp+4]
+ | ret
+ |.endif
+ |
+ |->name .. _sse:
+ | sseconst_abs xmm2, RDa
+ | sseconst_2p52 xmm3, RDa
+ | movaps xmm1, xmm0
+ | andpd xmm1, xmm2 // |x|
+ | ucomisd xmm3, xmm1 // No truncation if 2^52 <= |x|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ |.if mode == 2 // trunc(x)?
+ | movaps xmm0, xmm1
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | sseconst_1 xmm3, RDa
+ | cmpsd xmm0, xmm1, 1 // |x| < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract 1.0.
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ |.else
+ | addsd xmm1, xmm3 // (|x| + 2^52) - 2^52
+ | subsd xmm1, xmm3
+ | orpd xmm1, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm3, RDa
+ | .if mode == 1 // ceil(x)?
+ | cmpsd xmm0, xmm1, 6 // x > result?
+ | andpd xmm0, xmm3
+ | addsd xmm1, xmm0 // If yes, add 1.
+ | orpd xmm1, xmm2 // Merge sign bit back in (again).
+ | .else // floor(x)?
+ | cmpsd xmm0, xmm1, 1 // x < result?
+ | andpd xmm0, xmm3
+ | subsd xmm1, xmm0 // If yes, subtract 1.
+ | .endif
+ |.endif
+ | movaps xmm0, xmm1
+ |1:
+ | ret
+ |.endmacro
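+ |// Rounding via the 2^52 bias: doubles have 52 mantissa bits, so for
+ |// |x| < 2^52 the sum (|x| + 2^52) has no fractional bits and the FPU's
+ |// round-to-nearest produces an integer; subtracting 2^52 leaves
+ |// nearest(|x|). The compare/mask sequences correct the result by 1 for
+ |// floor/ceil/trunc, and the early jbe >1 returns x unchanged once
+ |// 2^52 <= |x| (already integral). Worked example for floor(-2.5):
+ |// |x| = 2.5 rounds to 2.0 (ties to even), the sign merge gives -2.0,
+ |// and since x < -2.0 the final subsd yields -3.0.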
+ |
+ | vm_round vm_floor, 0, 1
+ | vm_round vm_ceil, 1, JIT
+ | vm_round vm_trunc, 2, JIT
+ |
+ |// FP modulo x%y. Called by BC_MOD* and vm_arith.
+ |->vm_mod:
+ |// Args in xmm0/xmm1, return value in xmm0.
+ |// Caveat: xmm0-xmm5 and RC (eax) modified!
+ | movaps xmm5, xmm0
+ | divsd xmm0, xmm1
+ | sseconst_abs xmm2, RDa
+ | sseconst_2p52 xmm3, RDa
+ | movaps xmm4, xmm0
+ | andpd xmm4, xmm2 // |x/y|
+ | ucomisd xmm3, xmm4 // No truncation if 2^52 <= |x/y|.
+ | jbe >1
+ | andnpd xmm2, xmm0 // Isolate sign bit.
+ | addsd xmm4, xmm3 // (|x/y| + 2^52) - 2^52
+ | subsd xmm4, xmm3
+ | orpd xmm4, xmm2 // Merge sign bit back in.
+ | sseconst_1 xmm2, RDa
+ | cmpsd xmm0, xmm4, 1 // x/y < result?
+ | andpd xmm0, xmm2
+ | subsd xmm4, xmm0 // If yes, subtract 1.0.
+ | movaps xmm0, xmm5
+ | mulsd xmm1, xmm4
+ | subsd xmm0, xmm1
+ | ret
+ |1:
+ | mulsd xmm1, xmm0
+ | movaps xmm0, xmm5
+ | subsd xmm0, xmm1
+ | ret
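+ |// Implements the Lua definition x % y == x - floor(x/y)*y: the quotient
+ |// is floored with the same 2^52 bias trick as vm_round above, and the
+ |// path at 1: covers 2^52 <= |x/y|, where the quotient is already
+ |// integral. E.g. 5 % -3: 5/-3 = -1.67, floored to -2, and
+ |// 5 - (-2)*(-3) = -1.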
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Miscellaneous functions --------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// int lj_vm_cpuid(uint32_t f, uint32_t res[4])
+ |->vm_cpuid:
+ |.if X64
+ | mov eax, CARG1d
+ | .if X64WIN; push rsi; mov rsi, CARG2; .endif
+ | push rbx
+ | xor ecx, ecx
+ | cpuid
+ | mov [rsi], eax
+ | mov [rsi+4], ebx
+ | mov [rsi+8], ecx
+ | mov [rsi+12], edx
+ | pop rbx
+ | .if X64WIN; pop rsi; .endif
+ | ret
+ |.else
+ | pushfd
+ | pop edx
+ | mov ecx, edx
+ | xor edx, 0x00200000 // Toggle ID bit in flags.
+ | push edx
+ | popfd
+ | pushfd
+ | pop edx
+ | xor eax, eax // Zero means no features supported.
+ | cmp ecx, edx
+ | jz >1 // No ID toggle means no CPUID support.
+ | mov eax, [esp+4] // Argument 1 is function number.
+ | push edi
+ | push ebx
+ | xor ecx, ecx
+ | cpuid
+ | mov edi, [esp+16] // Argument 2 is result area.
+ | mov [edi], eax
+ | mov [edi+4], ebx
+ | mov [edi+8], ecx
+ | mov [edi+12], edx
+ | pop ebx
+ | pop edi
+ |1:
+ | ret
+ |.endif
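+ |// The 32 bit path uses the classic CPUID probe: bit 21 of EFLAGS (the
+ |// ID flag, mask 0x00200000) can only be toggled on CPUs implementing
+ |// CPUID, so an unchanged flags image after the pushfd/popfd round trip
+ |// means the instruction is unavailable.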
+ |
+ |.define NEXT_TAB, TAB:FCARG1
+ |.define NEXT_IDX, FCARG2
+ |.define NEXT_PTR, RCa
+ |.define NEXT_PTRd, RC
+ |.macro NEXT_RES_IDXL, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.if X64
+ |.define NEXT_TMP, CARG3d
+ |.define NEXT_TMPq, CARG3
+ |.define NEXT_ASIZE, CARG4d
+ |.macro NEXT_ENTER; .endmacro
+ |.macro NEXT_LEAVE; ret; .endmacro
+ |.if X64WIN
+ |.define NEXT_RES_PTR, [rsp+aword*5]
+ |.macro NEXT_RES_IDX, op2; add NEXT_IDX, op2; .endmacro
+ |.else
+ |.define NEXT_RES_PTR, [rsp+aword*1]
+ |.macro NEXT_RES_IDX, op2; lea edx, [NEXT_IDX+op2]; .endmacro
+ |.endif
+ |.else
+ |.define NEXT_ASIZE, esi
+ |.define NEXT_TMP, edi
+ |.macro NEXT_ENTER; push esi; push edi; .endmacro
+ |.macro NEXT_LEAVE; pop edi; pop esi; ret; .endmacro
+ |.define NEXT_RES_PTR, [esp+dword*3]
+ |.macro NEXT_RES_IDX, op2; add NEXT_IDX, op2; .endmacro
+ |.endif
+ |
+ |// TValue *lj_vm_next(GCtab *t, uint32_t idx)
+ |// Next idx returned in edx.
+ |->vm_next:
+ |.if JIT
+ | NEXT_ENTER
+ | mov NEXT_ASIZE, NEXT_TAB->asize
+ |1: // Traverse array part.
+ | cmp NEXT_IDX, NEXT_ASIZE; jae >5
+ | mov NEXT_TMP, NEXT_TAB->array
+ | cmp dword [NEXT_TMP+NEXT_IDX*8+4], LJ_TNIL; je >2
+ | lea NEXT_PTR, NEXT_RES_PTR
+ |.if X64
+ | mov NEXT_TMPq, qword [NEXT_TMP+NEXT_IDX*8]
+ | mov qword [NEXT_PTR], NEXT_TMPq
+ |.else
+ | mov NEXT_ASIZE, dword [NEXT_TMP+NEXT_IDX*8+4]
+ | mov NEXT_TMP, dword [NEXT_TMP+NEXT_IDX*8]
+ | mov dword [NEXT_PTR+4], NEXT_ASIZE
+ | mov dword [NEXT_PTR], NEXT_TMP
+ |.endif
+ |.if DUALNUM
+ | mov dword [NEXT_PTR+dword*3], LJ_TISNUM
+ | mov dword [NEXT_PTR+dword*2], NEXT_IDX
+ |.else
+ | cvtsi2sd xmm0, NEXT_IDX
+ | movsd qword [NEXT_PTR+dword*2], xmm0
+ |.endif
+ | NEXT_RES_IDX 1
+ | NEXT_LEAVE
+ |2: // Skip holes in array part.
+ | add NEXT_IDX, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub NEXT_IDX, NEXT_ASIZE
+ |6:
+ | cmp NEXT_IDX, NEXT_TAB->hmask; ja >9
+ | imul NEXT_PTRd, NEXT_IDX, #NODE
+ | add NODE:NEXT_PTRd, dword NEXT_TAB->node
+ | cmp dword NODE:NEXT_PTR->val.it, LJ_TNIL; je >7
+ | NEXT_RES_IDXL NEXT_ASIZE+1
+ | NEXT_LEAVE
+ |7: // Skip holes in hash part.
+ | add NEXT_IDX, 1
+ | jmp <6
+ |
+ |9: // End of iteration. Set the key to nil (not the value).
+ | NEXT_RES_IDX NEXT_ASIZE
+ | lea NEXT_PTR, NEXT_RES_PTR
+ | mov dword [NEXT_PTR+dword*3], LJ_TNIL
+ | NEXT_LEAVE
+ |.endif
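+ |// The iteration state is a single unsigned index: values below asize
+ |// walk the array part, larger ones continue through the hash nodes
+ |// (biased by asize). The index returned in edx is the one to resume
+ |// from on the following call.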
+ |
+ |//-----------------------------------------------------------------------
+ |//-- Assertions ---------------------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |->assert_bad_for_arg_type:
+#ifdef LUA_USE_ASSERT
+ | int3
+#endif
+ | int3
+ |
+ |//-----------------------------------------------------------------------
+ |//-- FFI helper functions -----------------------------------------------
+ |//-----------------------------------------------------------------------
+ |
+ |// Handler for callback functions. Callback slot number in ah/al.
+ |->vm_ffi_callback:
+ |.if FFI
+ |.type CTSTATE, CTState, PC
+ |.if not X64
+ | sub esp, 16 // Leave room for SAVE_ERRF etc.
+ |.endif
+ | saveregs_ // ebp/rbp already saved. ebp now holds global_State *.
+ | lea DISPATCH, [ebp+GG_G2DISP]
+ | mov CTSTATE, GL:ebp->ctype_state
+ | movzx eax, ax
+ | mov CTSTATE->cb.slot, eax
+ |.if X64
+ | mov CTSTATE->cb.gpr[0], CARG1
+ | mov CTSTATE->cb.gpr[1], CARG2
+ | mov CTSTATE->cb.gpr[2], CARG3
+ | mov CTSTATE->cb.gpr[3], CARG4
+ | movsd qword CTSTATE->cb.fpr[0], xmm0
+ | movsd qword CTSTATE->cb.fpr[1], xmm1
+ | movsd qword CTSTATE->cb.fpr[2], xmm2
+ | movsd qword CTSTATE->cb.fpr[3], xmm3
+ |.if X64WIN
+ | lea rax, [rsp+CFRAME_SIZE+4*8]
+ |.else
+ | lea rax, [rsp+CFRAME_SIZE]
+ | mov CTSTATE->cb.gpr[4], CARG5
+ | mov CTSTATE->cb.gpr[5], CARG6
+ | movsd qword CTSTATE->cb.fpr[4], xmm4
+ | movsd qword CTSTATE->cb.fpr[5], xmm5
+ | movsd qword CTSTATE->cb.fpr[6], xmm6
+ | movsd qword CTSTATE->cb.fpr[7], xmm7
+ |.endif
+ | mov CTSTATE->cb.stack, rax
+ | mov CARG2, rsp
+ |.else
+ | lea eax, [esp+CFRAME_SIZE+16]
+ | mov CTSTATE->cb.gpr[0], FCARG1
+ | mov CTSTATE->cb.gpr[1], FCARG2
+ | mov CTSTATE->cb.stack, eax
+ | mov FCARG1, [esp+CFRAME_SIZE+12] // Move around misplaced retaddr/ebp.
+ | mov FCARG2, [esp+CFRAME_SIZE+8]
+ | mov SAVE_RET, FCARG1
+ | mov SAVE_R4, FCARG2
+ | mov FCARG2, esp
+ |.endif
+ | mov SAVE_PC, CTSTATE // Any value outside of bytecode is ok.
+ | mov FCARG1, CTSTATE
+ | call extern lj_ccallback_enter@8 // (CTState *cts, void *cf)
+ | // lua_State * returned in eax (RD).
+ | set_vmstate INTERP
+ | mov BASE, L:RD->base
+ | mov RD, L:RD->top
+ | sub RD, BASE
+ | mov LFUNC:RB, [BASE-8]
+ | shr RD, 3
+ | add RD, 1
+ | ins_callt
+ |.endif
+ |
+ |->cont_ffi_callback: // Return from FFI callback.
+ |.if FFI
+ | mov L:RA, SAVE_L
+ | mov CTSTATE, [DISPATCH+DISPATCH_GL(ctype_state)]
+ | mov aword CTSTATE->L, L:RAa
+ | mov L:RA->base, BASE
+ | mov L:RA->top, RB
+ | mov FCARG1, CTSTATE
+ | mov FCARG2, RC
+ | call extern lj_ccallback_leave@8 // (CTState *cts, TValue *o)
+ |.if X64
+ | mov rax, CTSTATE->cb.gpr[0]
+ | movsd xmm0, qword CTSTATE->cb.fpr[0]
+ | jmp ->vm_leave_unw
+ |.else
+ | mov L:RB, SAVE_L
+ | mov eax, CTSTATE->cb.gpr[0]
+ | mov edx, CTSTATE->cb.gpr[1]
+ | cmp dword CTSTATE->cb.gpr[2], 1
+ | jb >7
+ | je >6
+ | fld qword CTSTATE->cb.fpr[0].d
+ | jmp >7
+ |6:
+ | fld dword CTSTATE->cb.fpr[0].f
+ |7:
+ | mov ecx, L:RB->top
+ | movzx ecx, word [ecx+6] // Get stack adjustment and copy up.
+ | mov SAVE_L, ecx // Must be one slot above SAVE_RET
+ | restoreregs
+ | pop ecx // Move return addr from SAVE_RET.
+ | add esp, [esp] // Adjust stack.
+ | add esp, 16
+ | push ecx
+ | ret
+ |.endif
+ |.endif
+ |
+ |->vm_ffi_call@4: // Call C function via FFI.
+ | // Caveat: needs special frame unwinding, see below.
+ |.if FFI
+ |.if X64
+ | .type CCSTATE, CCallState, rbx
+ | push rbp; mov rbp, rsp; push rbx; mov CCSTATE, CARG1
+ |.else
+ | .type CCSTATE, CCallState, ebx
+ | push ebp; mov ebp, esp; push ebx; mov CCSTATE, FCARG1
+ |.endif
+ |
+ | // Readjust stack.
+ |.if X64
+ | mov eax, CCSTATE->spadj
+ | sub rsp, rax
+ |.else
+ | sub esp, CCSTATE->spadj
+ |.if WIN
+ | mov CCSTATE->spadj, esp
+ |.endif
+ |.endif
+ |
+ | // Copy stack slots.
+ | movzx ecx, byte CCSTATE->nsp
+ | sub ecx, 1
+ | js >2
+ |1:
+ |.if X64
+ | mov rax, [CCSTATE+rcx*8+offsetof(CCallState, stack)]
+ | mov [rsp+rcx*8+CCALL_SPS_EXTRA*8], rax
+ |.else
+ | mov eax, [CCSTATE+ecx*4+offsetof(CCallState, stack)]
+ | mov [esp+ecx*4], eax
+ |.endif
+ | sub ecx, 1
+ | jns <1
+ |2:
+ |
+ |.if X64
+ | movzx eax, byte CCSTATE->nfpr
+ | mov CARG1, CCSTATE->gpr[0]
+ | mov CARG2, CCSTATE->gpr[1]
+ | mov CARG3, CCSTATE->gpr[2]
+ | mov CARG4, CCSTATE->gpr[3]
+ |.if not X64WIN
+ | mov CARG5, CCSTATE->gpr[4]
+ | mov CARG6, CCSTATE->gpr[5]
+ |.endif
+ | test eax, eax; jz >5
+ | movaps xmm0, CCSTATE->fpr[0]
+ | movaps xmm1, CCSTATE->fpr[1]
+ | movaps xmm2, CCSTATE->fpr[2]
+ | movaps xmm3, CCSTATE->fpr[3]
+ |.if not X64WIN
+ | cmp eax, 4; jbe >5
+ | movaps xmm4, CCSTATE->fpr[4]
+ | movaps xmm5, CCSTATE->fpr[5]
+ | movaps xmm6, CCSTATE->fpr[6]
+ | movaps xmm7, CCSTATE->fpr[7]
+ |.endif
+ |5:
+ |.else
+ | mov FCARG1, CCSTATE->gpr[0]
+ | mov FCARG2, CCSTATE->gpr[1]
+ |.endif
+ |
+ | call aword CCSTATE->func
+ |
+ |.if X64
+ | mov CCSTATE->gpr[0], rax
+ | movaps CCSTATE->fpr[0], xmm0
+ |.if not X64WIN
+ | mov CCSTATE->gpr[1], rdx
+ | movaps CCSTATE->fpr[1], xmm1
+ |.endif
+ |.else
+ | mov CCSTATE->gpr[0], eax
+ | mov CCSTATE->gpr[1], edx
+ | cmp byte CCSTATE->resx87, 1
+ | jb >7
+ | je >6
+ | fstp qword CCSTATE->fpr[0].d[0]
+ | jmp >7
+ |6:
+ | fstp dword CCSTATE->fpr[0].f[0]
+ |7:
+ |.if WIN
+ | sub CCSTATE->spadj, esp
+ |.endif
+ |.endif
+ |
+ |.if X64
+ | mov rbx, [rbp-8]; leave; ret
+ |.else
+ | mov ebx, [ebp-4]; leave; ret
+ |.endif
+ |.endif
+ |// Note: vm_ffi_call must be the last function in this object file!
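+ |// (It is the only VM function with an ebp/rbp-based frame; buildvm emits
+ |// separate unwind info for code from this symbol's offset onward, so
+ |// keeping it last keeps that region a single contiguous range.)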
+ |
+ |//-----------------------------------------------------------------------
+}
+
+/* Generate the code for a single instruction. */
+static void build_ins(BuildCtx *ctx, BCOp op, int defop)
+{
+ int vk = 0;
+ |// Note: aligning all instructions does not pay off.
+ |=>defop:
+
+ switch (op) {
+
+ /* -- Comparison ops ---------------------------------------------------- */
+
+ /* Remember: all ops branch for a true comparison, fall through otherwise. */
+
+ |.macro jmp_comp, lt, ge, le, gt, target
+ ||switch (op) {
+ ||case BC_ISLT:
+ | lt target
+ ||break;
+ ||case BC_ISGE:
+ | ge target
+ ||break;
+ ||case BC_ISLE:
+ | le target
+ ||break;
+ ||case BC_ISGT:
+ | gt target
+ ||break;
+ ||default: break; /* Shut up GCC. */
+ ||}
+ |.endmacro
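+ /* In this .dasc file, lines starting with | are DynASM assembler
+  templates and || lines inside a macro are C code copied into the
+  expansion, so jmp_comp expands to a build-time C switch that selects
+  which conditional jump instruction is emitted for each opcode. */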
+
+ case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
+ | // RA = src1, RD = src2, JMP with RD = target
+ | ins_AD
+ |.if DUALNUM
+ | checkint RA, >7
+ | checkint RD, >8
+ | mov RB, dword [BASE+RA*8]
+ | add PC, 4
+ | cmp RB, dword [BASE+RD*8]
+ | jmp_comp jge, jl, jg, jle, >9
+ |6:
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja ->vmeta_comp
+ | // RA is a number.
+ | cmp dword [BASE+RD*8+4], LJ_TISNUM; jb >1; jne ->vmeta_comp
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, dword [BASE+RD*8]
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is not an integer.
+ | ja ->vmeta_comp
+ | // RA is an integer, RD is a number.
+ | cvtsi2sd xmm1, dword [BASE+RA*8]
+ | movsd xmm0, qword [BASE+RD*8]
+ | add PC, 4
+ | ucomisd xmm0, xmm1
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ |.else
+ | checknum RA, ->vmeta_comp
+ | checknum RD, ->vmeta_comp
+ |.endif
+ |1:
+ | movsd xmm0, qword [BASE+RD*8]
+ |2:
+ | add PC, 4
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |3:
+ | // Unordered: all of ZF CF PF set, ordered: PF clear.
+ | // To preserve NaN semantics GE/GT branch on unordered, but LT/LE don't.
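+ | // ISGE/ISGT act as the negations of ISLT/ISLE, and any comparison
+ | // involving NaN must be false, so the negated ops have to take their
+ | // branch when ucomisd reports unordered.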
+ |.if DUALNUM
+ | jmp_comp jbe, ja, jb, jae, <9
+ | jmp <6
+ |.else
+ | jmp_comp jbe, ja, jb, jae, >1
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1:
+ | ins_next
+ |.endif
+ break;
+
+ case BC_ISEQV: case BC_ISNEV:
+ vk = op == BC_ISEQV;
+ | ins_AD // RA = src1, RD = src2, JMP with RD = target
+ | mov RB, [BASE+RD*8+4]
+ | add PC, 4
+ |.if DUALNUM
+ | cmp RB, LJ_TISNUM; jne >7
+ | checkint RA, >8
+ | mov RB, dword [BASE+RD*8]
+ | cmp RB, dword [BASE+RA*8]
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RD is not an integer.
+ | ja >5
+ | // RD is a number.
+ | cmp dword [BASE+RA*8+4], LJ_TISNUM; jb >1; jne >5
+ | // RD is a number, RA is an integer.
+ | cvtsi2sd xmm0, dword [BASE+RA*8]
+ | jmp >2
+ |
+ |8: // RD is an integer, RA is not an integer.
+ | ja >5
+ | // RD is an integer, RA is a number.
+ | cvtsi2sd xmm0, dword [BASE+RD*8]
+ | ucomisd xmm0, qword [BASE+RA*8]
+ | jmp >4
+ |
+ |.else
+ | cmp RB, LJ_TISNUM; jae >5
+ | checknum RA, >5
+ |.endif
+ |1:
+ | movsd xmm0, qword [BASE+RA*8]
+ |2:
+ | ucomisd xmm0, qword [BASE+RD*8]
+ |4:
+ iseqne_fp:
+ if (vk) {
+ | jp >2 // Unordered means not equal.
+ | jne >2
+ } else {
+ | jp >2 // Unordered means not equal.
+ | je >1
+ }
+ iseqne_end:
+ if (vk) {
+ |1: // EQ: Branch to the target.
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2: // NE: Fallthrough to next instruction.
+ |.if not FFI
+ |3:
+ |.endif
+ } else {
+ |.if not FFI
+ |3:
+ |.endif
+ |2: // NE: Branch to the target.
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1: // EQ: Fallthrough to next instruction.
+ }
+ if (LJ_DUALNUM && (op == BC_ISEQV || op == BC_ISNEV ||
+ op == BC_ISEQN || op == BC_ISNEN)) {
+ | jmp <9
+ } else {
+ | ins_next
+ }
+ |
+ if (op == BC_ISEQV || op == BC_ISNEV) {
+ |5: // Either or both types are not numbers.
+ |.if FFI
+ | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd
+ | checktp RA, LJ_TCDATA; je ->vmeta_equal_cd
+ |.endif
+ | checktp RA, RB // Compare types.
+ | jne <2 // Not the same type?
+ | cmp RB, LJ_TISPRI
+ | jae <1 // Same type and primitive type?
+ |
+ | // Same types and not a primitive type. Compare GCobj or pvalue.
+ | mov RA, [BASE+RA*8]
+ | mov RD, [BASE+RD*8]
+ | cmp RA, RD
+ | je <1 // Same GCobjs or pvalues?
+ | cmp RB, LJ_TISTABUD
+ | ja <2 // Different objects and not table/ud?
+ |.if X64
+ | cmp RB, LJ_TUDATA // And not 64 bit lightuserdata.
+ | jb <2
+ |.endif
+ |
+ | // Different tables or userdatas. Need to check __eq metamethod.
+ | // Field metatable must be at same offset for GCtab and GCudata!
+ | mov TAB:RB, TAB:RA->metatable
+ | test TAB:RB, TAB:RB
+ | jz <2 // No metatable?
+ | test byte TAB:RB->nomm, 1<<MM_eq
+ | jnz <2 // Or 'no __eq' flag set?
+ if (vk) {
+ | xor RB, RB // ne = 0
+ } else {
+ | mov RB, 1 // ne = 1
+ }
+ | jmp ->vmeta_equal // Handle __eq metamethod.
+ } else {
+ |.if FFI
+ |3:
+ | cmp RB, LJ_TCDATA
+ if (LJ_DUALNUM && vk) {
+ | jne <9
+ } else {
+ | jne <2
+ }
+ | jmp ->vmeta_equal_cd
+ |.endif
+ }
+ break;
+ case BC_ISEQS: case BC_ISNES:
+ vk = op == BC_ISEQS;
+ | ins_AND // RA = src, RD = str const (~), JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ | cmp RB, LJ_TSTR; jne >3
+ | mov RA, [BASE+RA*8]
+ | cmp RA, [KBASE+RD*4]
+ iseqne_test:
+ if (vk) {
+ | jne >2
+ } else {
+ | je >1
+ }
+ goto iseqne_end;
+ case BC_ISEQN: case BC_ISNEN:
+ vk = op == BC_ISEQN;
+ | ins_AD // RA = src, RD = num const, JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ |.if DUALNUM
+ | cmp RB, LJ_TISNUM; jne >7
+ | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jne >8
+ | mov RB, dword [KBASE+RD*8]
+ | cmp RB, dword [BASE+RA*8]
+ if (vk) {
+ | jne >9
+ } else {
+ | je >9
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |9:
+ | ins_next
+ |
+ |7: // RA is not an integer.
+ | ja >3
+ | // RA is a number.
+ | cmp dword [KBASE+RD*8+4], LJ_TISNUM; jb >1
+ | // RA is a number, RD is an integer.
+ | cvtsi2sd xmm0, dword [KBASE+RD*8]
+ | jmp >2
+ |
+ |8: // RA is an integer, RD is a number.
+ | cvtsi2sd xmm0, dword [BASE+RA*8]
+ | ucomisd xmm0, qword [KBASE+RD*8]
+ | jmp >4
+ |.else
+ | cmp RB, LJ_TISNUM; jae >3
+ |.endif
+ |1:
+ | movsd xmm0, qword [KBASE+RD*8]
+ |2:
+ | ucomisd xmm0, qword [BASE+RA*8]
+ |4:
+ goto iseqne_fp;
+ case BC_ISEQP: case BC_ISNEP:
+ vk = op == BC_ISEQP;
+ | ins_AND // RA = src, RD = primitive type (~), JMP with RD = target
+ | mov RB, [BASE+RA*8+4]
+ | add PC, 4
+ | cmp RB, RD
+ if (!LJ_HASFFI) goto iseqne_test;
+ if (vk) {
+ | jne >3
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ |3:
+ | cmp RB, LJ_TCDATA; jne <2
+ | jmp ->vmeta_equal_cd
+ } else {
+ | je >2
+ | cmp RB, LJ_TCDATA; je ->vmeta_equal_cd
+ | movzx RD, PC_RD
+ | branchPC RD
+ |2:
+ | ins_next
+ }
+ break;
+
+ /* -- Unary test and copy ops ------------------------------------------- */
+
+ case BC_ISTC: case BC_ISFC: case BC_IST: case BC_ISF:
+ | ins_AD // RA = dst or unused, RD = src, JMP with RD = target
+ | mov RB, [BASE+RD*8+4]
+ | add PC, 4
+ | cmp RB, LJ_TISTRUECOND
+ if (op == BC_IST || op == BC_ISTC) {
+ | jae >1
+ } else {
+ | jb >1
+ }
+ if (op == BC_ISTC || op == BC_ISFC) {
+ | mov [BASE+RA*8+4], RB
+ | mov RB, [BASE+RD*8]
+ | mov [BASE+RA*8], RB
+ }
+ | movzx RD, PC_RD
+ | branchPC RD
+ |1: // Fallthrough to the next instruction.
+ | ins_next
+ break;
+
+ case BC_ISTYPE:
+ | ins_AD // RA = src, RD = -type
+ | add RD, [BASE+RA*8+4]
+ | jne ->vmeta_istype
+ | ins_next
+ break;
+ case BC_ISNUM:
+ | ins_AD // RA = src, RD = -(TISNUM-1)
+ | checknum RA, ->vmeta_istype
+ | ins_next
+ break;
+
+ /* -- Unary ops --------------------------------------------------------- */
+
+ case BC_MOV:
+ | ins_AD // RA = dst, RD = src
+ |.if X64
+ | mov RBa, [BASE+RD*8]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [BASE+RD*8+4]
+ | mov RD, [BASE+RD*8]
+ | mov [BASE+RA*8+4], RB
+ | mov [BASE+RA*8], RD
+ |.endif
+ | ins_next_
+ break;
+ case BC_NOT:
+ | ins_AD // RA = dst, RD = src
+ | xor RB, RB
+ | checktp RD, LJ_TISTRUECOND
+ | adc RB, LJ_TTRUE
+ | mov [BASE+RA*8+4], RB
+ | ins_next
+ break;
+ case BC_UNM:
+ | ins_AD // RA = dst, RD = src
+ |.if DUALNUM
+ | checkint RD, >5
+ | mov RB, [BASE+RD*8]
+ | neg RB
+ | jo >4
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RB
+ |9:
+ | ins_next
+ |4:
+ | mov dword [BASE+RA*8+4], 0x41e00000 // 2^31.
+ | mov dword [BASE+RA*8], 0
+ | jmp <9
+ |5:
+ | ja ->vmeta_unm
+ |.else
+ | checknum RD, ->vmeta_unm
+ |.endif
+ | movsd xmm0, qword [BASE+RD*8]
+ | sseconst_sign xmm1, RDa
+ | xorps xmm0, xmm1
+ | movsd qword [BASE+RA*8], xmm0
+ |.if DUALNUM
+ | jmp <9
+ |.else
+ | ins_next
+ |.endif
+ break;
+ case BC_LEN:
+ | ins_AD // RA = dst, RD = src
+ | checkstr RD, >2
+ | mov STR:RD, [BASE+RD*8]
+ |.if DUALNUM
+ | mov RD, dword STR:RD->len
+ |1:
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RD
+ |.else
+ | xorps xmm0, xmm0
+ | cvtsi2sd xmm0, dword STR:RD->len
+ |1:
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ |2:
+ | checktab RD, ->vmeta_len
+ | mov TAB:FCARG1, [BASE+RD*8]
+#if LJ_52
+ | mov TAB:RB, TAB:FCARG1->metatable
+ | cmp TAB:RB, 0
+ | jnz >9
+ |3:
+#endif
+ |->BC_LEN_Z:
+ | mov RB, BASE // Save BASE.
+ | call extern lj_tab_len@4 // (GCtab *t)
+ | // Length of table returned in eax (RD).
+ |.if DUALNUM
+ | // Nothing to do.
+ |.else
+ | cvtsi2sd xmm0, RD
+ |.endif
+ | mov BASE, RB // Restore BASE.
+ | movzx RA, PC_RA
+ | jmp <1
+#if LJ_52
+ |9: // Check for __len.
+ | test byte TAB:RB->nomm, 1<<MM_len
+ | jnz <3
+ | jmp ->vmeta_len // 'no __len' flag NOT set: check.
+#endif
+ break;
+
+ /* -- Binary ops -------------------------------------------------------- */
+
+ |.macro ins_arithpre, sseins, ssereg
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checknum RB, ->vmeta_arith_vn
+ | .if DUALNUM
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_vn
+ | .endif
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [KBASE+RC*8]
+ || break;
+ ||case 1:
+ | checknum RB, ->vmeta_arith_nv
+ | .if DUALNUM
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jae ->vmeta_arith_nv
+ | .endif
+ | movsd xmm0, qword [KBASE+RC*8]
+ | sseins ssereg, qword [BASE+RB*8]
+ || break;
+ ||default:
+ | checknum RB, ->vmeta_arith_vv
+ | checknum RC, ->vmeta_arith_vv
+ | movsd xmm0, qword [BASE+RB*8]
+ | sseins ssereg, qword [BASE+RC*8]
+ || break;
+ ||}
+ |.endmacro
+ |
+ |.macro ins_arithdn, intins
+ | ins_ABC
+ ||vk = ((int)op - BC_ADDVN) / (BC_ADDNV-BC_ADDVN);
+ ||switch (vk) {
+ ||case 0:
+ | checkint RB, ->vmeta_arith_vn
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_vn
+ | mov RB, [BASE+RB*8]
+ | intins RB, [KBASE+RC*8]; jo ->vmeta_arith_vno
+ || break;
+ ||case 1:
+ | checkint RB, ->vmeta_arith_nv
+ | cmp dword [KBASE+RC*8+4], LJ_TISNUM; jne ->vmeta_arith_nv
+ | mov RC, [KBASE+RC*8]
+ | intins RC, [BASE+RB*8]; jo ->vmeta_arith_nvo
+ || break;
+ ||default:
+ | checkint RB, ->vmeta_arith_vv
+ | checkint RC, ->vmeta_arith_vv
+ | mov RB, [BASE+RB*8]
+ | intins RB, [BASE+RC*8]; jo ->vmeta_arith_vvo
+ || break;
+ ||}
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ ||if (vk == 1) {
+ | mov dword [BASE+RA*8], RC
+ ||} else {
+ | mov dword [BASE+RA*8], RB
+ ||}
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arithpost
+ | movsd qword [BASE+RA*8], xmm0
+ |.endmacro
+ |
+ |.macro ins_arith, sseins
+ | ins_arithpre sseins, xmm0
+ | ins_arithpost
+ | ins_next
+ |.endmacro
+ |
+ |.macro ins_arith, intins, sseins
+ |.if DUALNUM
+ | ins_arithdn intins
+ |.else
+ | ins_arith, sseins
+ |.endif
+ |.endmacro
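+ |// With DUALNUM the integer paths rely on the CPU overflow flag: jo exits
+ |// to a ->vmeta_arith_*o handler, so a wrapped integer result is never
+ |// stored and the operation is redone on numbers (or via metamethods) by
+ |// lj_meta_arith.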
+
+ | // RA = dst, RB = src1 or num const, RC = src2 or num const
+ case BC_ADDVN: case BC_ADDNV: case BC_ADDVV:
+ | ins_arith add, addsd
+ break;
+ case BC_SUBVN: case BC_SUBNV: case BC_SUBVV:
+ | ins_arith sub, subsd
+ break;
+ case BC_MULVN: case BC_MULNV: case BC_MULVV:
+ | ins_arith imul, mulsd
+ break;
+ case BC_DIVVN: case BC_DIVNV: case BC_DIVVV:
+ | ins_arith divsd
+ break;
+ case BC_MODVN:
+ | ins_arithpre movsd, xmm1
+ |->BC_MODVN_Z:
+ | call ->vm_mod
+ | ins_arithpost
+ | ins_next
+ break;
+ case BC_MODNV: case BC_MODVV:
+ | ins_arithpre movsd, xmm1
+ | jmp ->BC_MODVN_Z // Avoid 3 copies. It's slow anyway.
+ break;
+ case BC_POW:
+ | ins_arithpre movsd, xmm1
+ | mov RB, BASE
+ |.if not X64
+ | movsd FPARG1, xmm0
+ | movsd FPARG3, xmm1
+ |.endif
+ | call extern pow
+ | movzx RA, PC_RA
+ | mov BASE, RB
+ |.if X64
+ | ins_arithpost
+ |.else
+ | fstp qword [BASE+RA*8]
+ |.endif
+ | ins_next
+ break;
+
+ case BC_CAT:
+ | ins_ABC // RA = dst, RB = src_start, RC = src_end
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | lea CARG2d, [BASE+RC*8]
+ | mov CARG3d, RC
+ | sub CARG3d, RB
+ |->BC_CAT_Z:
+ | mov L:RB, L:CARG1d
+ |.else
+ | lea RA, [BASE+RC*8]
+ | sub RC, RB
+ | mov ARG2, RA
+ | mov ARG3, RC
+ |->BC_CAT_Z:
+ | mov L:RB, SAVE_L
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_meta_cat // (lua_State *L, TValue *top, int left)
+ | // NULL (finished) or TValue * (metamethod) returned in eax (RC).
+ | mov BASE, L:RB->base
+ | test RC, RC
+ | jnz ->vmeta_binop
+ | movzx RB, PC_RB // Copy result to Stk[RA] from Stk[RB].
+ | movzx RA, PC_RA
+ |.if X64
+ | mov RCa, [BASE+RB*8]
+ | mov [BASE+RA*8], RCa
+ |.else
+ | mov RC, [BASE+RB*8+4]
+ | mov RB, [BASE+RB*8]
+ | mov [BASE+RA*8+4], RC
+ | mov [BASE+RA*8], RB
+ |.endif
+ | ins_next
+ break;
+
+ /* -- Constant ops ------------------------------------------------------ */
+
+ case BC_KSTR:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov RD, [KBASE+RD*4]
+ | mov dword [BASE+RA*8+4], LJ_TSTR
+ | mov [BASE+RA*8], RD
+ | ins_next
+ break;
+ case BC_KCDATA:
+ |.if FFI
+ | ins_AND // RA = dst, RD = cdata const (~)
+ | mov RD, [KBASE+RD*4]
+ | mov dword [BASE+RA*8+4], LJ_TCDATA
+ | mov [BASE+RA*8], RD
+ | ins_next
+ |.endif
+ break;
+ case BC_KSHORT:
+ | ins_AD // RA = dst, RD = signed int16 literal
+ |.if DUALNUM
+ | movsx RD, RDW
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RD
+ |.else
+ | movsx RD, RDW // Sign-extend literal.
+ | cvtsi2sd xmm0, RD
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | ins_next
+ break;
+ case BC_KNUM:
+ | ins_AD // RA = dst, RD = num const
+ | movsd xmm0, qword [KBASE+RD*8]
+ | movsd qword [BASE+RA*8], xmm0
+ | ins_next
+ break;
+ case BC_KPRI:
+ | ins_AND // RA = dst, RD = primitive type (~)
+ | mov [BASE+RA*8+4], RD
+ | ins_next
+ break;
+ case BC_KNIL:
+ | ins_AD // RA = dst_start, RD = dst_end
+ | lea RA, [BASE+RA*8+12]
+ | lea RD, [BASE+RD*8+4]
+ | mov RB, LJ_TNIL
+ | mov [RA-8], RB // Sets minimum 2 slots.
+ |1:
+ | mov [RA], RB
+ | add RA, 8
+ | cmp RA, RD
+ | jbe <1
+ | ins_next
+ break;
+
+ /* -- Upvalue and function ops ------------------------------------------ */
+
+ case BC_UGET:
+ | ins_AD // RA = dst, RD = upvalue #
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RD*4+offsetof(GCfuncL, uvptr)]
+ | mov RB, UPVAL:RB->v
+ |.if X64
+ | mov RDa, [RB]
+ | mov [BASE+RA*8], RDa
+ |.else
+ | mov RD, [RB+4]
+ | mov RB, [RB]
+ | mov [BASE+RA*8+4], RD
+ | mov [BASE+RA*8], RB
+ |.endif
+ | ins_next
+ break;
+ case BC_USETV:
+#define TV2MARKOFS \
+ ((int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv))
+ | ins_AD // RA = upvalue #, RD = src
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | cmp byte UPVAL:RB->closed, 0
+ | mov RB, UPVAL:RB->v
+ | mov RA, [BASE+RD*8]
+ | mov RD, [BASE+RD*8+4]
+ | mov [RB], RA
+ | mov [RB+4], RD
+ | jz >1
+ | // Check barrier for closed upvalue.
+ | test byte [RB+TV2MARKOFS], LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Upvalue is black. Check if new value is collectable and white.
+ | sub RD, LJ_TISGCV
+ | cmp RD, LJ_TNUMX - LJ_TISGCV // tvisgcv(v)
+ | jbe <1
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(v)
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ |.if X64 and not X64WIN
+ | mov FCARG2, RB
+ | mov RB, BASE // Save BASE.
+ |.else
+ | xchg FCARG2, RB // Save BASE (FCARG2 == BASE).
+ |.endif
+ | lea GL:FCARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
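+ |// Incremental GC invariant: a black object must never point to a white
+ |// one. A closed upvalue that was already marked receiving a white
+ |// collectable value crosses that barrier, and lj_gc_barrieruv repairs it.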
+ break;
+#undef TV2MARKOFS
+ case BC_USETS:
+ | ins_AND // RA = upvalue #, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov GCOBJ:RA, [KBASE+RD*4]
+ | mov RD, UPVAL:RB->v
+ | mov [RD], GCOBJ:RA
+ | mov dword [RD+4], LJ_TSTR
+ | test byte UPVAL:RB->marked, LJ_GC_BLACK // isblack(uv)
+ | jnz >2
+ |1:
+ | ins_next
+ |
+ |2: // Check if string is white and ensure upvalue is closed.
+ | test byte GCOBJ:RA->gch.marked, LJ_GC_WHITES // iswhite(str)
+ | jz <1
+ | cmp byte UPVAL:RB->closed, 0
+ | jz <1
+ | // Crossed a write barrier. Move the barrier forward.
+ | mov RB, BASE // Save BASE (FCARG2 == BASE).
+ | mov FCARG2, RD
+ | lea GL:FCARG1, [DISPATCH+GG_DISP2G]
+ | call extern lj_gc_barrieruv@8 // (global_State *g, TValue *tv)
+ | mov BASE, RB // Restore BASE.
+ | jmp <1
+ break;
+ case BC_USETN:
+ | ins_AD // RA = upvalue #, RD = num const
+ | mov LFUNC:RB, [BASE-8]
+ | movsd xmm0, qword [KBASE+RD*8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ | movsd qword [RA], xmm0
+ | ins_next
+ break;
+ case BC_USETP:
+ | ins_AND // RA = upvalue #, RD = primitive type (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov UPVAL:RB, [LFUNC:RB+RA*4+offsetof(GCfuncL, uvptr)]
+ | mov RA, UPVAL:RB->v
+ | mov [RA+4], RD
+ | ins_next
+ break;
+ case BC_UCLO:
+ | ins_AD // RA = level, RD = target
+ | branchPC RD // Do this first to free RD.
+ | mov L:RB, SAVE_L
+ | cmp dword L:RB->openupval, 0
+ | je >1
+ | mov L:RB->base, BASE
+ | lea FCARG2, [BASE+RA*8] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | call extern lj_func_closeuv@8 // (lua_State *L, TValue *level)
+ | mov BASE, L:RB->base
+ |1:
+ | ins_next
+ break;
+
+ case BC_FNEW:
+ | ins_AND // RA = dst, RD = proto const (~) (holding function prototype)
+ |.if X64
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG3d, [BASE-8]
+ | mov CARG2d, [KBASE+RD*4] // Fetch GCproto *.
+ | mov CARG1d, L:RB
+ |.else
+ | mov LFUNC:RA, [BASE-8]
+ | mov PROTO:RD, [KBASE+RD*4] // Fetch GCproto *.
+ | mov L:RB, SAVE_L
+ | mov ARG3, LFUNC:RA
+ | mov ARG2, PROTO:RD
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | // (lua_State *L, GCproto *pt, GCfuncL *parent)
+ | call extern lj_func_newL_gc
+ | // GCfuncL * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], LFUNC:RC
+ | mov dword [BASE+RA*8+4], LJ_TFUNC
+ | ins_next
+ break;
+
+ /* -- Table ops --------------------------------------------------------- */
+
+ case BC_TNEW:
+ | ins_AD // RA = dst, RD = hbits|asize
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov SAVE_PC, PC
+ | jae >5
+ |1:
+ |.if X64
+ | mov CARG3d, RD
+ | and RD, 0x7ff
+ | shr CARG3d, 11
+ |.else
+ | mov RA, RD
+ | and RD, 0x7ff
+ | shr RA, 11
+ | mov ARG3, RA
+ |.endif
+ | cmp RD, 0x7ff
+ | je >3
+ |2:
+ |.if X64
+ | mov L:CARG1d, L:RB
+ | mov CARG2d, RD
+ |.else
+ | mov ARG1, L:RB
+ | mov ARG2, RD
+ |.endif
+ | call extern lj_tab_new // (lua_State *L, int32_t asize, uint32_t hbits)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], TAB:RC
+ | mov dword [BASE+RA*8+4], LJ_TTAB
+ | ins_next
+ |3: // Turn 0x7ff into 0x801.
+ | mov RD, 0x801
+ | jmp <2
+ |5:
+ | mov L:FCARG1, L:RB
+ | call extern lj_gc_step_fixtop@4 // (lua_State *L)
+ | movzx RD, PC_RD
+ | jmp <1
+ break;
+ case BC_TDUP:
+ | ins_AND // RA = dst, RD = table const (~) (holding template table)
+ | mov L:RB, SAVE_L
+ | mov RA, [DISPATCH+DISPATCH_GL(gc.total)]
+ | mov SAVE_PC, PC
+ | cmp RA, [DISPATCH+DISPATCH_GL(gc.threshold)]
+ | mov L:RB->base, BASE
+ | jae >3
+ |2:
+ | mov TAB:FCARG2, [KBASE+RD*4] // Caveat: FCARG2 == BASE
+ | mov L:FCARG1, L:RB // Caveat: FCARG1 == RA
+ | call extern lj_tab_dup@8 // (lua_State *L, Table *kt)
+ | // Table * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA
+ | mov [BASE+RA*8], TAB:RC
+ | mov dword [BASE+RA*8+4], LJ_TTAB
+ | ins_next
+ |3:
+ | mov L:FCARG1, L:RB
+ | call extern lj_gc_step_fixtop@4 // (lua_State *L)
+ | movzx RD, PC_RD // Need to reload RD.
+ | not RDa
+ | jmp <2
+ break;
+
+ case BC_GGET:
+ | ins_AND // RA = dst, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*4]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_GSET:
+ | ins_AND // RA = src, RD = str const (~)
+ | mov LFUNC:RB, [BASE-8]
+ | mov TAB:RB, LFUNC:RB->env
+ | mov STR:RC, [KBASE+RD*4]
+ | jmp ->BC_TSETS_Z
+ break;
+
+ case BC_TGETV:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | checktab RB, ->vmeta_tgetv
+ | mov TAB:RB, [BASE+RB*8]
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movsd xmm0, qword [BASE+RC*8]
+ | cvttsd2si RC, xmm0
+ | cvtsi2sd xmm1, RC
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tgetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RC, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tgetv // Not in array part? Use fallback.
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ | // Get array slot.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetv // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tgetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TGETS_Z
+ break;
+ case BC_TGETS:
+ | ins_ABC // RA = dst, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tgets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TGETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->sid
+ | imul RA, #NODE
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >4
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >4
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >5 // Key found, but nil value?
+ | movzx RC, PC_RA
+ | // Get node value.
+ |.if X64
+ | mov RBa, [RA]
+ | mov [BASE+RC*8], RBa
+ |.else
+ | mov RB, [RA]
+ | mov RA, [RA+4]
+ | mov [BASE+RC*8], RB
+ | mov [BASE+RC*8+4], RA
+ |.endif
+ |2:
+ | ins_next
+ |
+ |3:
+ | movzx RC, PC_RA
+ | mov dword [BASE+RC*8+4], LJ_TNIL
+ | jmp <2
+ |
+ |4: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, nil result.
+ |
+ |5: // Check for __index if table value is nil.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz <3 // No metatable: done.
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jnz <3 // 'no __index' flag set: done.
+ | jmp ->vmeta_tgets // Caveat: preserve STR:RC.
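+ |// Interned strings make the key lookup a pointer compare: sid & hmask
+ |// picks the main position in the node array, 4: follows the collision
+ |// chain, and even a hit with a nil value must still consult __index.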
+ break;
+ case BC_TGETB:
+ | ins_ABC // RA = dst, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tgetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tgetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL // Avoid overwriting RB in fastpath.
+ | je >2
+ | // Get array slot.
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |1:
+ | ins_next
+ |
+ |2: // Check for __index if table value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz >3
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_index
+ | jz ->vmeta_tgetb // 'no __index' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ |3:
+ | mov dword [BASE+RA*8+4], LJ_TNIL
+ | jmp <1
+ break;
+ case BC_TGETR:
+ | ins_ABC // RA = dst, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ |.if DUALNUM
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RC, qword [BASE+RC*8]
+ |.endif
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tgetr // Not in array part? Use fallback.
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | // Get array slot.
+ |->BC_TGETR_Z:
+ |.if X64
+ | mov RBa, [RC]
+ | mov [BASE+RA*8], RBa
+ |.else
+ | mov RB, [RC]
+ | mov RC, [RC+4]
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ |.endif
+ |->BC_TGETR2_Z:
+ | ins_next
+ break;
+
+ case BC_TSETV:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | checktab RB, ->vmeta_tsetv
+ | mov TAB:RB, [BASE+RB*8]
+ |
+ | // Integer key?
+ |.if DUALNUM
+ | checkint RC, >5
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | // Convert number to int and back and compare.
+ | checknum RC, >5
+ | movsd xmm0, qword [BASE+RC*8]
+ | cvttsd2si RC, xmm0
+ | cvtsi2sd xmm1, RC
+ | ucomisd xmm0, xmm1
+ | jne ->vmeta_tsetv // Generic numeric key? Use fallback.
+ |.endif
+ | cmp RC, TAB:RB->asize // Takes care of unordered, too.
+ | jae ->vmeta_tsetv
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetv // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |5: // String key?
+ | checkstr RC, ->vmeta_tsetv
+ | mov STR:RC, [BASE+RC*8]
+ | jmp ->BC_TSETS_Z
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+ case BC_TSETS:
+ | ins_ABC // RA = src, RB = table, RC = str const (~)
+ | not RCa
+ | mov STR:RC, [KBASE+RC*4]
+ | checktab RB, ->vmeta_tsets
+ | mov TAB:RB, [BASE+RB*8]
+ |->BC_TSETS_Z: // RB = GCtab *, RC = GCstr *, refetches PC_RA.
+ | mov RA, TAB:RB->hmask
+ | and RA, STR:RC->sid
+ | imul RA, #NODE
+ | mov byte TAB:RB->nomm, 0 // Clear metamethod cache.
+ | add NODE:RA, TAB:RB->node
+ |1:
+ | cmp dword NODE:RA->key.it, LJ_TSTR
+ | jne >5
+ | cmp dword NODE:RA->key.gcr, STR:RC
+ | jne >5
+ | // Ok, key found. Assumes: offsetof(Node, val) == 0
+ | cmp dword [RA+4], LJ_TNIL
+ | je >4 // Previous value is nil?
+ |2:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |3: // Set node value.
+ | movzx RC, PC_RA
+ |.if X64
+ | mov RBa, [BASE+RC*8]
+ | mov [RA], RBa
+ |.else
+ | mov RB, [BASE+RC*8+4]
+ | mov RC, [BASE+RC*8]
+ | mov [RA+4], RB
+ | mov [RA], RC
+ |.endif
+ | ins_next
+ |
+ |4: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <2
+ | mov TMP1, RA // Save RA.
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ | mov RA, TMP1 // Restore RA.
+ | jmp <2
+ |
+ |5: // Follow hash chain.
+ | mov NODE:RA, NODE:RA->next
+ | test NODE:RA, NODE:RA
+ | jnz <1
+ | // End of hash chain: key not found, add a new one.
+ |
+ | // But check for __newindex first.
+ | mov TAB:RA, TAB:RB->metatable
+ | test TAB:RA, TAB:RA
+ | jz >6 // No metatable: continue.
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsets // 'no __newindex' flag NOT set: check.
+ |6:
+ | mov TMP1, STR:RC
+ | mov TMP2, LJ_TSTR
+ | mov TMP3, TAB:RB // Save TAB:RB for us.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE
+ | lea CARG3, TMP1
+ | mov CARG2d, TAB:RB
+ | mov L:RB, L:CARG1d
+ |.else
+ | lea RC, TMP1 // Store temp. TValue in TMP1/TMP2.
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov ARG3, RC
+ | mov ARG1, L:RB
+ | mov L:RB->base, BASE
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_newkey // (lua_State *L, GCtab *t, TValue *k)
+ | // Handles write barrier for the new key. TValue * returned in eax (RC).
+ | mov BASE, L:RB->base
+ | mov TAB:RB, TMP3 // Need TAB:RB for barrier.
+ | mov RA, eax
+ | jmp <2 // Must check write barrier for value.
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RC // Destroys STR:RC.
+ | jmp <3
+ break;
+ case BC_TSETB:
+ | ins_ABC // RA = src, RB = table, RC = byte literal
+ | checktab RB, ->vmeta_tsetb
+ | mov TAB:RB, [BASE+RB*8]
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tsetb
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | cmp dword [RC+4], LJ_TNIL
+ | je >3 // Previous value is nil?
+ |1:
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2: // Set array slot.
+ |.if X64
+ | mov RAa, [BASE+RA*8]
+ | mov [RC], RAa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |3: // Check for __newindex if previous value is nil.
+ | cmp dword TAB:RB->metatable, 0 // Shouldn't overwrite RA for fastpath.
+ | jz <1
+ | mov TAB:RA, TAB:RB->metatable
+ | test byte TAB:RA->nomm, 1<<MM_newindex
+ | jz ->vmeta_tsetb // 'no __newindex' flag NOT set: check.
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+ case BC_TSETR:
+ | ins_ABC // RA = src, RB = table, RC = key
+ | mov TAB:RB, [BASE+RB*8]
+ |.if DUALNUM
+ | mov RC, dword [BASE+RC*8]
+ |.else
+ | cvttsd2si RC, qword [BASE+RC*8]
+ |.endif
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | cmp RC, TAB:RB->asize
+ | jae ->vmeta_tsetr
+ | shl RC, 3
+ | add RC, TAB:RB->array
+ | // Set array slot.
+ |->BC_TSETR_Z:
+ |.if X64
+ | mov RBa, [BASE+RA*8]
+ | mov [RC], RBa
+ |.else
+ | mov RB, [BASE+RA*8+4]
+ | mov RA, [BASE+RA*8]
+ | mov [RC+4], RB
+ | mov [RC], RA
+ |.endif
+ | ins_next
+ |
+ |7: // Possible table write barrier for the value. Skip valiswhite check.
+ | barrierback TAB:RB, RA
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <2
+ break;
+
+ case BC_TSETM:
+ | ins_AD // RA = base (table at base-1), RD = num const (start index)
+ | mov TMP1, KBASE // Need one more free register.
+ | mov KBASE, dword [KBASE+RD*8] // Integer constant is in lo-word.
+ |1:
+ | lea RA, [BASE+RA*8]
+ | mov TAB:RB, [RA-8] // Guaranteed to be a table.
+ | test byte TAB:RB->marked, LJ_GC_BLACK // isblack(table)
+ | jnz >7
+ |2:
+ | mov RD, MULTRES
+ | sub RD, 1
+ | jz >4 // Nothing to copy?
+ | add RD, KBASE // Compute needed size.
+ | cmp RD, TAB:RB->asize
+ | ja >5 // Doesn't fit into array part?
+ | sub RD, KBASE
+ | shl KBASE, 3
+ | add KBASE, TAB:RB->array
+ |3: // Copy result slots to table.
+ |.if X64
+ | mov RBa, [RA]
+ | add RA, 8
+ | mov [KBASE], RBa
+ |.else
+ | mov RB, [RA]
+ | mov [KBASE], RB
+ | mov RB, [RA+4]
+ | add RA, 8
+ | mov [KBASE+4], RB
+ |.endif
+ | add KBASE, 8
+ | sub RD, 1
+ | jnz <3
+ |4:
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |5: // Need to resize array part.
+ |.if X64
+ | mov L:CARG1d, SAVE_L
+ | mov L:CARG1d->base, BASE // Caveat: CARG2d/CARG3d may be BASE.
+ | mov CARG2d, TAB:RB
+ | mov CARG3d, RD
+ | mov L:RB, L:CARG1d
+ |.else
+ | mov ARG2, TAB:RB
+ | mov L:RB, SAVE_L
+ | mov L:RB->base, BASE
+ | mov ARG3, RD
+ | mov ARG1, L:RB
+ |.endif
+ | mov SAVE_PC, PC
+ | call extern lj_tab_reasize // (lua_State *L, GCtab *t, int nasize)
+ | mov BASE, L:RB->base
+ | movzx RA, PC_RA // Restore RA.
+ | jmp <1 // Retry.
+ |
+ |7: // Possible table write barrier for any value. Skip valiswhite check.
+ | barrierback TAB:RB, RD
+ | jmp <2
+ break;
+
+ /* -- Calls and vararg handling ----------------------------------------- */
+
+ case BC_CALL: case BC_CALLM:
+ | ins_A_C // RA = base, (RB = nresults+1,) RC = nargs+1 | extra_nargs
+ if (op == BC_CALLM) {
+ | add NARGS:RD, MULTRES
+ }
+ | cmp dword [BASE+RA*8+4], LJ_TFUNC
+ | mov LFUNC:RB, [BASE+RA*8]
+ | jne ->vmeta_call_ra
+ | lea BASE, [BASE+RA*8+8]
+ | ins_call
+ break;
+
+ case BC_CALLMT:
+ | ins_AD // RA = base, RD = extra_nargs
+ | add NARGS:RD, MULTRES
+ | // Fall through. Assumes BC_CALLT follows and ins_AD is a no-op.
+ break;
+ case BC_CALLT:
+ | ins_AD // RA = base, RD = nargs+1
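+  | // Tail call: reuse the caller's frame by moving the function and its
+  | // args down over the current frame before dispatching.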
+ | lea RA, [BASE+RA*8+8]
+ | mov KBASE, BASE // Use KBASE for move + vmeta_call hint.
+ | mov LFUNC:RB, [RA-8]
+ | cmp dword [RA-4], LJ_TFUNC
+ | jne ->vmeta_call
+ |->BC_CALLT_Z:
+ | mov PC, [BASE-4]
+ | test PC, FRAME_TYPE
+ | jnz >7
+ |1:
+ | mov [BASE-8], LFUNC:RB // Copy function down, reloaded below.
+ | mov MULTRES, NARGS:RD
+ | sub NARGS:RD, 1
+ | jz >3
+ |2: // Move args down.
+ |.if X64
+ | mov RBa, [RA]
+ | add RA, 8
+ | mov [KBASE], RBa
+ |.else
+ | mov RB, [RA]
+ | mov [KBASE], RB
+ | mov RB, [RA+4]
+ | add RA, 8
+ | mov [KBASE+4], RB
+ |.endif
+ | add KBASE, 8
+ | sub NARGS:RD, 1
+ | jnz <2
+ |
+ | mov LFUNC:RB, [BASE-8]
+ |3:
+ | mov NARGS:RD, MULTRES
+ | cmp byte LFUNC:RB->ffid, 1 // (> FF_C) Calling a fast function?
+ | ja >5
+ |4:
+ | ins_callt
+ |
+ |5: // Tailcall to a fast function.
+ | test PC, FRAME_TYPE // Lua frame below?
+ | jnz <4
+ | movzx RA, PC_RA
+ | not RAa
+ | mov LFUNC:KBASE, [BASE+RA*8-8] // Need to prepare KBASE.
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | jmp <4
+ |
+ |7: // Tailcall from a vararg function.
+ | sub PC, FRAME_VARG
+ | test PC, FRAME_TYPEP
+ | jnz >8 // Vararg frame below?
+ | sub BASE, PC // Need to relocate BASE/KBASE down.
+ | mov KBASE, BASE
+ | mov PC, [BASE-4]
+ | jmp <1
+ |8:
+ | add PC, FRAME_VARG
+ | jmp <1
+ break;
+
+ case BC_ITERC:
+ | ins_A // RA = base, (RB = nresults+1,) RC = nargs+1 (2+1)
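+  | // Copy the iterator function, state and control var (fb[-4]..fb[-2])
+  | // up to fb[-1]..fb[1], then do a regular 2-arg call; results land at fb.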
+ | lea RA, [BASE+RA*8+8] // fb = base+1
+ |.if X64
+ | mov RBa, [RA-24] // Copy state. fb[0] = fb[-3].
+ | mov RCa, [RA-16] // Copy control var. fb[1] = fb[-2].
+ | mov [RA], RBa
+ | mov [RA+8], RCa
+ |.else
+ | mov RB, [RA-24] // Copy state. fb[0] = fb[-3].
+ | mov RC, [RA-20]
+ | mov [RA], RB
+ | mov [RA+4], RC
+ | mov RB, [RA-16] // Copy control var. fb[1] = fb[-2].
+ | mov RC, [RA-12]
+ | mov [RA+8], RB
+ | mov [RA+12], RC
+ |.endif
+ | mov LFUNC:RB, [RA-32] // Copy callable. fb[-1] = fb[-4]
+ | mov RC, [RA-28]
+ | mov [RA-8], LFUNC:RB
+ | mov [RA-4], RC
+ | cmp RC, LJ_TFUNC // Handle like a regular 2-arg call.
+ | mov NARGS:RD, 2+1
+ | jne ->vmeta_call
+ | mov BASE, RA
+ | ins_call
+ break;
+
+ case BC_ITERN:
+ |.if JIT
+ | hotloop RB
+ |.endif
+ |->vm_IITERN:
+ | ins_A // RA = base, (RB = nresults+1, RC = nargs+1 (2+1))
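+  | // Specialized next(): traverse the array part first, then the hash
+  | // part. The control var in slot RA-1 holds the next index; hash slots
+  | // continue the numbering after the array size.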
+ | mov TMP1, KBASE // Need two more free registers.
+ | mov TMP2, DISPATCH
+ | mov TAB:RB, [BASE+RA*8-16]
+ | mov RC, [BASE+RA*8-8] // Get index from control var.
+ | mov DISPATCH, TAB:RB->asize
+ | add PC, 4
+ | mov KBASE, TAB:RB->array
+ |1: // Traverse array part.
+ | cmp RC, DISPATCH; jae >5 // Index points after array part?
+ | cmp dword [KBASE+RC*8+4], LJ_TNIL; je >4
+ |.if DUALNUM
+ | mov dword [BASE+RA*8+4], LJ_TISNUM
+ | mov dword [BASE+RA*8], RC
+ |.else
+ | cvtsi2sd xmm0, RC
+ |.endif
+ | // Copy array slot to returned value.
+ |.if X64
+ | mov RBa, [KBASE+RC*8]
+ | mov [BASE+RA*8+8], RBa
+ |.else
+ | mov RB, [KBASE+RC*8+4]
+ | mov [BASE+RA*8+12], RB
+ | mov RB, [KBASE+RC*8]
+ | mov [BASE+RA*8+8], RB
+ |.endif
+ | add RC, 1
+ | // Return array index as a numeric key.
+ |.if DUALNUM
+ | // See above.
+ |.else
+ | movsd qword [BASE+RA*8], xmm0
+ |.endif
+ | mov [BASE+RA*8-8], RC // Update control var.
+ |2:
+ | movzx RD, PC_RD // Get target from ITERL.
+ | branchPC RD
+ |3:
+ | mov DISPATCH, TMP2
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |4: // Skip holes in array part.
+ | add RC, 1
+ | jmp <1
+ |
+ |5: // Traverse hash part.
+ | sub RC, DISPATCH
+ |6:
+ | cmp RC, TAB:RB->hmask; ja <3 // End of iteration? Branch to ITERL+1.
+ | imul KBASE, RC, #NODE
+ | add NODE:KBASE, TAB:RB->node
+ | cmp dword NODE:KBASE->val.it, LJ_TNIL; je >7
+ | lea DISPATCH, [RC+DISPATCH+1]
+ | // Copy key and value from hash slot.
+ |.if X64
+ | mov RBa, NODE:KBASE->key
+ | mov RCa, NODE:KBASE->val
+ | mov [BASE+RA*8], RBa
+ | mov [BASE+RA*8+8], RCa
+ |.else
+ | mov RB, NODE:KBASE->key.gcr
+ | mov RC, NODE:KBASE->key.it
+ | mov [BASE+RA*8], RB
+ | mov [BASE+RA*8+4], RC
+ | mov RB, NODE:KBASE->val.gcr
+ | mov RC, NODE:KBASE->val.it
+ | mov [BASE+RA*8+8], RB
+ | mov [BASE+RA*8+12], RC
+ |.endif
+ | mov [BASE+RA*8-8], DISPATCH
+ | jmp <2
+ |
+ |7: // Skip holes in hash part.
+ | add RC, 1
+ | jmp <6
+ break;
+
+ case BC_ISNEXT:
+ | ins_AD // RA = base, RD = target (points to ITERN)
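+  | // Verify the for-in triple is the built-in next, a table and nil.
+  | // If so, init the control var and keep the specialized ITERN target;
+  | // otherwise despecialize: this op becomes JMP and the target ITERC.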
+ | cmp dword [BASE+RA*8-20], LJ_TFUNC; jne >5
+ | mov CFUNC:RB, [BASE+RA*8-24]
+ | cmp dword [BASE+RA*8-12], LJ_TTAB; jne >5
+ | cmp dword [BASE+RA*8-4], LJ_TNIL; jne >5
+ | cmp byte CFUNC:RB->ffid, FF_next_N; jne >5
+ | branchPC RD
+ | mov dword [BASE+RA*8-8], 0 // Initialize control var.
+ | mov dword [BASE+RA*8-4], LJ_KEYINDEX
+ |1:
+ | ins_next
+ |5: // Despecialize bytecode if any of the checks fail.
+ | mov PC_OP, BC_JMP
+ | branchPC RD
+ |.if JIT
+ | cmp byte [PC], BC_ITERN
+ | jne >6
+ |.endif
+ | mov byte [PC], BC_ITERC
+ | jmp <1
+ |.if JIT
+ |6: // Unpatch JLOOP.
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | movzx RC, word [PC+2]
+ | mov TRACE:RA, [RA+RC*4]
+ | mov eax, TRACE:RA->startins
+ | mov al, BC_ITERC
+ | mov dword [PC], eax
+ | jmp <1
+ |.endif
+ break;
+
+ case BC_VARG:
+ | ins_ABC // RA = base, RB = nresults+1, RC = numparams
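+  | // Copy the vararg slots stashed below the vararg frame to slots
+  | // RA..RA+RB-2, filling up with nil; RB = 0 copies all varargs and
+  | // sets MULTRES accordingly, growing the stack if needed.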
+ | mov TMP1, KBASE // Need one more free register.
+ | lea KBASE, [BASE+RC*8+(8+FRAME_VARG)]
+ | lea RA, [BASE+RA*8]
+ | sub KBASE, [BASE-4]
+ | // Note: KBASE may now be even _above_ BASE if nargs was < numparams.
+ | test RB, RB
+ | jz >5 // Copy all varargs?
+ | lea RB, [RA+RB*8-8]
+ | cmp KBASE, BASE // No vararg slots?
+ | jnb >2
+ |1: // Copy vararg slots to destination slots.
+ |.if X64
+ | mov RCa, [KBASE-8]
+ | add KBASE, 8
+ | mov [RA], RCa
+ |.else
+ | mov RC, [KBASE-8]
+ | mov [RA], RC
+ | mov RC, [KBASE-4]
+ | add KBASE, 8
+ | mov [RA+4], RC
+ |.endif
+ | add RA, 8
+ | cmp RA, RB // All destination slots filled?
+ | jnb >3
+ | cmp KBASE, BASE // No more vararg slots?
+ | jb <1
+ |2: // Fill up remainder with nil.
+ | mov dword [RA+4], LJ_TNIL
+ | add RA, 8
+ | cmp RA, RB
+ | jb <2
+ |3:
+ | mov KBASE, TMP1
+ | ins_next
+ |
+ |5: // Copy all varargs.
+ | mov MULTRES, 1 // MULTRES = 0+1
+ | mov RC, BASE
+ | sub RC, KBASE
+ | jbe <3 // No vararg slots?
+ | mov RB, RC
+ | shr RB, 3
+ | add RB, 1
+ | mov MULTRES, RB // MULTRES = #varargs+1
+ | mov L:RB, SAVE_L
+ | add RC, RA
+ | cmp RC, L:RB->maxstack
+ | ja >7 // Need to grow stack?
+ |6: // Copy all vararg slots.
+ |.if X64
+ | mov RCa, [KBASE-8]
+ | add KBASE, 8
+ | mov [RA], RCa
+ |.else
+ | mov RC, [KBASE-8]
+ | mov [RA], RC
+ | mov RC, [KBASE-4]
+ | add KBASE, 8
+ | mov [RA+4], RC
+ |.endif
+ | add RA, 8
+ | cmp KBASE, BASE // No more vararg slots?
+ | jb <6
+ | jmp <3
+ |
+ |7: // Grow stack for varargs.
+ | mov L:RB->base, BASE
+ | mov L:RB->top, RA
+ | mov SAVE_PC, PC
+ | sub KBASE, BASE // Need delta, because BASE may change.
+ | mov FCARG2, MULTRES
+ | sub FCARG2, 1
+ | mov FCARG1, L:RB
+ | call extern lj_state_growstack@8 // (lua_State *L, int n)
+ | mov BASE, L:RB->base
+ | mov RA, L:RB->top
+ | add KBASE, BASE
+ | jmp <6
+ break;
+
+ /* -- Returns ----------------------------------------------------------- */
+
+ case BC_RETM:
+ | ins_AD // RA = results, RD = extra_nresults
+ | add RD, MULTRES // MULTRES >=1, so RD >=1.
+ | // Fall through. Assumes BC_RET follows and ins_AD is a no-op.
+ break;
+
+ case BC_RET: case BC_RET0: case BC_RET1:
+ | ins_AD // RA = results, RD = nresults+1
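+  | // Move the results down over the frame and return to the caller.
+  | // Frames other than plain Lua frames go through 7: vararg frames are
+  | // unwound there, anything else exits via ->vm_return.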
+ if (op != BC_RET0) {
+ | shl RA, 3
+ }
+ |1:
+ | mov PC, [BASE-4]
+ | mov MULTRES, RD // Save nresults+1.
+ | test PC, FRAME_TYPE // Check frame type marker.
+ | jnz >7 // Not returning to a fixarg Lua func?
+ switch (op) {
+ case BC_RET:
+ |->BC_RET_Z:
+ | mov KBASE, BASE // Use KBASE for result move.
+ | sub RD, 1
+ | jz >3
+ |2: // Move results down.
+ |.if X64
+ | mov RBa, [KBASE+RA]
+ | mov [KBASE-8], RBa
+ |.else
+ | mov RB, [KBASE+RA]
+ | mov [KBASE-8], RB
+ | mov RB, [KBASE+RA+4]
+ | mov [KBASE-4], RB
+ |.endif
+ | add KBASE, 8
+ | sub RD, 1
+ | jnz <2
+ |3:
+ | mov RD, MULTRES // Note: MULTRES may be >255.
+ | movzx RB, PC_RB // So cannot compare with RDL!
+ |5:
+ | cmp RB, RD // More results expected?
+ | ja >6
+ break;
+ case BC_RET1:
+ |.if X64
+ | mov RBa, [BASE+RA]
+ | mov [BASE-8], RBa
+ |.else
+ | mov RB, [BASE+RA+4]
+ | mov [BASE-4], RB
+ | mov RB, [BASE+RA]
+ | mov [BASE-8], RB
+ |.endif
+ /* fallthrough */
+ case BC_RET0:
+ |5:
+ | cmp PC_RB, RDL // More results expected?
+ | ja >6
+ default:
+ break;
+ }
+ | movzx RA, PC_RA
+ | not RAa // Note: ~RA = -(RA+1)
+ | lea BASE, [BASE+RA*8] // base = base - (RA+1)*8
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov KBASE, LFUNC:KBASE->pc
+ | mov KBASE, [KBASE+PC2PROTO(k)]
+ | ins_next
+ |
+ |6: // Fill up results with nil.
+ if (op == BC_RET) {
+ | mov dword [KBASE-4], LJ_TNIL // Note: relies on shifted base.
+ | add KBASE, 8
+ } else {
+ | mov dword [BASE+RD*8-12], LJ_TNIL
+ }
+ | add RD, 1
+ | jmp <5
+ |
+ |7: // Non-standard return case.
+ | lea RB, [PC-FRAME_VARG]
+ | test RB, FRAME_TYPEP
+ | jnz ->vm_return
+ | // Return from vararg function: relocate BASE down and RA up.
+ | sub BASE, RB
+ if (op != BC_RET0) {
+ | add RA, RB
+ }
+ | jmp <1
+ break;
+
+ /* -- Loops and branches ------------------------------------------------ */
+
+ |.define FOR_IDX, [RA]; .define FOR_TIDX, dword [RA+4]
+ |.define FOR_STOP, [RA+8]; .define FOR_TSTOP, dword [RA+12]
+ |.define FOR_STEP, [RA+16]; .define FOR_TSTEP, dword [RA+20]
+ |.define FOR_EXT, [RA+24]; .define FOR_TEXT, dword [RA+28]
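+  /* A numeric for loop keeps its state in four consecutive stack slots:
+  ** the internal index, the stop value, the step, and an external copy of
+  ** the index (FOR_EXT) that is visible to the loop body.
+  */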
+
+ case BC_FORL:
+ |.if JIT
+ | hotloop RB
+ |.endif
+ | // Fall through. Assumes BC_IFORL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JFORI:
+ case BC_JFORL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_FORI:
+ case BC_IFORL:
+ vk = (op == BC_IFORL || op == BC_JFORL);
+ | ins_AJ // RA = base, RD = target (after end of loop or start of loop)
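+  | // FORI type-checks idx/stop/step and does the initial check; IFORL
+  | // adds the step and re-checks at the back-edge. With DUALNUM the
+  | // all-integer fast path runs first: a non-integer index falls back
+  | // to the FP variant (>9) and index overflow terminates the loop.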
+ | lea RA, [BASE+RA*8]
+ if (LJ_DUALNUM) {
+ | cmp FOR_TIDX, LJ_TISNUM; jne >9
+ if (!vk) {
+ | cmp FOR_TSTOP, LJ_TISNUM; jne ->vmeta_for
+ | cmp FOR_TSTEP, LJ_TISNUM; jne ->vmeta_for
+ | mov RB, dword FOR_IDX
+ | cmp dword FOR_STEP, 0; jl >5
+ } else {
+#ifdef LUA_USE_ASSERT
+ | cmp FOR_TSTOP, LJ_TISNUM; jne ->assert_bad_for_arg_type
+ | cmp FOR_TSTEP, LJ_TISNUM; jne ->assert_bad_for_arg_type
+#endif
+ | mov RB, dword FOR_STEP
+ | test RB, RB; js >5
+ | add RB, dword FOR_IDX; jo >1
+ | mov dword FOR_IDX, RB
+ }
+ | cmp RB, dword FOR_STOP
+ | mov FOR_TEXT, LJ_TISNUM
+ | mov dword FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jle >7
+ |1:
+ |6:
+ | branchPC RD
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ } else if (op == BC_IFORL) {
+ | jg >7
+ |6:
+ | branchPC RD
+ |1:
+ } else {
+ | jle =>BC_JLOOP
+ |1:
+ |6:
+ }
+ |7:
+ | ins_next
+ |
+ |5: // Invert check for negative step.
+ if (vk) {
+ | add RB, dword FOR_IDX; jo <1
+ | mov dword FOR_IDX, RB
+ }
+ | cmp RB, dword FOR_STOP
+ | mov FOR_TEXT, LJ_TISNUM
+ | mov dword FOR_EXT, RB
+ if (op == BC_FORI) {
+ | jge <7
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jge =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ | jl <7
+ } else {
+ | jge =>BC_JLOOP
+ }
+ | jmp <6
+ |9: // Fallback to FP variant.
+ } else if (!vk) {
+ | cmp FOR_TIDX, LJ_TISNUM
+ }
+ if (!vk) {
+ | jae ->vmeta_for
+ | cmp FOR_TSTOP, LJ_TISNUM; jae ->vmeta_for
+ } else {
+#ifdef LUA_USE_ASSERT
+ | cmp FOR_TSTOP, LJ_TISNUM; jae ->assert_bad_for_arg_type
+ | cmp FOR_TSTEP, LJ_TISNUM; jae ->assert_bad_for_arg_type
+#endif
+ }
+ | mov RB, FOR_TSTEP // Load type/hiword of for step.
+ if (!vk) {
+ | cmp RB, LJ_TISNUM; jae ->vmeta_for
+ }
+ | movsd xmm0, qword FOR_IDX
+ | movsd xmm1, qword FOR_STOP
+ if (vk) {
+ | addsd xmm0, qword FOR_STEP
+ | movsd qword FOR_IDX, xmm0
+ | test RB, RB; js >3
+ } else {
+ | jl >3
+ }
+ | ucomisd xmm1, xmm0
+ |1:
+ | movsd qword FOR_EXT, xmm0
+ if (op == BC_FORI) {
+ |.if DUALNUM
+ | jnb <7
+ |.else
+ | jnb >2
+ | branchPC RD
+ |.endif
+ } else if (op == BC_JFORI) {
+ | branchPC RD
+ | movzx RD, PC_RD
+ | jnb =>BC_JLOOP
+ } else if (op == BC_IFORL) {
+ |.if DUALNUM
+ | jb <7
+ |.else
+ | jb >2
+ | branchPC RD
+ |.endif
+ } else {
+ | jnb =>BC_JLOOP
+ }
+ |.if DUALNUM
+ | jmp <6
+ |.else
+ |2:
+ | ins_next
+ |.endif
+ |
+ |3: // Invert comparison if step is negative.
+ | ucomisd xmm0, xmm1
+ | jmp <1
+ break;
+
+ case BC_ITERL:
+ |.if JIT
+ | hotloop RB
+ |.endif
+ | // Fall through. Assumes BC_IITERL follows and ins_AJ is a no-op.
+ break;
+
+ case BC_JITERL:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IITERL:
+ | ins_AJ // RA = base, RD = target
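+  | // While the iterator returns non-nil, save the first result as the
+  | // new control var and branch back (JITERL enters the trace instead).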
+ | lea RA, [BASE+RA*8]
+ | mov RB, [RA+4]
+ | cmp RB, LJ_TNIL; je >1 // Stop if iterator returned nil.
+ if (op == BC_JITERL) {
+ | mov [RA-4], RB
+ | mov RB, [RA]
+ | mov [RA-8], RB
+ | jmp =>BC_JLOOP
+ } else {
+ | branchPC RD // Otherwise save control var + branch.
+ | mov RD, [RA]
+ | mov [RA-4], RB
+ | mov [RA-8], RD
+ }
+ |1:
+ | ins_next
+ break;
+
+ case BC_LOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+  | // Note: RA/RD are only used by the trace recorder to determine scope/extent.
+  | // This opcode does NOT jump; its only purpose is to detect a hot loop.
+ |.if JIT
+ | hotloop RB
+ |.endif
+ | // Fall through. Assumes BC_ILOOP follows and ins_A is a no-op.
+ break;
+
+ case BC_ILOOP:
+ | ins_A // RA = base, RD = target (loop extent)
+ | ins_next
+ break;
+
+ case BC_JLOOP:
+ |.if JIT
+ | ins_AD // RA = base (ignored), RD = traceno
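+  | // Enter compiled code: look up trace RD in the trace table, save the
+  | // extra callee-save registers traces may use, and jump to its mcode.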
+ | mov RA, [DISPATCH+DISPATCH_J(trace)]
+ | mov TRACE:RD, [RA+RD*4]
+ | mov RDa, TRACE:RD->mcode
+ | mov L:RB, SAVE_L
+ | mov [DISPATCH+DISPATCH_GL(jit_base)], BASE
+ | mov [DISPATCH+DISPATCH_GL(tmpbuf.L)], L:RB
+ | // Save additional callee-save registers only used in compiled code.
+ |.if X64WIN
+ | mov TMPQ, r12
+ | mov TMPa, r13
+ | mov CSAVE_4, r14
+ | mov CSAVE_3, r15
+ | mov RAa, rsp
+ | sub rsp, 9*16+4*8
+ | movdqa [RAa], xmm6
+ | movdqa [RAa-1*16], xmm7
+ | movdqa [RAa-2*16], xmm8
+ | movdqa [RAa-3*16], xmm9
+ | movdqa [RAa-4*16], xmm10
+ | movdqa [RAa-5*16], xmm11
+ | movdqa [RAa-6*16], xmm12
+ | movdqa [RAa-7*16], xmm13
+ | movdqa [RAa-8*16], xmm14
+ | movdqa [RAa-9*16], xmm15
+ |.elif X64
+ | mov TMPQ, r12
+ | mov TMPa, r13
+ | sub rsp, 16
+ |.endif
+ | jmp RDa
+ |.endif
+ break;
+
+ case BC_JMP:
+ | ins_AJ // RA = unused, RD = target
+ | branchPC RD
+ | ins_next
+ break;
+
+ /* -- Function headers -------------------------------------------------- */
+
+ /*
+ ** Reminder: A function may be called with func/args above L->maxstack,
+ ** i.e. occupying EXTRA_STACK slots. And vmeta_call may add one extra slot,
+ ** too. This means all FUNC* ops (including fast functions) must check
+ ** for stack overflow _before_ adding more slots!
+ */
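+  /* E.g. BC_IFUNCF below computes the prospective top of frame and checks
+  ** it against L->maxstack before touching any of the new slots.
+  */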
+
+ case BC_FUNCF:
+ |.if JIT
+ | hotcall RB
+ |.endif
+ case BC_FUNCV: /* NYI: compiled vararg functions. */
+ | // Fall through. Assumes BC_IFUNCF/BC_IFUNCV follow and ins_AD is a no-op.
+ break;
+
+ case BC_JFUNCF:
+#if !LJ_HASJIT
+ break;
+#endif
+ case BC_IFUNCF:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | mov L:RB, SAVE_L
+ | lea RA, [BASE+RA*8] // Top of frame.
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_f
+ | movzx RA, byte [PC-4+PC2PROTO(numparams)]
+ | cmp NARGS:RD, RA // Check for missing parameters.
+ | jbe >3
+ |2:
+ if (op == BC_JFUNCF) {
+ | movzx RD, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov dword [BASE+NARGS:RD*8-4], LJ_TNIL
+ | add NARGS:RD, 1
+ | cmp NARGS:RD, RA
+ | jbe <3
+ | jmp <2
+ break;
+
+ case BC_JFUNCV:
+#if !LJ_HASJIT
+ break;
+#endif
+ | int3 // NYI: compiled vararg functions
+ break; /* NYI: compiled vararg functions. */
+
+ case BC_IFUNCV:
+ | ins_AD // BASE = new base, RA = framesize, RD = nargs+1
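+  | // Vararg prologue: build a new frame above the passed args, store the
+  | // frame delta + FRAME_VARG, copy the fixed args up into the new frame
+  | // and clear their old slots (helps the GC); missing params become nil.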
+ | lea RB, [NARGS:RD*8+FRAME_VARG]
+ | lea RD, [BASE+NARGS:RD*8]
+ | mov LFUNC:KBASE, [BASE-8]
+ | mov [RD-4], RB // Store delta + FRAME_VARG.
+ | mov [RD-8], LFUNC:KBASE // Store copy of LFUNC.
+ | mov L:RB, SAVE_L
+ | lea RA, [RD+RA*8]
+ | cmp RA, L:RB->maxstack
+ | ja ->vm_growstack_v // Need to grow stack.
+ | mov RA, BASE
+ | mov BASE, RD
+ | movzx RB, byte [PC-4+PC2PROTO(numparams)]
+ | test RB, RB
+ | jz >2
+ |1: // Copy fixarg slots up to new frame.
+ | add RA, 8
+ | cmp RA, BASE
+ | jnb >3 // Less args than parameters?
+ | mov KBASE, [RA-8]
+ | mov [RD], KBASE
+ | mov KBASE, [RA-4]
+ | mov [RD+4], KBASE
+ | add RD, 8
+ | mov dword [RA-4], LJ_TNIL // Clear old fixarg slot (help the GC).
+ | sub RB, 1
+ | jnz <1
+ |2:
+ if (op == BC_JFUNCV) {
+ | movzx RD, PC_RD
+ | jmp =>BC_JLOOP
+ } else {
+ | mov KBASE, [PC-4+PC2PROTO(k)]
+ | ins_next
+ }
+ |
+ |3: // Clear missing parameters.
+ | mov dword [RD+4], LJ_TNIL
+ | add RD, 8
+ | sub RB, 1
+ | jnz <3
+ | jmp <2
+ break;
+
+ case BC_FUNCC:
+ case BC_FUNCCW:
+ | ins_AD // BASE = new base, RA = ins RA|RD (unused), RD = nargs+1
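+  | // C function call: set up L->base/L->top, ensure LUA_MINSTACK slots
+  | // of headroom, switch the VM state and call f(L) (via the wrapper
+  | // for BC_FUNCCW); the result count comes back in eax.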
+ | mov CFUNC:RB, [BASE-8]
+ | mov KBASEa, CFUNC:RB->f
+ | mov L:RB, SAVE_L
+ | lea RD, [BASE+NARGS:RD*8-8]
+ | mov L:RB->base, BASE
+ | lea RA, [RD+8*LUA_MINSTACK]
+ | cmp RA, L:RB->maxstack
+ | mov L:RB->top, RD
+ if (op == BC_FUNCC) {
+ |.if X64
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | mov ARG1, L:RB
+ |.endif
+ } else {
+ |.if X64
+ | mov CARG2, KBASEa
+ | mov CARG1d, L:RB // Caveat: CARG1d may be RA.
+ |.else
+ | mov ARG2, KBASEa
+ | mov ARG1, L:RB
+ |.endif
+ }
+ | ja ->vm_growstack_c // Need to grow stack.
+ | set_vmstate C
+ if (op == BC_FUNCC) {
+ | call KBASEa // (lua_State *L)
+ } else {
+ | // (lua_State *L, lua_CFunction f)
+ | call aword [DISPATCH+DISPATCH_GL(wrapf)]
+ }
+ | // nresults returned in eax (RD).
+ | mov BASE, L:RB->base
+ | mov [DISPATCH+DISPATCH_GL(cur_L)], L:RB
+ | set_vmstate INTERP
+ | lea RA, [BASE+RD*8]
+ | neg RA
+ | add RA, L:RB->top // RA = (L->top-(L->base+nresults))*8
+ | mov PC, [BASE-4] // Fetch PC of caller.
+ | jmp ->vm_returnc
+ break;
+
+ /* ---------------------------------------------------------------------- */
+
+ default:
+ fprintf(stderr, "Error: undefined opcode BC_%s\n", bc_names[op]);
+ exit(2);
+ break;
+ }
+}
+
+static int build_backend(BuildCtx *ctx)
+{
+ int op;
+ dasm_growpc(Dst, BC__MAX);
+ build_subroutines(ctx);
+ |.code_op
+ for (op = 0; op < BC__MAX; op++)
+ build_ins(ctx, (BCOp)op, op);
+ return BC__MAX;
+}
+
+/* Emit pseudo frame-info for all assembler functions. */
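+/* A CIE describing the fixed interpreter frame plus FDEs covering the VM
+** code are written to .debug_frame and, with the lj_err_unwind_dwarf
+** personality, to .eh_frame, so debuggers and the unwinder can step
+** through VM frames.
+*/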
+static void emit_asm_debug(BuildCtx *ctx)
+{
+ int fcofs = (int)((uint8_t *)ctx->glob[GLOB_vm_ffi_call] - ctx->code);
+#if LJ_64
+#define SZPTR "8"
+#define BSZPTR "3"
+#define REG_SP "0x7"
+#define REG_RA "0x10"
+#else
+#define SZPTR "4"
+#define BSZPTR "2"
+#define REG_SP "0x4"
+#define REG_RA "0x8"
+#endif
+ switch (ctx->mode) {
+ case BUILD_elfasm:
+ fprintf(ctx->fp, "\t.section .debug_frame,\"\",@progbits\n");
+ fprintf(ctx->fp,
+ ".Lframe0:\n"
+ "\t.long .LECIE0-.LSCIE0\n"
+ ".LSCIE0:\n"
+ "\t.long 0xffffffff\n"
+ "\t.byte 0x1\n"
+ "\t.string \"\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE0:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE0:\n"
+ "\t.long .LEFDE0-.LASFDE0\n"
+ ".LASFDE0:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad .Lbegin\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#if LJ_NO_UNWIND
+ "\t.byte 0x8d\n\t.uleb128 0x6\n" /* offset r13 */
+ "\t.byte 0x8c\n\t.uleb128 0x7\n" /* offset r12 */
+#endif
+#else
+ "\t.long .Lbegin\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE0:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".LSFDE1:\n"
+ "\t.long .LEFDE1-.LASFDE1\n"
+ ".LASFDE1:\n"
+ "\t.long .Lframe0\n"
+#if LJ_64
+ "\t.quad lj_vm_ffi_call\n"
+ "\t.quad %d\n"
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.long lj_vm_ffi_call\n"
+ "\t.long %d\n"
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE1:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#if !LJ_NO_UNWIND
+#if LJ_TARGET_SOLARIS
+#if LJ_64
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@unwind\n");
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"aw\",@progbits\n");
+#endif
+#else
+ fprintf(ctx->fp, "\t.section .eh_frame,\"a\",@progbits\n");
+#endif
+ fprintf(ctx->fp,
+ ".Lframe1:\n"
+ "\t.long .LECIE1-.LSCIE1\n"
+ ".LSCIE1:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zPR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 6\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.long lj_err_unwind_dwarf-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE1:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE2:\n"
+ "\t.long .LEFDE2-.LASFDE2\n"
+ ".LASFDE2:\n"
+ "\t.long .LASFDE2-.Lframe1\n"
+ "\t.long .Lbegin-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.uleb128 %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.uleb128 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.uleb128 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0x87\n\t.uleb128 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.uleb128 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.uleb128 0x5\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE2:\n\n", fcofs, CFRAME_SIZE);
+#if LJ_HASFFI
+ fprintf(ctx->fp,
+ ".Lframe2:\n"
+ "\t.long .LECIE2-.LSCIE2\n"
+ ".LSCIE2:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.string \"zR\"\n"
+ "\t.uleb128 0x1\n"
+ "\t.sleb128 -" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.uleb128 1\n" /* augmentation length */
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.uleb128 " REG_SP "\n\t.uleb128 " SZPTR "\n"
+ "\t.byte 0x80+" REG_RA "\n\t.uleb128 0x1\n"
+ "\t.align " SZPTR "\n"
+ ".LECIE2:\n\n");
+ fprintf(ctx->fp,
+ ".LSFDE3:\n"
+ "\t.long .LEFDE3-.LASFDE3\n"
+ ".LASFDE3:\n"
+ "\t.long .LASFDE3-.Lframe2\n"
+ "\t.long lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.uleb128 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.uleb128 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.uleb128 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.uleb128 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.uleb128 8\n" /* def_cfa_offset */
+ "\t.byte 0x85\n\t.uleb128 0x2\n" /* offset ebp */
+ "\t.byte 0xd\n\t.uleb128 0x5\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.uleb128 0x3\n" /* offset ebx */
+#endif
+ "\t.align " SZPTR "\n"
+ ".LEFDE3:\n\n", (int)ctx->codesz - fcofs);
+#endif
+#endif
+ break;
+#if !LJ_NO_UNWIND
+ /* Mental note: never let Apple design an assembler.
+ ** Or a linker. Or a plastic case. But I digress.
+ */
+ case BUILD_machasm: {
+#if LJ_HASFFI
+ int fcsize = 0;
+#endif
+ int i;
+ fprintf(ctx->fp, "\t.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support\n");
+ fprintf(ctx->fp,
+ "EH_frame1:\n"
+ "\t.set L$set$x,LECIEX-LSCIEX\n"
+ "\t.long L$set$x\n"
+ "LSCIEX:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zPR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 6\n" /* augmentation length */
+ "\t.byte 0x9b\n" /* indirect|pcrel|sdata4 */
+#if LJ_64
+ "\t.long _lj_err_unwind_dwarf+4@GOTPCREL\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.long L_lj_err_unwind_dwarf$non_lazy_ptr-.\n"
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEX:\n\n");
+ for (i = 0; i < ctx->nsym; i++) {
+ const char *name = ctx->sym[i].name;
+ int32_t size = ctx->sym[i+1].ofs - ctx->sym[i].ofs;
+ if (size == 0) continue;
+#if LJ_HASFFI
+ if (!strcmp(name, "_lj_vm_ffi_call")) { fcsize = size; continue; }
+#endif
+ fprintf(ctx->fp,
+ "%s.eh:\n"
+ "LSFDE%d:\n"
+ "\t.set L$set$%d,LEFDE%d-LASFDE%d\n"
+ "\t.long L$set$%d\n"
+ "LASFDE%d:\n"
+ "\t.long LASFDE%d-EH_frame1\n"
+ "\t.long %s-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+ "\t.byte 0xe\n\t.byte %d\n" /* def_cfa_offset */
+#if LJ_64
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+ "\t.byte 0x8f\n\t.byte 0x4\n" /* offset r15 */
+ "\t.byte 0x8e\n\t.byte 0x5\n" /* offset r14 */
+#else
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O) */
+ "\t.byte 0x87\n\t.byte 0x3\n" /* offset edi */
+ "\t.byte 0x86\n\t.byte 0x4\n" /* offset esi */
+ "\t.byte 0x83\n\t.byte 0x5\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDE%d:\n\n",
+ name, i, i, i, i, i, i, i, name, size, CFRAME_SIZE, i);
+ }
+#if LJ_HASFFI
+ if (fcsize) {
+ fprintf(ctx->fp,
+ "EH_frame2:\n"
+ "\t.set L$set$y,LECIEY-LSCIEY\n"
+ "\t.long L$set$y\n"
+ "LSCIEY:\n"
+ "\t.long 0\n"
+ "\t.byte 0x1\n"
+ "\t.ascii \"zR\\0\"\n"
+ "\t.byte 0x1\n"
+ "\t.byte 128-" SZPTR "\n"
+ "\t.byte " REG_RA "\n"
+ "\t.byte 1\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte " REG_SP "\n\t.byte " SZPTR "\n"
+#else
+ "\t.byte 0x1b\n" /* pcrel|sdata4 */
+ "\t.byte 0xc\n\t.byte 0x5\n\t.byte 0x4\n" /* esp=5 on 32 bit MACH-O. */
+#endif
+ "\t.byte 0x80+" REG_RA "\n\t.byte 0x1\n"
+ "\t.align " BSZPTR "\n"
+ "LECIEY:\n\n");
+ fprintf(ctx->fp,
+ "_lj_vm_ffi_call.eh:\n"
+ "LSFDEY:\n"
+ "\t.set L$set$yy,LEFDEY-LASFDEY\n"
+ "\t.long L$set$yy\n"
+ "LASFDEY:\n"
+ "\t.long LASFDEY-EH_frame2\n"
+ "\t.long _lj_vm_ffi_call-.\n"
+ "\t.long %d\n"
+ "\t.byte 0\n" /* augmentation length */
+#if LJ_64
+ "\t.byte 0xe\n\t.byte 16\n" /* def_cfa_offset */
+ "\t.byte 0x86\n\t.byte 0x2\n" /* offset rbp */
+ "\t.byte 0xd\n\t.byte 0x6\n" /* def_cfa_register rbp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset rbx */
+#else
+ "\t.byte 0xe\n\t.byte 8\n" /* def_cfa_offset */
+ "\t.byte 0x84\n\t.byte 0x2\n" /* offset ebp (4 for MACH-O) */
+ "\t.byte 0xd\n\t.byte 0x4\n" /* def_cfa_register ebp */
+ "\t.byte 0x83\n\t.byte 0x3\n" /* offset ebx */
+#endif
+ "\t.align " BSZPTR "\n"
+ "LEFDEY:\n\n", fcsize);
+ }
+#endif
+#if !LJ_64
+ fprintf(ctx->fp,
+ "\t.non_lazy_symbol_pointer\n"
+ "L_lj_err_unwind_dwarf$non_lazy_ptr:\n"
+ ".indirect_symbol _lj_err_unwind_dwarf\n"
+ ".long 0\n\n");
+ fprintf(ctx->fp, "\t.section __IMPORT,__jump_table,symbol_stubs,pure_instructions+self_modifying_code,5\n");
+ {
+ const char *const *xn;
+ for (xn = ctx->extnames; *xn; xn++)
+ if (strncmp(*xn, LABEL_PREFIX, sizeof(LABEL_PREFIX)-1))
+ fprintf(ctx->fp, "L_%s$stub:\n\t.indirect_symbol _%s\n\t.ascii \"\\364\\364\\364\\364\\364\"\n", *xn, *xn);
+ }
+#endif
+ fprintf(ctx->fp, ".subsections_via_symbols\n");
+ }
+ break;
+#endif
+ default: /* Difficult for other modes. */
+ break;
+ }
+}
+
diff --git a/libs/luajit-cmake/luajit/src/xb1build.bat b/libs/luajit-cmake/luajit/src/xb1build.bat
new file mode 100644
index 0000000..2eb6817
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/xb1build.bat
@@ -0,0 +1,101 @@
+@rem Script to build LuaJIT with the Xbox One SDK.
+@rem Donated to the public domain.
+@rem
+@rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler)
+@rem Then cd to this directory and run this script.
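+@rem
+@rem Build flow: the host compiler builds minilua, minilua runs DynASM to
+@rem preprocess vm_x64.dasc into host\buildvm_arch.h, buildvm then emits
+@rem lj_vm.obj plus the generated lj_*def headers, and the Durango cross
+@rem compiler finally builds luajit.lib.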
+
+@if not defined INCLUDE goto :FAIL
+@if not defined DurangoXDK goto :FAIL
+
+@setlocal
+@echo ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Error out if the host compiler is not 64 bit.
+@minilua
+@if not errorlevel 8 goto :FAIL
+
+@set DASMFLAGS=-D WIN -D FFI -D P64
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x64.dasc
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% /D_DURANGO host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m peobj -o lj_vm.obj
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@echo ---- Cross compiler ----
+
+@set CWD=%cd%
+@call "%DurangoXDK%\xdk\DurangoVars.cmd" XDK
+@cd /D "%CWD%"
+@shift
+
+@set LJCOMPILE="cl" /nologo /c /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /D_LIB /D_UNICODE /D_DURANGO
+@set LJLIB="lib" /nologo
+
+@if "%1"=="debug" (
+ @shift
+ @set LJCOMPILE=%LJCOMPILE% /Zi /MDd /Od
+ @set LJLINK=%LJLINK% /debug
+) else (
+ @set LJCOMPILE=%LJCOMPILE% /MD /O2 /DNDEBUG
+)
+
+@if "%1"=="amalg" goto :AMALG
+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:luajit.lib lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :NOAMALG
+:AMALG
+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:luajit.lib ljamalg.obj lj_vm.obj
+@if errorlevel 1 goto :BAD
+:NOAMALG
+
+@del *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for Xbox One ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Visual Studio .NET Command Prompt"
+@echo (64 bit host compiler). The Xbox One SDK must be installed, too.
+:END
diff --git a/libs/luajit-cmake/luajit/src/xedkbuild.bat b/libs/luajit-cmake/luajit/src/xedkbuild.bat
new file mode 100644
index 0000000..37322d0
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/xedkbuild.bat
@@ -0,0 +1,92 @@
+@rem Script to build LuaJIT with the Xbox 360 SDK.
+@rem Donated to the public domain.
+@rem
+@rem Open a "Visual Studio .NET Command Prompt" (32 bit host compiler)
+@rem Then cd to this directory and run this script.
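+@rem
+@rem Build flow matches xb1build.bat, except DynASM preprocesses vm_ppc.dasc
+@rem and the Xbox 360 cross compiler builds luajit20.lib.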
+
+@if not defined INCLUDE goto :FAIL
+@if not defined XEDK goto :FAIL
+
+@setlocal
+@rem ---- Host compiler ----
+@set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE
+@set LJLINK=link /nologo
+@set LJMT=mt /nologo
+@set DASMDIR=..\dynasm
+@set DASM=%DASMDIR%\dynasm.lua
+@set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_buffer.c
+
+%LJCOMPILE% host\minilua.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:minilua.exe minilua.obj
+@if errorlevel 1 goto :BAD
+if exist minilua.exe.manifest^
+ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe
+
+@rem Error out for 64 bit host compiler
+@minilua
+@if errorlevel 8 goto :FAIL
+
+@set DASMFLAGS=-D GPR64 -D FRAME32 -D PPE -D SQRT -D DUALNUM
+minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_ppc.dasc
+@if errorlevel 1 goto :BAD
+
+%LJCOMPILE% /I "." /I %DASMDIR% /D_XBOX_VER=200 /DLUAJIT_TARGET=LUAJIT_ARCH_PPC host\buildvm*.c
+@if errorlevel 1 goto :BAD
+%LJLINK% /out:buildvm.exe buildvm*.obj
+@if errorlevel 1 goto :BAD
+if exist buildvm.exe.manifest^
+ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe
+
+buildvm -m peobj -o lj_vm.obj
+@if errorlevel 1 goto :BAD
+buildvm -m bcdef -o lj_bcdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m ffdef -o lj_ffdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m libdef -o lj_libdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m recdef -o lj_recdef.h %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB%
+@if errorlevel 1 goto :BAD
+buildvm -m folddef -o lj_folddef.h lj_opt_fold.c
+@if errorlevel 1 goto :BAD
+
+@rem ---- Cross compiler ----
+@set LJCOMPILE="%XEDK%\bin\win32\cl" /nologo /c /MT /O2 /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /DNDEBUG /D_XBOX /D_LIB /DLUAJIT_USE_SYSMALLOC
+@set LJLIB="%XEDK%\bin\win32\lib" /nologo
+@set "INCLUDE=%XEDK%\include\xbox"
+
+@if "%1" neq "debug" goto :NODEBUG
+@shift
+@set "LJCOMPILE=%LJCOMPILE% /Zi"
+:NODEBUG
+@if "%1"=="amalg" goto :AMALG
+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:luajit20.lib lj_*.obj lib_*.obj
+@if errorlevel 1 goto :BAD
+@goto :NOAMALG
+:AMALG
+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c
+@if errorlevel 1 goto :BAD
+%LJLIB% /OUT:luajit20.lib ljamalg.obj lj_vm.obj
+@if errorlevel 1 goto :BAD
+:NOAMALG
+
+@del *.obj *.manifest minilua.exe buildvm.exe
+@echo.
+@echo === Successfully built LuaJIT for Xbox 360 ===
+
+@goto :END
+:BAD
+@echo.
+@echo *******************************************************
+@echo *** Build FAILED -- Please check the error messages ***
+@echo *******************************************************
+@goto :END
+:FAIL
+@echo To run this script you must open a "Visual Studio .NET Command Prompt"
+@echo (32 bit host compiler). The Xbox 360 SDK must be installed, too.
+:END