author    sanine <sanine.not@pm.me>  2023-03-11 15:58:20 -0600
committer sanine <sanine.not@pm.me>  2023-03-11 15:58:20 -0600
commit    ebc50b387ab209c9f9a0d92e340ac293d5697274 (patch)
tree      ea8c8b3677a18c994d2b9d33dbef3461dcf18113 /libs/luajit-cmake/luajit/src/lj_opt_sink.c
parent    c2329b4c8258baa9429c77566c9def97d00e96d7 (diff)
build & link with luajit instead of lua5.1
Diffstat (limited to 'libs/luajit-cmake/luajit/src/lj_opt_sink.c')
-rw-r--r--  libs/luajit-cmake/luajit/src/lj_opt_sink.c  258
1 file changed, 258 insertions, 0 deletions
diff --git a/libs/luajit-cmake/luajit/src/lj_opt_sink.c b/libs/luajit-cmake/luajit/src/lj_opt_sink.c
new file mode 100644
index 0000000..4b9008b
--- /dev/null
+++ b/libs/luajit-cmake/luajit/src/lj_opt_sink.c
@@ -0,0 +1,258 @@
+/*
+** SINK: Allocation Sinking and Store Sinking.
+** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
+*/
+
+#define lj_opt_sink_c
+#define LUA_CORE
+
+#include "lj_obj.h"
+
+#if LJ_HASJIT
+
+#include "lj_ir.h"
+#include "lj_jit.h"
+#include "lj_iropt.h"
+#include "lj_target.h"
+
+/* Some local macros to save typing. Undef'd at the end. */
+#define IR(ref) (&J->cur.ir[(ref)])
+
+/* Check whether the store ref points to an eligible allocation. */
+static IRIns *sink_checkalloc(jit_State *J, IRIns *irs)
+{
+ IRIns *ir = IR(irs->op1);
+ if (!irref_isk(ir->op2))
+ return NULL; /* Non-constant key. */
+ if (ir->o == IR_HREFK || ir->o == IR_AREF)
+ ir = IR(ir->op1);
+ else if (!(ir->o == IR_HREF || ir->o == IR_NEWREF ||
+ ir->o == IR_FREF || ir->o == IR_ADD))
+ return NULL; /* Unhandled reference type (for XSTORE). */
+ ir = IR(ir->op1);
+ if (!(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW))
+ return NULL; /* Not an allocation. */
+ return ir; /* Return allocation. */
+}
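+/* Editor's note (illustrative, not part of the upstream source): this
+** accepts stores whose reference chain leads back to an allocation made in
+** the same trace, e.g. an ASTORE through an AREF with a constant index into
+** a table created by TNEW. A NULL result makes sink_mark_ins() mark the
+** store's reference, so whatever it points to is treated as non-sinkable,
+** and sink_sweep_ins() leaves the store un-sunk.
+*/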
+
+/* Recursively check whether a value depends on a PHI. */
+static int sink_phidep(jit_State *J, IRRef ref, int *workp)
+{
+ IRIns *ir = IR(ref);
+ if (!*workp) return 1; /* Give up and pretend it does. */
+ (*workp)--;
+ if (irt_isphi(ir->t)) return 1;
+ if (ir->op1 >= REF_FIRST && sink_phidep(J, ir->op1, workp)) return 1;
+ if (ir->op2 >= REF_FIRST && sink_phidep(J, ir->op2, workp)) return 1;
+ return 0;
+}
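+/* Editor's note: the recursion above is budgeted by *workp (64 in the
+** caller); once the budget is spent it conservatively reports a PHI
+** dependency rather than spending unbounded time on deep value chains.
+*/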
+
+/* Check whether a value is a sinkable PHI or loop-invariant. */
+static int sink_checkphi(jit_State *J, IRIns *ira, IRRef ref)
+{
+ if (ref >= REF_FIRST) {
+ IRIns *ir = IR(ref);
+ if (irt_isphi(ir->t) || (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT &&
+ irt_isphi(IR(ir->op1)->t))) {
+ ira->prev++;
+ return 1; /* Sinkable PHI. */
+ }
+ /* Otherwise the value must be loop-invariant. */
+ if (ref < J->loopref) {
+ /* Check for PHI dependencies, but give up after reasonable effort. */
+ int work = 64;
+ return !sink_phidep(J, ref, &work);
+ } else {
+ return 0; /* Loop-variant. */
+ }
+ }
+ return 1; /* Constant (non-PHI). */
+}
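+/* Editor's note: ira->prev is reused here as a per-allocation counter of
+** sinkable PHI values stored into it. sink_remark_phi() later compares the
+** counters of the left and right PHI operands and re-marks them when they
+** disagree, so both sides of a PHI are either sunk together or not at all.
+*/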
+
+/* Mark non-sinkable allocations using single-pass backward propagation.
+**
+** Roots for the marking process are:
+** - Some PHIs or snapshots (see below).
+** - Non-PHI, non-constant values stored to PHI allocations.
+** - All guards.
+** - Any remaining loads not eliminated by store-to-load forwarding.
+** - Stores with non-constant keys.
+** - All stored values.
+*/
+static void sink_mark_ins(jit_State *J)
+{
+ IRIns *ir, *irlast = IR(J->cur.nins-1);
+ for (ir = irlast ; ; ir--) {
+ switch (ir->o) {
+ case IR_BASE:
+ return; /* Finished. */
+ case IR_ALOAD: case IR_HLOAD: case IR_XLOAD: case IR_TBAR: case IR_ALEN:
+ irt_setmark(IR(ir->op1)->t); /* Mark ref for remaining loads. */
+ break;
+ case IR_FLOAD:
+ if (irt_ismarked(ir->t) || ir->op2 == IRFL_TAB_META)
+ irt_setmark(IR(ir->op1)->t); /* Mark table for remaining loads. */
+ break;
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (!ira || (irt_isphi(ira->t) && !sink_checkphi(J, ira, ir->op2)))
+ irt_setmark(IR(ir->op1)->t); /* Mark ineligible ref. */
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+ }
+#if LJ_HASFFI
+ case IR_CNEWI:
+ if (irt_isphi(ir->t) &&
+ (!sink_checkphi(J, ir, ir->op2) ||
+ (LJ_32 && ir+1 < irlast && (ir+1)->o == IR_HIOP &&
+ !sink_checkphi(J, ir, (ir+1)->op2))))
+ irt_setmark(ir->t); /* Mark ineligible allocation. */
+#endif
+ /* fallthrough */
+ case IR_USTORE:
+ irt_setmark(IR(ir->op2)->t); /* Mark stored value. */
+ break;
+#if LJ_HASFFI
+ case IR_CALLXS:
+#endif
+ case IR_CALLS:
+ irt_setmark(IR(ir->op1)->t); /* Mark (potentially) stored values. */
+ break;
+ case IR_PHI: {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ irl->prev = irr->prev = 0; /* Clear PHI value counts. */
+ if (irl->o == irr->o &&
+ (irl->o == IR_TNEW || irl->o == IR_TDUP ||
+ (LJ_HASFFI && (irl->o == IR_CNEW || irl->o == IR_CNEWI))))
+ break;
+ irt_setmark(irl->t);
+ irt_setmark(irr->t);
+ break;
+ }
+ default:
+ if (irt_ismarked(ir->t) || irt_isguard(ir->t)) { /* Propagate mark. */
+ if (ir->op1 >= REF_FIRST) irt_setmark(IR(ir->op1)->t);
+ if (ir->op2 >= REF_FIRST) irt_setmark(IR(ir->op2)->t);
+ }
+ break;
+ }
+ }
+}
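+/* Editor's note: the default case above is what makes this a backward
+** propagation. Any instruction that is already marked, or that is a guard,
+** marks both of its operands, so a mark set at any root flows back to every
+** allocation that root transitively depends on. The walk stops at IR_BASE.
+*/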
+
+/* Mark all instructions referenced by a snapshot. */
+static void sink_mark_snap(jit_State *J, SnapShot *snap)
+{
+ SnapEntry *map = &J->cur.snapmap[snap->mapofs];
+ MSize n, nent = snap->nent;
+ for (n = 0; n < nent; n++) {
+ IRRef ref = snap_ref(map[n]);
+ if (!irref_isk(ref))
+ irt_setmark(IR(ref)->t);
+ }
+}
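+/* Editor's note: lj_opt_sink() calls this only for non-looping traces, and
+** only with the final snapshot, so every non-constant value referenced
+** there becomes a root for the mark phase (it is presumably live at the
+** trace's end link, not merely at a side exit).
+*/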
+
+/* Iteratively remark PHI refs with differing marks or PHI value counts. */
+static void sink_remark_phi(jit_State *J)
+{
+ IRIns *ir;
+ int remark;
+ do {
+ remark = 0;
+ for (ir = IR(J->cur.nins-1); ir->o == IR_PHI; ir--) {
+ IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
+ if (!((irl->t.irt ^ irr->t.irt) & IRT_MARK) && irl->prev == irr->prev)
+ continue;
+ remark |= (~(irl->t.irt & irr->t.irt) & IRT_MARK);
+ irt_setmark(IR(ir->op1)->t);
+ irt_setmark(IR(ir->op2)->t);
+ }
+ } while (remark);
+}
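+/* Editor's note: a PHI pair is re-marked when exactly one side carries
+** IRT_MARK, or when the two sides counted a different number of sinkable
+** stored PHI values (the ->prev counters from sink_checkphi). 'remark' only
+** stays set while an unmarked side picks up a new mark, so the outer loop
+** terminates once the marks reach a fixed point.
+*/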
+
+/* Sweep instructions and tag sunken allocations and stores. */
+static void sink_sweep_ins(jit_State *J)
+{
+ IRIns *ir, *irbase = IR(REF_BASE);
+ for (ir = IR(J->cur.nins-1) ; ir >= irbase; ir--) {
+ switch (ir->o) {
+ case IR_ASTORE: case IR_HSTORE: case IR_FSTORE: case IR_XSTORE: {
+ IRIns *ira = sink_checkalloc(J, ir);
+ if (ira && !irt_ismarked(ira->t)) {
+ int delta = (int)(ir - ira);
+ ir->prev = REGSP(RID_SINK, delta > 255 ? 255 : delta);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ case IR_NEWREF:
+ if (!irt_ismarked(IR(ir->op1)->t)) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+#if LJ_HASFFI
+ case IR_CNEW: case IR_CNEWI:
+#endif
+ case IR_TNEW: case IR_TDUP:
+ if (!irt_ismarked(ir->t)) {
+ ir->t.irt &= ~IRT_GUARD;
+ ir->prev = REGSP(RID_SINK, 0);
+ J->cur.sinktags = 1; /* Signal present SINK tags to assembler. */
+ } else {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ case IR_PHI: {
+ IRIns *ira = IR(ir->op2);
+ if (!irt_ismarked(ira->t) &&
+ (ira->o == IR_TNEW || ira->o == IR_TDUP ||
+ (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI)))) {
+ ir->prev = REGSP(RID_SINK, 0);
+ } else {
+ ir->prev = REGSP_INIT;
+ }
+ break;
+ }
+ default:
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ break;
+ }
+ }
+ for (ir = IR(J->cur.nk); ir < irbase; ir++) {
+ irt_clearmark(ir->t);
+ ir->prev = REGSP_INIT;
+ /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
+ if (irt_is64(ir->t) && ir->o != IR_KNULL)
+ ir++;
+ }
+}
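+/* Editor's note: a sunk instruction is tagged by writing RID_SINK into the
+** register/spill-slot fields (which ir->prev overlays). For stores, the
+** second REGSP component records the distance back to the sunk allocation,
+** capped at 255; everything else is reset to REGSP_INIT so the register
+** allocator starts from a clean state. J->cur.sinktags tells the assembler
+** that such tags are present.
+*/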
+
+/* Allocation sinking and store sinking.
+**
+** 1. Mark all non-sinkable allocations.
+** 2. Then sink all remaining allocations and the related stores.
+*/
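+/* Editor's note, an illustrative (hypothetical) Lua case: in
+**   local v = {1, 2}
+**   for i = 1, 100 do v = {v[1]+1, v[2]} end
+** the table constructed on each iteration never escapes the loop except
+** through side exits, so its TNEW and the ASTOREs into it can typically be
+** sunk and are only materialized if an exit is actually taken.
+*/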
+void lj_opt_sink(jit_State *J)
+{
+ const uint32_t need = (JIT_F_OPT_SINK|JIT_F_OPT_FWD|
+ JIT_F_OPT_DCE|JIT_F_OPT_CSE|JIT_F_OPT_FOLD);
+ if ((J->flags & need) == need &&
+ (J->chain[IR_TNEW] || J->chain[IR_TDUP] ||
+ (LJ_HASFFI && (J->chain[IR_CNEW] || J->chain[IR_CNEWI])))) {
+ if (!J->loopref)
+ sink_mark_snap(J, &J->cur.snap[J->cur.nsnap-1]);
+ sink_mark_ins(J);
+ if (J->loopref)
+ sink_remark_phi(J);
+ sink_sweep_ins(J);
+ }
+}
+
+#undef IR
+
+#endif