summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe
diff options
context:
space:
mode:
Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe')
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.c345
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.h32
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.c47
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.h31
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.c1318
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.h56
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.c660
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.h73
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.c78
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.h23
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.c945
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.h39
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.c1200
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.h92
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.c1707
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.h76
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.c130
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.h25
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.c318
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.h47
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.c312
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.h35
22 files changed, 7589 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.c
new file mode 100644
index 000000000..d41249346
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_compare.h"
+#include "jit_emit_function.h"
+#include "../jit_frontend.h"
+#include "../jit_codegen.h"
+
+/* Translate an i32/i64 comparison opcode.
+ Pops the operand(s) from the value stack, emits one CMP followed by the
+ SELECTcc matching `cond` that materializes 1 or 0 into an i32 register,
+ then pushes that register. For INT_EQZ only one operand is popped; the
+ right-hand side is a constant zero of the matching width. Returns true
+ on success, false after jit_set_last_error(). */
+static bool
+jit_compile_op_compare_integer(JitCompContext *cc, IntCond cond, bool is64Bit)
+{
+ JitReg lhs, rhs, res, const_zero, const_one;
+
+ if (cond < INT_EQZ || cond > INT_GE_U) {
+ jit_set_last_error(cc, "unsupported comparation operation");
+ goto fail;
+ }
+
+ res = jit_cc_new_reg_I32(cc);
+ const_zero = NEW_CONST(I32, 0);
+ const_one = NEW_CONST(I32, 1);
+
+ /* Operands are popped right-hand side first (stack order). */
+ if (is64Bit) {
+ if (INT_EQZ == cond) {
+ rhs = NEW_CONST(I64, 0);
+ }
+ else {
+ POP_I64(rhs);
+ }
+ POP_I64(lhs);
+ }
+ else {
+ if (INT_EQZ == cond) {
+ rhs = NEW_CONST(I32, 0);
+ }
+ else {
+ POP_I32(rhs);
+ }
+ POP_I32(lhs);
+ }
+
+ /* One CMP sets cc->cmp_reg; the SELECTcc below picks 1/0 from it. */
+ GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
+ switch (cond) {
+ case INT_EQ:
+ case INT_EQZ:
+ {
+ GEN_INSN(SELECTEQ, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_NE:
+ {
+ GEN_INSN(SELECTNE, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_LT_S:
+ {
+ GEN_INSN(SELECTLTS, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_LT_U:
+ {
+ GEN_INSN(SELECTLTU, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_GT_S:
+ {
+ GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_GT_U:
+ {
+ GEN_INSN(SELECTGTU, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_LE_S:
+ {
+ GEN_INSN(SELECTLES, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_LE_U:
+ {
+ GEN_INSN(SELECTLEU, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case INT_GE_S:
+ {
+ GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ default: /* INT_GE_U */
+ {
+ GEN_INSN(SELECTGEU, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ }
+
+ PUSH_I32(res);
+ return true;
+fail:
+ return false;
+}
+
+/* Translate a WASM i32 comparison opcode (i32.eqz/eq/ne/lt_s/...). */
+bool
+jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond)
+{
+ return jit_compile_op_compare_integer(cc, cond, false);
+}
+
+/* Translate a WASM i64 comparison opcode (i64.eqz/eq/ne/lt_s/...). */
+bool
+jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond)
+{
+ return jit_compile_op_compare_integer(cc, cond, true);
+}
+
+/* Runtime helper for f32.eq: NaN operands compare unequal (returns 0),
+ matching WASM/IEEE-754 semantics. The explicit isnan check documents
+ the intent; C's == already yields 0 for NaN operands. */
+static int32
+float_cmp_eq(float f1, float f2)
+{
+ if (isnan(f1) || isnan(f2))
+ return 0;
+
+ return f1 == f2;
+}
+
+/* Runtime helper for f32.ne: any NaN operand makes the values unequal
+ (returns 1), matching WASM/IEEE-754 semantics. */
+static int32
+float_cmp_ne(float f1, float f2)
+{
+ if (isnan(f1) || isnan(f2))
+ return 1;
+
+ return f1 != f2;
+}
+
+/* Runtime helper for f64.eq: NaN operands compare unequal (returns 0),
+ matching WASM/IEEE-754 semantics. */
+static int32
+double_cmp_eq(double d1, double d2)
+{
+ if (isnan(d1) || isnan(d2))
+ return 0;
+
+ return d1 == d2;
+}
+
+/* Runtime helper for f64.ne: any NaN operand makes the values unequal
+ (returns 1), matching WASM/IEEE-754 semantics. */
+static int32
+double_cmp_ne(double d1, double d2)
+{
+ if (isnan(d1) || isnan(d2))
+ return 1;
+
+ return d1 != d2;
+}
+
+/* Emit code for a float comparison on non-constant operands and push the
+ i32 result.
+ - EQ/NE are routed through the native helpers above via
+ jit_emit_callnative() so NaN semantics are exact.
+ - LT/GT/LE/GE use CMP + signed SELECTcc; LT/LE swap the operand order
+ so only the "greater" select variants are needed.
+ NOTE(review): the ordered conditions rely on the backend's CMP flag
+ behavior for unordered (NaN) operands — confirm codegen yields 0 for
+ NaN inputs as WASM requires. */
+static bool
+jit_compile_op_compare_float_point(JitCompContext *cc, FloatCond cond,
+ JitReg lhs, JitReg rhs)
+{
+ JitReg res, args[2], const_zero, const_one;
+ JitRegKind kind;
+ void *func;
+
+ if (cond == FLOAT_EQ || cond == FLOAT_NE) {
+ /* Pick the f32 or f64 helper based on the operand register kind. */
+ kind = jit_reg_kind(lhs);
+ if (cond == FLOAT_EQ)
+ func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_eq
+ : (void *)double_cmp_eq;
+ else
+ func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_ne
+ : (void *)double_cmp_ne;
+
+ res = jit_cc_new_reg_I32(cc);
+ args[0] = lhs;
+ args[1] = rhs;
+
+ if (!jit_emit_callnative(cc, func, res, args, 2)) {
+ goto fail;
+ }
+ }
+ else {
+ res = jit_cc_new_reg_I32(cc);
+ const_zero = NEW_CONST(I32, 0);
+ const_one = NEW_CONST(I32, 1);
+ switch (cond) {
+ case FLOAT_LT:
+ {
+ /* lhs < rhs rewritten as rhs > lhs */
+ GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
+ GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case FLOAT_GT:
+ {
+ GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
+ GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case FLOAT_LE:
+ {
+ /* lhs <= rhs rewritten as rhs >= lhs */
+ GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
+ GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ case FLOAT_GE:
+ {
+ GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
+ GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
+ break;
+ }
+ default:
+ {
+ bh_assert(!"unknown FloatCond");
+ goto fail;
+ }
+ }
+ }
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+/* Translate a WASM f32 comparison opcode.
+ If both operands are compile-time constants the comparison is folded
+ here using C float operators (C's ==/!=/</... already give the WASM
+ NaN behavior: all ordered compares and == are false for NaN, != is
+ true) and a constant 0/1 is pushed. Otherwise code generation is
+ delegated to jit_compile_op_compare_float_point(). */
+bool
+jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond)
+{
+ JitReg res, const_zero, const_one;
+ JitReg lhs, rhs;
+
+ POP_F32(rhs);
+ POP_F32(lhs);
+
+ if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
+ float32 lvalue = jit_cc_get_const_F32(cc, lhs);
+ float32 rvalue = jit_cc_get_const_F32(cc, rhs);
+
+ const_zero = NEW_CONST(I32, 0);
+ const_one = NEW_CONST(I32, 1);
+
+ switch (cond) {
+ case FLOAT_EQ:
+ {
+ res = (lvalue == rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_NE:
+ {
+ res = (lvalue != rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_LT:
+ {
+ res = (lvalue < rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_GT:
+ {
+ res = (lvalue > rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_LE:
+ {
+ res = (lvalue <= rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_GE:
+ {
+ res = (lvalue >= rvalue) ? const_one : const_zero;
+ break;
+ }
+ default:
+ {
+ bh_assert(!"unknown FloatCond");
+ goto fail;
+ }
+ }
+
+ PUSH_I32(res);
+ return true;
+ }
+
+ return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
+fail:
+ return false;
+}
+
+/* Translate a WASM f64 comparison opcode.
+ Mirrors jit_compile_op_f32_compare(): constant operands are folded at
+ compile time (C double comparisons already match WASM NaN behavior);
+ otherwise emission is delegated to
+ jit_compile_op_compare_float_point(). */
+bool
+jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond)
+{
+ JitReg res, const_zero, const_one;
+ JitReg lhs, rhs;
+
+ POP_F64(rhs);
+ POP_F64(lhs);
+
+ if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
+ float64 lvalue = jit_cc_get_const_F64(cc, lhs);
+ float64 rvalue = jit_cc_get_const_F64(cc, rhs);
+
+ const_zero = NEW_CONST(I32, 0);
+ const_one = NEW_CONST(I32, 1);
+
+ switch (cond) {
+ case FLOAT_EQ:
+ {
+ res = (lvalue == rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_NE:
+ {
+ res = (lvalue != rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_LT:
+ {
+ res = (lvalue < rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_GT:
+ {
+ res = (lvalue > rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_LE:
+ {
+ res = (lvalue <= rvalue) ? const_one : const_zero;
+ break;
+ }
+ case FLOAT_GE:
+ {
+ res = (lvalue >= rvalue) ? const_one : const_zero;
+ break;
+ }
+ default:
+ {
+ bh_assert(!"unknown FloatCond");
+ goto fail;
+ }
+ }
+
+ PUSH_I32(res);
+ return true;
+ }
+
+ return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.h
new file mode 100644
index 000000000..db905b550
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_compare.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_COMPARE_H_
+#define _JIT_EMIT_COMPARE_H_
+
+#include "../jit_compiler.h"
+#include "../jit_frontend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Translate a WASM i32 comparison opcode; pushes an i32 0/1 result. */
+bool
+jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond);
+
+/* Translate a WASM i64 comparison opcode; pushes an i32 0/1 result. */
+bool
+jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond);
+
+/* Translate a WASM f32 comparison opcode; pushes an i32 0/1 result. */
+bool
+jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond);
+
+/* Translate a WASM f64 comparison opcode; pushes an i32 0/1 result. */
+bool
+jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_COMPARE_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.c
new file mode 100644
index 000000000..1bbc83c2f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_const.h"
+#include "../jit_frontend.h"
+
+/* Translate i32.const: push an I32 constant register onto the value
+ stack. The fail label is only reachable via PUSH_I32's error path. */
+bool
+jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const)
+{
+ JitReg value = NEW_CONST(I32, i32_const);
+ PUSH_I32(value);
+ return true;
+fail:
+ return false;
+}
+
+/* Translate i64.const: push an I64 constant register onto the value
+ stack. */
+bool
+jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const)
+{
+ JitReg value = NEW_CONST(I64, i64_const);
+ PUSH_I64(value);
+ return true;
+fail:
+ return false;
+}
+
+/* Translate f32.const: push an F32 constant register onto the value
+ stack. */
+bool
+jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const)
+{
+ JitReg value = NEW_CONST(F32, f32_const);
+ PUSH_F32(value);
+ return true;
+fail:
+ return false;
+}
+
+/* Translate f64.const: push an F64 constant register onto the value
+ stack. */
+bool
+jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const)
+{
+ JitReg value = NEW_CONST(F64, f64_const);
+ PUSH_F64(value);
+ return true;
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.h
new file mode 100644
index 000000000..b75314117
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_const.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_CONST_H_
+#define _JIT_EMIT_CONST_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Translate i32.const: push the constant onto the JIT value stack. */
+bool
+jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const);
+
+/* Translate i64.const: push the constant onto the JIT value stack. */
+bool
+jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const);
+
+/* Translate f32.const: push the constant onto the JIT value stack. */
+bool
+jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const);
+
+/* Translate f64.const: push the constant onto the JIT value stack. */
+bool
+jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_CONST_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.c
new file mode 100644
index 000000000..f7536c73e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.c
@@ -0,0 +1,1318 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_control.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_function.h"
+#include "../jit_frontend.h"
+#include "../interpreter/wasm_loader.h"
+
+/* Allocate a fresh basic block into `new_basic_block` (must be NULL on
+ entry); jumps to the enclosing function's `fail` label on failure. */
+#define CREATE_BASIC_BLOCK(new_basic_block) \
+ do { \
+ bh_assert(!new_basic_block); \
+ if (!(new_basic_block = jit_cc_new_basic_block(cc, 0))) { \
+ jit_set_last_error(cc, "create basic block failed"); \
+ goto fail; \
+ } \
+ } while (0)
+
+/* Basic block currently being filled with instructions. */
+#define CURR_BASIC_BLOCK() cc->cur_basic_block
+
+/* Emit an unconditional jump to `target_block`; goto fail on error. */
+#define BUILD_BR(target_block) \
+ do { \
+ if (!GEN_INSN(JMP, jit_basic_block_label(target_block))) { \
+ jit_set_last_error(cc, "generate jmp insn failed"); \
+ goto fail; \
+ } \
+ } while (0)
+
+/* Emit "if (value_if != 0) goto block_then else goto block_else";
+ goto fail on error. */
+#define BUILD_COND_BR(value_if, block_then, block_else) \
+ do { \
+ if (!GEN_INSN(CMP, cc->cmp_reg, value_if, NEW_CONST(I32, 0)) \
+ || !GEN_INSN(BNE, cc->cmp_reg, jit_basic_block_label(block_then), \
+ jit_basic_block_label(block_else))) { \
+ jit_set_last_error(cc, "generate bne insn failed"); \
+ goto fail; \
+ } \
+ } while (0)
+
+/* Redirect subsequent instruction emission into `basic_block`. */
+#define SET_BUILDER_POS(basic_block) \
+ do { \
+ cc->cur_basic_block = basic_block; \
+ } while (0)
+
+/* Record the bytecode address where `basic_block` begins. */
+#define SET_BB_BEGIN_BCIP(basic_block, bcip) \
+ do { \
+ *(jit_annl_begin_bcip(cc, jit_basic_block_label(basic_block))) = bcip; \
+ } while (0)
+
+/* Record the bytecode address where `basic_block` ends. */
+#define SET_BB_END_BCIP(basic_block, bcip) \
+ do { \
+ *(jit_annl_end_bcip(cc, jit_basic_block_label(basic_block))) = bcip; \
+ } while (0)
+
+/* Resolve a WASM branch depth to the corresponding JitBlock by walking
+ `br_depth` entries down from the top of the block stack. Returns NULL
+ (with the last error set) if the depth exceeds the stack height. */
+static JitBlock *
+get_target_block(JitCompContext *cc, uint32 br_depth)
+{
+ uint32 i = br_depth;
+ JitBlock *block = jit_block_stack_top(&cc->block_stack);
+
+ while (i-- > 0 && block) {
+ block = block->prev;
+ }
+
+ if (!block) {
+ jit_set_last_error(cc, "WASM block stack underflow");
+ return NULL;
+ }
+ return block;
+}
+
+/* Reload a block's parameters from the interpreter frame into fresh JIT
+ registers and push them onto the value stack. Resets the frame's sp to
+ the block's entry sp first. Offsets are in 4-byte cells: i32/f32 (and
+ ref types) occupy one cell, i64/f64 occupy two. */
+static bool
+load_block_params(JitCompContext *cc, JitBlock *block)
+{
+ JitFrame *jit_frame = cc->jit_frame;
+ uint32 offset, i;
+ JitReg value = 0;
+
+ /* Clear jit frame's locals and stacks */
+ clear_values(jit_frame);
+
+ /* Restore jit frame's sp to block's sp begin */
+ jit_frame->sp = block->frame_sp_begin;
+
+ /* Load params to new block */
+ offset = (uint32)(jit_frame->sp - jit_frame->lp);
+ for (i = 0; i < block->param_count; i++) {
+ switch (block->param_types[i]) {
+ case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+#endif
+ value = gen_load_i32(jit_frame, offset);
+ offset++;
+ break;
+ case VALUE_TYPE_I64:
+ value = gen_load_i64(jit_frame, offset);
+ offset += 2;
+ break;
+ case VALUE_TYPE_F32:
+ value = gen_load_f32(jit_frame, offset);
+ offset++;
+ break;
+ case VALUE_TYPE_F64:
+ value = gen_load_f64(jit_frame, offset);
+ offset += 2;
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+ PUSH(value, block->param_types[i]);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+/* Reload a block's results from the interpreter frame into fresh JIT
+ registers and push them onto the value stack. Same cell layout as
+ load_block_params(), but iterates the result types and does not clear
+ the frame's cached values. */
+static bool
+load_block_results(JitCompContext *cc, JitBlock *block)
+{
+ JitFrame *jit_frame = cc->jit_frame;
+ uint32 offset, i;
+ JitReg value = 0;
+
+ /* Restore jit frame's sp to block's sp begin */
+ jit_frame->sp = block->frame_sp_begin;
+
+ /* Load results to new block */
+ offset = (uint32)(jit_frame->sp - jit_frame->lp);
+ for (i = 0; i < block->result_count; i++) {
+ switch (block->result_types[i]) {
+ case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+#endif
+ value = gen_load_i32(jit_frame, offset);
+ offset++;
+ break;
+ case VALUE_TYPE_I64:
+ value = gen_load_i64(jit_frame, offset);
+ offset += 2;
+ break;
+ case VALUE_TYPE_F32:
+ value = gen_load_f32(jit_frame, offset);
+ offset++;
+ break;
+ case VALUE_TYPE_F64:
+ value = gen_load_f64(jit_frame, offset);
+ offset += 2;
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+ PUSH(value, block->result_types[i]);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+/* True iff `reg` is an I32 constant register holding exactly `val`. */
+static bool
+jit_reg_is_i32_const(JitCompContext *cc, JitReg reg, int32 val)
+{
+ return (jit_reg_kind(reg) == JIT_REG_KIND_I32 && jit_reg_is_const(reg)
+ && jit_cc_get_const_I32(cc, reg) == val)
+ ? true
+ : false;
+}
+
+/**
+ * get the last two insns:
+ * CMP cmp_reg, r0, r1
+ * SELECTcc r2, cmp_reg, 1, 0
+ *
+ * Output params are written only when the pattern matches exactly
+ * (SELECTcc reads `cond` and selects between const 1 and const 0);
+ * callers must pre-initialize *p_insn_cmp/*p_insn_select to NULL and
+ * test them afterwards.
+ */
+static void
+get_last_cmp_and_selectcc(JitCompContext *cc, JitReg cond, JitInsn **p_insn_cmp,
+ JitInsn **p_insn_select)
+{
+ JitInsn *insn = jit_basic_block_last_insn(cc->cur_basic_block);
+
+ if (insn && insn->prev && insn->prev->opcode == JIT_OP_CMP
+ && insn->opcode >= JIT_OP_SELECTEQ && insn->opcode <= JIT_OP_SELECTLEU
+ && *jit_insn_opnd(insn, 0) == cond
+ && jit_reg_is_i32_const(cc, *jit_insn_opnd(insn, 2), 1)
+ && jit_reg_is_i32_const(cc, *jit_insn_opnd(insn, 3), 0)) {
+ *p_insn_cmp = insn->prev;
+ *p_insn_select = insn;
+ }
+}
+
+/* Push a new JitBlock onto the block stack and transfer its parameters.
+ Two paths:
+ - If translation stays in the current basic block, the param JitValues
+ are simply moved (in reverse pop order) from the enclosing block's
+ value stack onto the new block's.
+ - Otherwise registers are committed to the frame, a branch into
+ `basic_block` is emitted (unconditional JMP for LOOP; conditional
+ BNE/Bcc on `cond` for IF — with `merge_cmp_and_if` a trailing
+ `CMP + SELECTcc` pair is fused into a single `CMP + Bcc`), and the
+ params are reloaded inside the new block.
+ The not-taken edge of an IF is left unpatched here: it is recorded
+ either as the else-block's incoming insn or as an incoming insn of the
+ end block, to be patched when that block is lazily created. */
+static bool
+push_jit_block_to_stack_and_pass_params(JitCompContext *cc, JitBlock *block,
+ JitBasicBlock *basic_block, JitReg cond,
+ bool merge_cmp_and_if)
+{
+ JitFrame *jit_frame = cc->jit_frame;
+ JitValue *value_list_head = NULL, *value_list_end = NULL, *jit_value;
+ JitInsn *insn;
+ JitReg value;
+ uint32 i, param_index, cell_num;
+
+ if (cc->cur_basic_block == basic_block) {
+ /* Reuse the current basic block and no need to commit values,
+ we just move param values from current block's value stack to
+ the new block's value stack */
+ for (i = 0; i < block->param_count; i++) {
+ jit_value = jit_value_stack_pop(
+ &jit_block_stack_top(&cc->block_stack)->value_stack);
+ if (!value_list_head) {
+ value_list_head = value_list_end = jit_value;
+ jit_value->prev = jit_value->next = NULL;
+ }
+ else {
+ jit_value->prev = NULL;
+ jit_value->next = value_list_head;
+ value_list_head->prev = jit_value;
+ value_list_head = jit_value;
+ }
+ }
+ block->value_stack.value_list_head = value_list_head;
+ block->value_stack.value_list_end = value_list_end;
+
+ /* Save block's begin frame sp */
+ cell_num = wasm_get_cell_num(block->param_types, block->param_count);
+ block->frame_sp_begin = jit_frame->sp - cell_num;
+
+ /* Push the new block to block stack */
+ jit_block_stack_push(&cc->block_stack, block);
+
+ /* Continue to translate current block */
+ }
+ else {
+ JitInsn *insn_select = NULL, *insn_cmp = NULL;
+
+ if (merge_cmp_and_if) {
+ get_last_cmp_and_selectcc(cc, cond, &insn_cmp, &insn_select);
+ }
+
+ /* Commit register values to locals and stacks */
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+
+ /* Pop param values from current block's value stack */
+ for (i = 0; i < block->param_count; i++) {
+ param_index = block->param_count - 1 - i;
+ POP(value, block->param_types[param_index]);
+ }
+
+ /* Clear frame values */
+ clear_values(jit_frame);
+ /* Save block's begin frame sp */
+ block->frame_sp_begin = jit_frame->sp;
+
+ /* Push the new block to block stack */
+ jit_block_stack_push(&cc->block_stack, block);
+
+ if (block->label_type == LABEL_TYPE_LOOP) {
+ BUILD_BR(basic_block);
+ }
+ else {
+ /* IF block with condition br insn */
+ if (insn_select && insn_cmp) {
+ /* Change `CMP + SELECTcc` into `CMP + Bcc` */
+ if (!(insn = GEN_INSN(BEQ, cc->cmp_reg,
+ jit_basic_block_label(basic_block), 0))) {
+ jit_set_last_error(cc, "generate cond br failed");
+ goto fail;
+ }
+ /* Rewrite BEQ to the Bcc matching the deleted SELECTcc */
+ insn->opcode =
+ JIT_OP_BEQ + (insn_select->opcode - JIT_OP_SELECTEQ);
+ jit_insn_unlink(insn_select);
+ jit_insn_delete(insn_select);
+ }
+ else {
+ if (!GEN_INSN(CMP, cc->cmp_reg, cond, NEW_CONST(I32, 0))
+ || !(insn =
+ GEN_INSN(BNE, cc->cmp_reg,
+ jit_basic_block_label(basic_block), 0))) {
+ jit_set_last_error(cc, "generate cond br failed");
+ goto fail;
+ }
+ }
+
+ /* Don't create else basic block or end basic block now, just
+ save its incoming BNE insn, and patch the insn's else label
+ when the basic block is lazily created */
+ if (block->wasm_code_else) {
+ block->incoming_insn_for_else_bb = insn;
+ }
+ else {
+ if (!jit_block_add_incoming_insn(block, insn, 2)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ goto fail;
+ }
+ }
+ }
+
+ /* Start to translate the block */
+ SET_BUILDER_POS(basic_block);
+
+ /* Push the block parameters */
+ if (!load_block_params(cc, block)) {
+ goto fail;
+ }
+ }
+ return true;
+fail:
+ return false;
+}
+
+/* Copy `dst_type_count` values of types `dst_types` from the top of the
+ current frame's stack into the memory pointed to by `dst_frame_sp`
+ (offsets in 4-byte cells). If `p_first_res_reg` is non-NULL the FIRST
+ value is returned through it in a register instead of being stored —
+ used by handle_func_return() to pass the first result back via the
+ RETURNBC register convention. */
+static void
+copy_block_arities(JitCompContext *cc, JitReg dst_frame_sp, uint8 *dst_types,
+ uint32 dst_type_count, JitReg *p_first_res_reg)
+{
+ JitFrame *jit_frame;
+ uint32 offset_src, offset_dst, i;
+ JitReg value;
+
+ jit_frame = cc->jit_frame;
+ offset_src = (uint32)(jit_frame->sp - jit_frame->lp)
+ - wasm_get_cell_num(dst_types, dst_type_count);
+ offset_dst = 0;
+
+ /* pop values from stack and store to dest frame */
+ for (i = 0; i < dst_type_count; i++) {
+ switch (dst_types[i]) {
+ case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+#endif
+ value = gen_load_i32(jit_frame, offset_src);
+ if (i == 0 && p_first_res_reg)
+ *p_first_res_reg = value;
+ else
+ GEN_INSN(STI32, value, dst_frame_sp,
+ NEW_CONST(I32, offset_dst * 4));
+ offset_src++;
+ offset_dst++;
+ break;
+ case VALUE_TYPE_I64:
+ value = gen_load_i64(jit_frame, offset_src);
+ if (i == 0 && p_first_res_reg)
+ *p_first_res_reg = value;
+ else
+ GEN_INSN(STI64, value, dst_frame_sp,
+ NEW_CONST(I32, offset_dst * 4));
+ offset_src += 2;
+ offset_dst += 2;
+ break;
+ case VALUE_TYPE_F32:
+ value = gen_load_f32(jit_frame, offset_src);
+ if (i == 0 && p_first_res_reg)
+ *p_first_res_reg = value;
+ else
+ GEN_INSN(STF32, value, dst_frame_sp,
+ NEW_CONST(I32, offset_dst * 4));
+ offset_src++;
+ offset_dst++;
+ break;
+ case VALUE_TYPE_F64:
+ value = gen_load_f64(jit_frame, offset_src);
+ if (i == 0 && p_first_res_reg)
+ *p_first_res_reg = value;
+ else
+ GEN_INSN(STF64, value, dst_frame_sp,
+ NEW_CONST(I32, offset_dst * 4));
+ offset_src += 2;
+ offset_dst += 2;
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+ }
+}
+
+/* Emit the function epilogue: copy results into the caller's frame,
+ advance the caller's sp, pop the current interpreter frame off the
+ exec_env stack, restore the previous frame as current, and return to
+ the interpreter with JIT_INTERP_ACTION_NORMAL (first result travels in
+ `ret_reg`). With perf profiling enabled, also accumulates execution
+ time and call count into the function instance. */
+static bool
+handle_func_return(JitCompContext *cc, JitBlock *block)
+{
+ JitReg prev_frame, prev_frame_sp;
+ JitReg ret_reg = 0;
+#if WASM_ENABLE_PERF_PROFILING != 0
+ JitReg func_inst = jit_cc_new_reg_ptr(cc);
+ JitReg time_start = jit_cc_new_reg_I64(cc);
+ JitReg time_end = jit_cc_new_reg_I64(cc);
+ JitReg cur_exec_time = jit_cc_new_reg_I64(cc);
+ JitReg total_exec_time = jit_cc_new_reg_I64(cc);
+ JitReg total_exec_cnt = jit_cc_new_reg_I32(cc);
+#endif
+
+#if WASM_ENABLE_PERF_PROFILING != 0
+ /* time_end = os_time_get_boot_microsecond() */
+ if (!jit_emit_callnative(cc, os_time_get_boot_microsecond, time_end, NULL,
+ 0)) {
+ return false;
+ }
+ /* time_start = cur_frame->time_started */
+ GEN_INSN(LDI64, time_start, cc->fp_reg,
+ NEW_CONST(I32, offsetof(WASMInterpFrame, time_started)));
+ /* cur_exec_time = time_end - time_start */
+ GEN_INSN(SUB, cur_exec_time, time_end, time_start);
+ /* func_inst = cur_frame->function */
+ GEN_INSN(LDPTR, func_inst, cc->fp_reg,
+ NEW_CONST(I32, offsetof(WASMInterpFrame, function)));
+ /* total_exec_time = func_inst->total_exec_time */
+ GEN_INSN(LDI64, total_exec_time, func_inst,
+ NEW_CONST(I32, offsetof(WASMFunctionInstance, total_exec_time)));
+ /* total_exec_time += cur_exec_time */
+ GEN_INSN(ADD, total_exec_time, total_exec_time, cur_exec_time);
+ /* func_inst->total_exec_time = total_exec_time */
+ GEN_INSN(STI64, total_exec_time, func_inst,
+ NEW_CONST(I32, offsetof(WASMFunctionInstance, total_exec_time)));
+ /* total_exec_cnt = func_inst->total_exec_cnt */
+ GEN_INSN(LDI32, total_exec_cnt, func_inst,
+ NEW_CONST(I32, offsetof(WASMFunctionInstance, total_exec_cnt)));
+ /* total_exec_cnt++ */
+ GEN_INSN(ADD, total_exec_cnt, total_exec_cnt, NEW_CONST(I32, 1));
+ /* func_inst->total_exec_cnt = total_exec_cnt */
+ GEN_INSN(STI32, total_exec_cnt, func_inst,
+ NEW_CONST(I32, offsetof(WASMFunctionInstance, total_exec_cnt)));
+#endif
+
+ prev_frame = jit_cc_new_reg_ptr(cc);
+ prev_frame_sp = jit_cc_new_reg_ptr(cc);
+
+ /* prev_frame = cur_frame->prev_frame */
+ GEN_INSN(LDPTR, prev_frame, cc->fp_reg,
+ NEW_CONST(I32, offsetof(WASMInterpFrame, prev_frame)));
+ GEN_INSN(LDPTR, prev_frame_sp, prev_frame,
+ NEW_CONST(I32, offsetof(WASMInterpFrame, sp)));
+
+ if (block->result_count) {
+ uint32 cell_num =
+ wasm_get_cell_num(block->result_types, block->result_count);
+
+ /* First result stays in ret_reg; the rest are stored to the
+ caller's stack */
+ copy_block_arities(cc, prev_frame_sp, block->result_types,
+ block->result_count, &ret_reg);
+ /* prev_frame->sp += cell_num */
+ GEN_INSN(ADD, prev_frame_sp, prev_frame_sp,
+ NEW_CONST(PTR, cell_num * 4));
+ GEN_INSN(STPTR, prev_frame_sp, prev_frame,
+ NEW_CONST(I32, offsetof(WASMInterpFrame, sp)));
+ }
+
+ /* Free stack space of the current frame:
+ exec_env->wasm_stack.s.top = cur_frame */
+ GEN_INSN(STPTR, cc->fp_reg, cc->exec_env_reg,
+ NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top)));
+ /* Set the prev_frame as the current frame:
+ exec_env->cur_frame = prev_frame */
+ GEN_INSN(STPTR, prev_frame, cc->exec_env_reg,
+ NEW_CONST(I32, offsetof(WASMExecEnv, cur_frame)));
+ /* fp_reg = prev_frame */
+ GEN_INSN(MOV, cc->fp_reg, prev_frame);
+ /* return 0 */
+ GEN_INSN(RETURNBC, NEW_CONST(I32, JIT_INTERP_ACTION_NORMAL), ret_reg, 0);
+
+ return true;
+}
+
+/**
+ * Handle the WASM 'end' opcode for the block on top of the block stack.
+ *
+ * If no other basic block branches to this end (no incoming insns), the
+ * block is simply popped: a function block emits the epilogue, otherwise
+ * the result JitValues are moved onto the enclosing block's value stack.
+ * If there are incoming branches, the end basic block is created lazily
+ * here, every recorded JMP/Bcc/LOOKUPSWITCH operand is patched to its
+ * label, and translation continues inside it.
+ *
+ * is_block_polymorphic: whether current block's stack is in polymorphic state,
+ * if the opcode is one of unreachable/br/br_table/return, stack is marked
+ * to polymorphic state until the block's 'end' opcode is processed
+ */
+static bool
+handle_op_end(JitCompContext *cc, uint8 **p_frame_ip, bool is_block_polymorphic)
+{
+ JitFrame *jit_frame = cc->jit_frame;
+ JitBlock *block, *block_prev;
+ JitIncomingInsn *incoming_insn;
+ JitInsn *insn;
+
+ /* Check block stack */
+ if (!(block = jit_block_stack_top(&cc->block_stack))) {
+ jit_set_last_error(cc, "WASM block stack underflow");
+ return false;
+ }
+
+ if (!block->incoming_insns_for_end_bb) {
+ /* No other basic blocks jumping to this end, no need to
+ create the end basic block, just continue to translate
+ the following opcodes */
+ if (block->label_type == LABEL_TYPE_FUNCTION) {
+ if (!handle_func_return(cc, block)) {
+ return false;
+ }
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ clear_values(jit_frame);
+ }
+ else if (block->result_count > 0) {
+ JitValue *value_list_head = NULL, *value_list_end = NULL;
+ JitValue *jit_value;
+ uint32 i;
+
+ /* No need to change cc->jit_frame, just move result values
+ from current block's value stack to previous block's
+ value stack */
+ block_prev = block->prev;
+
+ /* Pop in reverse so list order matches original stack order */
+ for (i = 0; i < block->result_count; i++) {
+ jit_value = jit_value_stack_pop(&block->value_stack);
+ bh_assert(jit_value);
+ if (!value_list_head) {
+ value_list_head = value_list_end = jit_value;
+ jit_value->prev = jit_value->next = NULL;
+ }
+ else {
+ jit_value->prev = NULL;
+ jit_value->next = value_list_head;
+ value_list_head->prev = jit_value;
+ value_list_head = jit_value;
+ }
+ }
+
+ if (!block_prev->value_stack.value_list_head) {
+ block_prev->value_stack.value_list_head = value_list_head;
+ block_prev->value_stack.value_list_end = value_list_end;
+ }
+ else {
+ /* Link to the end of previous block's value stack */
+ block_prev->value_stack.value_list_end->next = value_list_head;
+ value_list_head->prev = block_prev->value_stack.value_list_end;
+ block_prev->value_stack.value_list_end = value_list_end;
+ }
+ }
+
+ /* Pop block and destroy the block */
+ block = jit_block_stack_pop(&cc->block_stack);
+ jit_block_destroy(block);
+ return true;
+ }
+ else {
+ /* Commit register values to locals and stacks */
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+ /* Clear frame values */
+ clear_values(jit_frame);
+
+ /* Create the end basic block */
+ CREATE_BASIC_BLOCK(block->basic_block_end);
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ SET_BB_BEGIN_BCIP(block->basic_block_end, *p_frame_ip);
+ /* No need to create 'JMP' insn if block is in stack polymorphic
+ state, as previous br/br_table opcode has created 'JMP' insn
+ to this end basic block */
+ if (!is_block_polymorphic) {
+ /* Jump to the end basic block */
+ BUILD_BR(block->basic_block_end);
+ }
+
+ /* Patch the INSNs which jump to this basic block */
+ incoming_insn = block->incoming_insns_for_end_bb;
+ while (incoming_insn) {
+ insn = incoming_insn->insn;
+
+ bh_assert(
+ insn->opcode == JIT_OP_JMP
+ || (insn->opcode >= JIT_OP_BEQ && insn->opcode <= JIT_OP_BLEU)
+ || insn->opcode == JIT_OP_LOOKUPSWITCH);
+
+ if (insn->opcode == JIT_OP_JMP
+ || (insn->opcode >= JIT_OP_BEQ
+ && insn->opcode <= JIT_OP_BLEU)) {
+ *(jit_insn_opnd(insn, incoming_insn->opnd_idx)) =
+ jit_basic_block_label(block->basic_block_end);
+ }
+ else {
+ /* Patch LOOKUPSWITCH INSN: opnd_idx selects a match pair,
+ or past-the-end means the default target */
+ JitOpndLookupSwitch *opnd = jit_insn_opndls(insn);
+ if (incoming_insn->opnd_idx < opnd->match_pairs_num) {
+ opnd->match_pairs[incoming_insn->opnd_idx].target =
+ jit_basic_block_label(block->basic_block_end);
+ }
+ else {
+ opnd->default_target =
+ jit_basic_block_label(block->basic_block_end);
+ }
+ }
+
+ incoming_insn = incoming_insn->next;
+ }
+
+ SET_BUILDER_POS(block->basic_block_end);
+
+ /* Pop block and load block results */
+ block = jit_block_stack_pop(&cc->block_stack);
+
+ if (block->label_type == LABEL_TYPE_FUNCTION) {
+ if (!handle_func_return(cc, block)) {
+ jit_block_destroy(block);
+ goto fail;
+ }
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ clear_values(jit_frame);
+ }
+ else {
+ if (!load_block_results(cc, block)) {
+ jit_block_destroy(block);
+ goto fail;
+ }
+ }
+
+ jit_block_destroy(block);
+ return true;
+ }
+ return true;
+fail:
+ return false;
+}
+
+/**
+ * is_block_polymorphic: whether current block's stack is in polymorphic state,
+ * if the opcode is one of unreachable/br/br_table/return, stack is marked
+ * to polymorphic state until the block's 'end' opcode is processed
+ */
+static bool
+handle_op_else(JitCompContext *cc, uint8 **p_frame_ip,
+ bool is_block_polymorphic)
+{
+ JitBlock *block = jit_block_stack_top(&cc->block_stack);
+ JitFrame *jit_frame = cc->jit_frame;
+ JitInsn *insn;
+
+ /* Check block */
+ if (!block) {
+ jit_set_last_error(cc, "WASM block stack underflow");
+ return false;
+ }
+ if (block->label_type != LABEL_TYPE_IF) {
+ jit_set_last_error(cc, "Invalid WASM block type");
+ return false;
+ }
+
+ if (!block->incoming_insn_for_else_bb) {
+ /* The if branch is handled like OP_BLOCK (cond is const and != 0),
+ just skip the else branch and handle OP_END */
+ *p_frame_ip = block->wasm_code_end + 1;
+ return handle_op_end(cc, p_frame_ip, false);
+ }
+ else {
+ /* Has else branch and need to translate else branch */
+
+ /* Commit register values to locals and stacks */
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+ /* Clear frame values */
+ clear_values(jit_frame);
+
+ /* No need to create 'JMP' insn if block is in stack polymorphic
+ state, as previous br/br_table opcode has created 'JMP' insn
+ to this end basic block */
+ if (!is_block_polymorphic) {
+ /* Jump to end basic block */
+ if (!(insn = GEN_INSN(JMP, 0))) {
+ jit_set_last_error(cc, "generate jmp insn failed");
+ return false;
+ }
+ if (!jit_block_add_incoming_insn(block, insn, 0)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ return false;
+ }
+ }
+
+ /* Clear value stack, restore param values and
+ start to translate the else branch. */
+ jit_value_stack_destroy(&block->value_stack);
+
+ /* create else basic block */
+ CREATE_BASIC_BLOCK(block->basic_block_else);
+ SET_BB_END_BCIP(block->basic_block_entry, *p_frame_ip - 1);
+ SET_BB_BEGIN_BCIP(block->basic_block_else, *p_frame_ip);
+
+        /* Patch the insn which conditionally jumps to the else basic block */
+ insn = block->incoming_insn_for_else_bb;
+ *(jit_insn_opnd(insn, 2)) =
+ jit_basic_block_label(block->basic_block_else);
+
+ SET_BUILDER_POS(block->basic_block_else);
+
+ /* Reload block parameters */
+ if (!load_block_params(cc, block)) {
+ return false;
+ }
+
+ return true;
+ }
+ return true;
+fail:
+ return false;
+}
+
+static bool
+handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ JitBlock *block = jit_block_stack_top(&cc->block_stack);
+
+ bh_assert(block);
+
+ do {
+ if (block->label_type == LABEL_TYPE_IF
+ && block->incoming_insn_for_else_bb
+ && *p_frame_ip <= block->wasm_code_else) {
+ /* Else branch hasn't been translated,
+ start to translate the else branch */
+ *p_frame_ip = block->wasm_code_else + 1;
+ /* Restore jit frame's sp to block's sp begin */
+ cc->jit_frame->sp = block->frame_sp_begin;
+ return handle_op_else(cc, p_frame_ip, true);
+ }
+ else if (block->incoming_insns_for_end_bb) {
+ *p_frame_ip = block->wasm_code_end + 1;
+ /* Restore jit frame's sp to block's sp end */
+ cc->jit_frame->sp =
+ block->frame_sp_begin
+ + wasm_get_cell_num(block->result_types, block->result_count);
+ return handle_op_end(cc, p_frame_ip, true);
+ }
+ else {
+ *p_frame_ip = block->wasm_code_end + 1;
+ jit_block_stack_pop(&cc->block_stack);
+ jit_block_destroy(block);
+ block = jit_block_stack_top(&cc->block_stack);
+ }
+ } while (block != NULL);
+
+ return true;
+}
+
+bool
+jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip,
+ uint8 *frame_ip_end, uint32 label_type, uint32 param_count,
+ uint8 *param_types, uint32 result_count,
+ uint8 *result_types, bool merge_cmp_and_if)
+{
+ BlockAddr block_addr_cache[BLOCK_ADDR_CACHE_SIZE][BLOCK_ADDR_CONFLICT_SIZE];
+ JitBlock *block;
+ JitReg value;
+ uint8 *else_addr, *end_addr;
+
+ /* Check block stack */
+ if (!jit_block_stack_top(&cc->block_stack)) {
+ jit_set_last_error(cc, "WASM block stack underflow");
+ return false;
+ }
+
+ memset(block_addr_cache, 0, sizeof(block_addr_cache));
+
+ /* Get block info */
+ if (!(wasm_loader_find_block_addr(
+ NULL, (BlockAddr *)block_addr_cache, *p_frame_ip, frame_ip_end,
+ (uint8)label_type, &else_addr, &end_addr))) {
+ jit_set_last_error(cc, "find block end addr failed");
+ return false;
+ }
+
+ /* Allocate memory */
+ if (!(block = jit_calloc(sizeof(JitBlock)))) {
+ jit_set_last_error(cc, "allocate memory failed");
+ return false;
+ }
+
+ if (param_count && !(block->param_types = jit_calloc(param_count))) {
+ jit_set_last_error(cc, "allocate memory failed");
+ goto fail;
+ }
+ if (result_count && !(block->result_types = jit_calloc(result_count))) {
+ jit_set_last_error(cc, "allocate memory failed");
+ goto fail;
+ }
+
+ /* Initialize block data */
+ block->label_type = label_type;
+ block->param_count = param_count;
+ if (param_count) {
+ bh_memcpy_s(block->param_types, param_count, param_types, param_count);
+ }
+ block->result_count = result_count;
+ if (result_count) {
+ bh_memcpy_s(block->result_types, result_count, result_types,
+ result_count);
+ }
+ block->wasm_code_else = else_addr;
+ block->wasm_code_end = end_addr;
+
+ if (label_type == LABEL_TYPE_BLOCK) {
+ /* Push the new jit block to block stack and continue to
+ translate current basic block */
+ if (!push_jit_block_to_stack_and_pass_params(
+ cc, block, cc->cur_basic_block, 0, false))
+ goto fail;
+ }
+ else if (label_type == LABEL_TYPE_LOOP) {
+ CREATE_BASIC_BLOCK(block->basic_block_entry);
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ SET_BB_BEGIN_BCIP(block->basic_block_entry, *p_frame_ip);
+ /* Push the new jit block to block stack and continue to
+ translate the new basic block */
+ if (!push_jit_block_to_stack_and_pass_params(
+ cc, block, block->basic_block_entry, 0, false))
+ goto fail;
+ }
+ else if (label_type == LABEL_TYPE_IF) {
+ POP_I32(value);
+
+ if (!jit_reg_is_const_val(value)) {
+ /* Compare value is not constant, create condition br IR */
+
+ /* Create entry block */
+ CREATE_BASIC_BLOCK(block->basic_block_entry);
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ SET_BB_BEGIN_BCIP(block->basic_block_entry, *p_frame_ip);
+
+ if (!push_jit_block_to_stack_and_pass_params(
+ cc, block, block->basic_block_entry, value,
+ merge_cmp_and_if))
+ goto fail;
+ }
+ else {
+ if (jit_cc_get_const_I32(cc, value) != 0) {
+ /* Compare value is not 0, condition is true, else branch of
+ BASIC_BLOCK if cannot be reached, we treat it same as
+ LABEL_TYPE_BLOCK and start to translate if branch */
+ if (!push_jit_block_to_stack_and_pass_params(
+ cc, block, cc->cur_basic_block, 0, false))
+ goto fail;
+ }
+ else {
+ if (else_addr) {
+                    /* Compare value is 0, condition is false, if branch of
+                       BASIC_BLOCK if cannot be reached, we treat it same as
+                       LABEL_TYPE_BLOCK and start to translate else branch */
+ if (!push_jit_block_to_stack_and_pass_params(
+ cc, block, cc->cur_basic_block, 0, false))
+ goto fail;
+ *p_frame_ip = else_addr + 1;
+ }
+ else {
+ /* The whole if block cannot be reached, skip it */
+ jit_block_destroy(block);
+ *p_frame_ip = end_addr + 1;
+ }
+ }
+ }
+ }
+ else {
+ jit_set_last_error(cc, "Invalid block type");
+ goto fail;
+ }
+
+ return true;
+fail:
+ /* Only destroy the block if it hasn't been pushed into
+ the block stack, or if will be destroyed again when
+ destroying the block stack */
+ if (jit_block_stack_top(&cc->block_stack) != block)
+ jit_block_destroy(block);
+ return false;
+}
+
+bool
+jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ return handle_op_else(cc, p_frame_ip, false);
+}
+
+bool
+jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ return handle_op_end(cc, p_frame_ip, false);
+}
+
+/* Check whether need to copy arities when jumping from current block
+ to the dest block */
+static bool
+check_copy_arities(const JitBlock *block_dst, JitFrame *jit_frame)
+{
+ JitValueSlot *frame_sp_src = NULL;
+
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ frame_sp_src =
+ jit_frame->sp
+ - wasm_get_cell_num(block_dst->param_types, block_dst->param_count);
+ /* There are parameters to copy and the src/dst addr are different */
+ return (block_dst->param_count > 0
+ && block_dst->frame_sp_begin != frame_sp_src)
+ ? true
+ : false;
+ }
+ else {
+ frame_sp_src = jit_frame->sp
+ - wasm_get_cell_num(block_dst->result_types,
+ block_dst->result_count);
+ /* There are results to copy and the src/dst addr are different */
+ return (block_dst->result_count > 0
+ && block_dst->frame_sp_begin != frame_sp_src)
+ ? true
+ : false;
+ }
+}
+
+#if WASM_ENABLE_THREAD_MGR != 0
+bool
+jit_check_suspend_flags(JitCompContext *cc)
+{
+ JitReg exec_env, suspend_flags, terminate_flag, offset;
+ JitBasicBlock *terminate_block, *cur_basic_block;
+ JitFrame *jit_frame = cc->jit_frame;
+
+ cur_basic_block = cc->cur_basic_block;
+ terminate_block = jit_cc_new_basic_block(cc, 0);
+ if (!terminate_block) {
+ return false;
+ }
+
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+ exec_env = cc->exec_env_reg;
+ suspend_flags = jit_cc_new_reg_I32(cc);
+ terminate_flag = jit_cc_new_reg_I32(cc);
+
+ offset = jit_cc_new_const_I32(cc, offsetof(WASMExecEnv, suspend_flags));
+ GEN_INSN(LDI32, suspend_flags, exec_env, offset);
+ GEN_INSN(AND, terminate_flag, suspend_flags, NEW_CONST(I32, 1));
+
+ GEN_INSN(CMP, cc->cmp_reg, terminate_flag, NEW_CONST(I32, 0));
+ GEN_INSN(BNE, cc->cmp_reg, jit_basic_block_label(terminate_block), 0);
+
+ cc->cur_basic_block = terminate_block;
+ GEN_INSN(RETURN, NEW_CONST(I32, 0));
+
+ cc->cur_basic_block = cur_basic_block;
+
+ return true;
+}
+
+#endif
+
+static bool
+handle_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
+{
+ JitFrame *jit_frame;
+ JitBlock *block_dst, *block;
+ JitReg frame_sp_dst;
+ JitInsn *insn;
+ bool copy_arities;
+ uint32 offset;
+
+ /* Check block stack */
+ if (!(block = jit_block_stack_top(&cc->block_stack))) {
+ jit_set_last_error(cc, "WASM block stack underflow");
+ return false;
+ }
+
+ if (!(block_dst = get_target_block(cc, br_depth))) {
+ return false;
+ }
+
+ jit_frame = cc->jit_frame;
+
+    /* Only copy parameters or results when their count > 0 and
+       the src/dst addr are different */
+ copy_arities = check_copy_arities(block_dst, jit_frame);
+
+ if (copy_arities) {
+ frame_sp_dst = jit_cc_new_reg_ptr(cc);
+ offset = offsetof(WASMInterpFrame, lp)
+ + (block_dst->frame_sp_begin - jit_frame->lp) * 4;
+ GEN_INSN(ADD, frame_sp_dst, cc->fp_reg, NEW_CONST(PTR, offset));
+
+ /* No need to commit results as they will be copied to dest block */
+ gen_commit_values(jit_frame, jit_frame->lp, block->frame_sp_begin);
+ }
+ else {
+ /* Commit all including results as they won't be copied */
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+ }
+
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ if (copy_arities) {
+ /* Dest block is Loop block, copy loop parameters */
+ copy_block_arities(cc, frame_sp_dst, block_dst->param_types,
+ block_dst->param_count, NULL);
+ }
+
+ clear_values(jit_frame);
+
+ /* Jump to the begin basic block */
+ BUILD_BR(block_dst->basic_block_entry);
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ }
+ else {
+ if (copy_arities) {
+ /* Dest block is Block/If/Function block, copy block results */
+ copy_block_arities(cc, frame_sp_dst, block_dst->result_types,
+ block_dst->result_count, NULL);
+ }
+
+ clear_values(jit_frame);
+
+ /* Jump to the end basic block */
+ if (!(insn = GEN_INSN(JMP, 0))) {
+ jit_set_last_error(cc, "generate jmp insn failed");
+ goto fail;
+ }
+ if (!jit_block_add_incoming_insn(block_dst, insn, 0)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ goto fail;
+ }
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
+{
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ return false;
+#endif
+
+ return handle_op_br(cc, br_depth, p_frame_ip)
+ && handle_next_reachable_block(cc, p_frame_ip);
+}
+
+static JitFrame *
+jit_frame_clone(const JitFrame *jit_frame)
+{
+ JitFrame *jit_frame_cloned;
+ uint32 max_locals = jit_frame->max_locals;
+ uint32 max_stacks = jit_frame->max_stacks;
+ uint32 total_size;
+
+ total_size = (uint32)(offsetof(JitFrame, lp)
+ + sizeof(*jit_frame->lp) * (max_locals + max_stacks));
+
+ jit_frame_cloned = jit_calloc(total_size);
+ if (jit_frame_cloned) {
+ bh_memcpy_s(jit_frame_cloned, total_size, jit_frame, total_size);
+ jit_frame_cloned->sp =
+ jit_frame_cloned->lp + (jit_frame->sp - jit_frame->lp);
+ }
+
+ return jit_frame_cloned;
+}
+
+static void
+jit_frame_copy(JitFrame *jit_frame_dst, const JitFrame *jit_frame_src)
+{
+ uint32 max_locals = jit_frame_src->max_locals;
+ uint32 max_stacks = jit_frame_src->max_stacks;
+ uint32 total_size;
+
+ total_size =
+ (uint32)(offsetof(JitFrame, lp)
+ + sizeof(*jit_frame_src->lp) * (max_locals + max_stacks));
+ bh_memcpy_s(jit_frame_dst, total_size, jit_frame_src, total_size);
+ jit_frame_dst->sp =
+ jit_frame_dst->lp + (jit_frame_src->sp - jit_frame_src->lp);
+}
+
+bool
+jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
+ bool merge_cmp_and_br_if, uint8 **p_frame_ip)
+{
+ JitFrame *jit_frame, *jit_frame_cloned;
+ JitBlock *block_dst;
+ JitReg cond;
+ JitBasicBlock *cur_basic_block, *if_basic_block = NULL;
+ JitInsn *insn, *insn_select = NULL, *insn_cmp = NULL;
+ bool copy_arities;
+
+ if (!(block_dst = get_target_block(cc, br_depth))) {
+ return false;
+ }
+
+ /* append IF to current basic block */
+ POP_I32(cond);
+
+ if (merge_cmp_and_br_if) {
+ get_last_cmp_and_selectcc(cc, cond, &insn_cmp, &insn_select);
+ }
+
+ jit_frame = cc->jit_frame;
+ cur_basic_block = cc->cur_basic_block;
+ gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
+
+ if (!(insn_select && insn_cmp)) {
+ if (!GEN_INSN(CMP, cc->cmp_reg, cond, NEW_CONST(I32, 0))) {
+ jit_set_last_error(cc, "generate cmp insn failed");
+ goto fail;
+ }
+ }
+
+    /* Only copy parameters or results when their count > 0 and
+       the src/dst addr are different */
+ copy_arities = check_copy_arities(block_dst, jit_frame);
+
+ if (!copy_arities) {
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ if (!(insn = GEN_INSN(
+ BNE, cc->cmp_reg,
+ jit_basic_block_label(block_dst->basic_block_entry),
+ 0))) {
+ jit_set_last_error(cc, "generate bne insn failed");
+ goto fail;
+ }
+ }
+ else {
+ if (!(insn = GEN_INSN(BNE, cc->cmp_reg, 0, 0))) {
+ jit_set_last_error(cc, "generate bne insn failed");
+ goto fail;
+ }
+ if (!jit_block_add_incoming_insn(block_dst, insn, 1)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ goto fail;
+ }
+ }
+ if (insn_select && insn_cmp) {
+ /* Change `CMP + SELECTcc` into `CMP + Bcc` */
+ insn->opcode = JIT_OP_BEQ + (insn_select->opcode - JIT_OP_SELECTEQ);
+ jit_insn_unlink(insn_select);
+ jit_insn_delete(insn_select);
+ }
+ return true;
+ }
+
+ CREATE_BASIC_BLOCK(if_basic_block);
+ if (!(insn = GEN_INSN(BNE, cc->cmp_reg,
+ jit_basic_block_label(if_basic_block), 0))) {
+ jit_set_last_error(cc, "generate bne insn failed");
+ goto fail;
+ }
+ if (insn_select && insn_cmp) {
+ /* Change `CMP + SELECTcc` into `CMP + Bcc` */
+ insn->opcode = JIT_OP_BEQ + (insn_select->opcode - JIT_OP_SELECTEQ);
+ jit_insn_unlink(insn_select);
+ jit_insn_delete(insn_select);
+ }
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ return false;
+#endif
+
+ SET_BUILDER_POS(if_basic_block);
+ SET_BB_BEGIN_BCIP(if_basic_block, *p_frame_ip - 1);
+
+    /* Clone current jit frame to a new jit frame */
+ if (!(jit_frame_cloned = jit_frame_clone(jit_frame))) {
+ jit_set_last_error(cc, "allocate memory failed");
+ goto fail;
+ }
+
+ /* Clear current jit frame so that the registers
+ in the new basic block will be loaded again */
+ clear_values(jit_frame);
+ if (!handle_op_br(cc, br_depth, p_frame_ip)) {
+ jit_free(jit_frame_cloned);
+ goto fail;
+ }
+
+ /* Restore the jit frame so that the registers can
+ be used again in current basic block */
+ jit_frame_copy(jit_frame, jit_frame_cloned);
+ jit_free(jit_frame_cloned);
+
+ /* Continue processing opcodes after BR_IF */
+ SET_BUILDER_POS(cur_basic_block);
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
+ uint8 **p_frame_ip)
+{
+ JitBasicBlock *cur_basic_block;
+ JitReg value;
+ JitInsn *insn;
+ uint32 i = 0;
+ JitOpndLookupSwitch *opnd = NULL;
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ return false;
+#endif
+
+ cur_basic_block = cc->cur_basic_block;
+
+ POP_I32(value);
+
+ /* append LOOKUPSWITCH to current basic block */
+ gen_commit_values(cc->jit_frame, cc->jit_frame->lp, cc->jit_frame->sp);
+ /* Clear frame values */
+ clear_values(cc->jit_frame);
+ SET_BB_END_BCIP(cur_basic_block, *p_frame_ip - 1);
+
+ /* prepare basic blocks for br */
+ insn = GEN_INSN(LOOKUPSWITCH, value, br_count);
+ if (NULL == insn) {
+ jit_set_last_error(cc, "generate insn LOOKUPSWITCH failed");
+ goto fail;
+ }
+
+ for (i = 0, opnd = jit_insn_opndls(insn); i < br_count + 1; i++) {
+ JitBasicBlock *basic_block = NULL;
+ JitBlock *block_dst;
+ bool copy_arities;
+
+ if (!(block_dst = get_target_block(cc, br_depths[i]))) {
+ goto fail;
+ }
+
+        /* Only copy parameters or results when their count > 0 and
+           the src/dst addr are different */
+ copy_arities = check_copy_arities(block_dst, cc->jit_frame);
+
+ if (!copy_arities) {
+            /* No need to create new basic block, directly jump to
+               the existing basic block when no need to copy arities */
+ if (i == br_count) {
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ opnd->default_target =
+ jit_basic_block_label(block_dst->basic_block_entry);
+ }
+ else {
+ bh_assert(!block_dst->basic_block_end);
+ if (!jit_block_add_incoming_insn(block_dst, insn, i)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ goto fail;
+ }
+ }
+ }
+ else {
+ opnd->match_pairs[i].value = i;
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ opnd->match_pairs[i].target =
+ jit_basic_block_label(block_dst->basic_block_entry);
+ }
+ else {
+ bh_assert(!block_dst->basic_block_end);
+ if (!jit_block_add_incoming_insn(block_dst, insn, i)) {
+ jit_set_last_error(cc, "add incoming insn failed");
+ goto fail;
+ }
+ }
+ }
+ continue;
+ }
+
+ /* Create new basic block when need to copy arities */
+ CREATE_BASIC_BLOCK(basic_block);
+ SET_BB_BEGIN_BCIP(basic_block, *p_frame_ip - 1);
+
+ if (i == br_count) {
+ opnd->default_target = jit_basic_block_label(basic_block);
+ }
+ else {
+ opnd->match_pairs[i].value = i;
+ opnd->match_pairs[i].target = jit_basic_block_label(basic_block);
+ }
+
+ SET_BUILDER_POS(basic_block);
+
+ if (!handle_op_br(cc, br_depths[i], p_frame_ip))
+ goto fail;
+ }
+
+ /* Search next available block to handle */
+ return handle_next_reachable_block(cc, p_frame_ip);
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ JitBlock *block_func = cc->block_stack.block_list_head;
+
+ bh_assert(block_func);
+
+ if (!handle_func_return(cc, block_func)) {
+ return false;
+ }
+ SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1);
+ clear_values(cc->jit_frame);
+
+ return handle_next_reachable_block(cc, p_frame_ip);
+}
+
+bool
+jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ if (!jit_emit_exception(cc, EXCE_UNREACHABLE, JIT_OP_JMP, 0, NULL))
+ return false;
+
+ return handle_next_reachable_block(cc, p_frame_ip);
+}
+
+bool
+jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip)
+{
+ return handle_next_reachable_block(cc, p_frame_ip);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.h
new file mode 100644
index 000000000..e1bc09a0a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_control.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_CONTROL_H_
+#define _JIT_EMIT_CONTROL_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip,
+ uint8 *frame_ip_end, uint32 label_type, uint32 param_count,
+ uint8 *param_types, uint32 result_count,
+ uint8 *result_types, bool merge_cmp_and_if);
+
+bool
+jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip);
+
+bool
+jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip);
+
+bool
+jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip);
+
+bool
+jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
+ bool merge_cmp_and_br_if, uint8 **p_frame_ip);
+
+bool
+jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
+ uint8 **p_frame_ip);
+
+bool
+jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip);
+
+bool
+jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip);
+
+bool
+jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip);
+
+#if WASM_ENABLE_THREAD_MGR != 0
+bool
+jit_check_suspend_flags(JitCompContext *cc);
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_CONTROL_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.c
new file mode 100644
index 000000000..8308a3ca9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_conversion.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_function.h"
+#include "../jit_codegen.h"
+#include "../jit_frontend.h"
+
+#define F32_I32_S_MIN (-2147483904.0f)
+#define F32_I32_S_MAX (2147483648.0f)
+#define F32_I32_U_MIN (-1.0f)
+#define F32_I32_U_MAX (4294967296.0f)
+#define F32_I64_S_MIN (-9223373136366403584.0f)
+#define F32_I64_S_MAX (9223372036854775808.0f)
+#define F32_I64_U_MIN (-1.0f)
+#define F32_I64_U_MAX (18446744073709551616.0f)
+
+#define F64_I32_S_MIN (-2147483649.0)
+#define F64_I32_S_MAX (2147483648.0)
+#define F64_I32_U_MIN (-1.0)
+#define F64_I32_U_MAX (4294967296.0)
+#define F64_I64_S_MIN (-9223372036854777856.0)
+#define F64_I64_S_MAX (9223372036854775808.0)
+#define F64_I64_U_MIN (-1.0)
+#define F64_I64_U_MAX (18446744073709551616.0)
+
+#define FP_TO_INT(f_ty, i_ty, f_nm, i_nm) \
+ static i_ty i_nm##_trunc_##f_nm(f_ty fp)
+
+#define INT_TO_FP(i_ty, f_ty, i_nm, f_nm) \
+ static f_ty f_nm##_convert_##i_nm(i_ty i)
+
+#define FP_TO_INT_SAT(f_ty, i_ty, f_nm, i_nm) \
+ static i_ty i_nm##_trunc_##f_nm##_sat(f_ty fp)
+
+static int
+local_isnan(double x)
+{
+ return isnan(x);
+}
+
+static int
+local_isnanf(float x)
+{
+ return isnan(x);
+}
+
+#define RETURN_IF_NANF(fp) \
+ if (local_isnanf(fp)) { \
+ return 0; \
+ }
+
+#define RETURN_IF_NAN(fp) \
+ if (local_isnan(fp)) { \
+ return 0; \
+ }
+
+#define RETURN_IF_INF(fp, i_min, i_max) \
+ if (isinf(fp)) { \
+ return fp < 0 ? i_min : i_max; \
+ }
+
+#define RETURN_IF_MIN(fp, f_min, i_min) \
+ if (fp <= f_min) { \
+ return i_min; \
+ }
+
+#define RETURN_IF_MAX(fp, f_max, i_max) \
+ if (fp >= f_max) { \
+ return i_max; \
+ }
+
+FP_TO_INT_SAT(float, int32, f32, i32)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
+ RETURN_IF_MIN(fp, F32_I32_S_MIN, INT32_MIN)
+ RETURN_IF_MAX(fp, F32_I32_S_MAX, INT32_MAX)
+ return (int32)fp;
+}
+
+FP_TO_INT_SAT(float, uint32, f32, u32)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, 0, UINT32_MAX)
+ RETURN_IF_MIN(fp, F32_I32_U_MIN, 0)
+ RETURN_IF_MAX(fp, F32_I32_U_MAX, UINT32_MAX)
+ return (uint32)fp;
+}
+
+FP_TO_INT_SAT(double, int32, f64, i32)
+{
+ RETURN_IF_NAN(fp)
+ RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
+ RETURN_IF_MIN(fp, F64_I32_S_MIN, INT32_MIN)
+ RETURN_IF_MAX(fp, F64_I32_S_MAX, INT32_MAX)
+ return (int32)fp;
+}
+
+FP_TO_INT_SAT(double, uint32, f64, u32)
+{
+ RETURN_IF_NAN(fp)
+ RETURN_IF_INF(fp, 0, UINT32_MAX)
+ RETURN_IF_MIN(fp, F64_I32_U_MIN, 0)
+ RETURN_IF_MAX(fp, F64_I32_U_MAX, UINT32_MAX)
+ return (uint32)fp;
+}
+
+FP_TO_INT_SAT(float, int64, f32, i64)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
+ RETURN_IF_MIN(fp, F32_I64_S_MIN, INT64_MIN)
+ RETURN_IF_MAX(fp, F32_I64_S_MAX, INT64_MAX)
+ return (int64)fp;
+}
+
+FP_TO_INT(float, uint64, f32, u64)
+{
+ return (uint64)fp;
+}
+
+FP_TO_INT_SAT(float, uint64, f32, u64)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, 0, UINT64_MAX)
+ RETURN_IF_MIN(fp, F32_I64_U_MIN, 0)
+ RETURN_IF_MAX(fp, F32_I64_U_MAX, UINT64_MAX)
+ return (uint64)fp;
+}
+
+FP_TO_INT_SAT(double, int64, f64, i64)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
+ RETURN_IF_MIN(fp, F64_I64_S_MIN, INT64_MIN)
+ RETURN_IF_MAX(fp, F64_I64_S_MAX, INT64_MAX)
+ return (int64)fp;
+}
+
+FP_TO_INT(double, uint64, f64, u64)
+{
+ return (uint64)fp;
+}
+
+FP_TO_INT_SAT(double, uint64, f64, u64)
+{
+ RETURN_IF_NANF(fp)
+ RETURN_IF_INF(fp, 0, UINT64_MAX)
+ RETURN_IF_MIN(fp, F64_I64_U_MIN, 0)
+ RETURN_IF_MAX(fp, F64_I64_U_MAX, UINT64_MAX)
+ return (uint64)fp;
+}
+
+INT_TO_FP(uint64, float, u64, f32)
+{
+ return (float)i;
+}
+
+INT_TO_FP(uint64, double, u64, f64)
+{
+ return (double)i;
+}
+
+bool
+jit_compile_op_i32_wrap_i64(JitCompContext *cc)
+{
+ JitReg num, res;
+
+ POP_I64(num);
+
+ res = jit_cc_new_reg_I32(cc);
+ GEN_INSN(I64TOI32, res, num);
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+jit_compile_check_value_range(JitCompContext *cc, JitReg value, JitReg min_fp,
+ JitReg max_fp)
+{
+ JitReg nan_ret = jit_cc_new_reg_I32(cc);
+ JitRegKind kind = jit_reg_kind(value);
+ bool emit_ret = false;
+
+ bh_assert(JIT_REG_KIND_F32 == kind || JIT_REG_KIND_F64 == kind);
+
+ /* If value is NaN, throw exception */
+ if (JIT_REG_KIND_F32 == kind)
+ emit_ret = jit_emit_callnative(cc, local_isnanf, nan_ret, &value, 1);
+ else
+ emit_ret = jit_emit_callnative(cc, local_isnan, nan_ret, &value, 1);
+ if (!emit_ret)
+ goto fail;
+
+ GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1));
+ if (!jit_emit_exception(cc, EXCE_INVALID_CONVERSION_TO_INTEGER, JIT_OP_BEQ,
+ cc->cmp_reg, NULL))
+ goto fail;
+
+ /* If value is out of integer range, throw exception */
+ GEN_INSN(CMP, cc->cmp_reg, min_fp, value);
+ if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BGES, cc->cmp_reg,
+ NULL))
+ goto fail;
+
+ GEN_INSN(CMP, cc->cmp_reg, value, max_fp);
+ if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BGES, cc->cmp_reg,
+ NULL))
+ goto fail;
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat)
+{
+ JitReg value, res;
+
+ POP_F32(value);
+
+ res = jit_cc_new_reg_I32(cc);
+ if (!sat) {
+ JitReg min_fp = NEW_CONST(F32, sign ? F32_I32_S_MIN : F32_I32_U_MIN);
+ JitReg max_fp = NEW_CONST(F32, sign ? F32_I32_S_MAX : F32_I32_U_MAX);
+
+ if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
+ goto fail;
+
+ if (sign)
+ GEN_INSN(F32TOI32, res, value);
+ else
+ GEN_INSN(F32TOU32, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc,
+ sign ? (void *)i32_trunc_f32_sat
+ : (void *)u32_trunc_f32_sat,
+ res, &value, 1))
+ goto fail;
+ }
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat)
+{
+ JitReg value, res;
+
+ POP_F64(value);
+
+ res = jit_cc_new_reg_I32(cc);
+ if (!sat) {
+ JitReg min_fp = NEW_CONST(F64, sign ? F64_I32_S_MIN : F64_I32_U_MIN);
+ JitReg max_fp = NEW_CONST(F64, sign ? F64_I32_S_MAX : F64_I32_U_MAX);
+
+ if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
+ goto fail;
+
+ if (sign)
+ GEN_INSN(F64TOI32, res, value);
+ else
+ GEN_INSN(F64TOU32, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc,
+ sign ? (void *)i32_trunc_f64_sat
+ : (void *)u32_trunc_f64_sat,
+ res, &value, 1))
+ goto fail;
+ }
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i64_extend_i32(JitCompContext *cc, bool sign)
+{
+ JitReg num, res;
+
+ POP_I32(num);
+
+ res = jit_cc_new_reg_I64(cc);
+ if (sign)
+ GEN_INSN(I32TOI64, res, num);
+ else
+ GEN_INSN(U32TOI64, res, num);
+
+ PUSH_I64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i64_extend_i64(JitCompContext *cc, int8 bitwidth)
+{
+ JitReg value, tmp, res;
+
+ POP_I64(value);
+
+ tmp = jit_cc_new_reg_I32(cc);
+ res = jit_cc_new_reg_I64(cc);
+
+ switch (bitwidth) {
+ case 8:
+ {
+ GEN_INSN(I64TOI8, tmp, value);
+ GEN_INSN(I8TOI64, res, tmp);
+ break;
+ }
+ case 16:
+ {
+ GEN_INSN(I64TOI16, tmp, value);
+ GEN_INSN(I16TOI64, res, tmp);
+ break;
+ }
+ case 32:
+ {
+ GEN_INSN(I64TOI32, tmp, value);
+ GEN_INSN(I32TOI64, res, tmp);
+ break;
+ }
+ default:
+ {
+ bh_assert(0);
+ goto fail;
+ }
+ }
+
+ PUSH_I64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i32_extend_i32(JitCompContext *cc, int8 bitwidth)
+{
+ JitReg value, tmp, res;
+
+ POP_I32(value);
+
+ tmp = jit_cc_new_reg_I32(cc);
+ res = jit_cc_new_reg_I32(cc);
+
+ switch (bitwidth) {
+ case 8:
+ {
+ GEN_INSN(I32TOI8, tmp, value);
+ GEN_INSN(I8TOI32, res, tmp);
+ break;
+ }
+ case 16:
+ {
+ GEN_INSN(I32TOI16, tmp, value);
+ GEN_INSN(I16TOI32, res, tmp);
+ break;
+ }
+ default:
+ {
+ bh_assert(0);
+ goto fail;
+ }
+ }
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat)
+{
+ JitReg value, res;
+
+ POP_F32(value);
+
+ res = jit_cc_new_reg_I64(cc);
+ if (!sat) {
+ JitReg min_fp = NEW_CONST(F32, sign ? F32_I64_S_MIN : F32_I64_U_MIN);
+ JitReg max_fp = NEW_CONST(F32, sign ? F32_I64_S_MAX : F32_I64_U_MAX);
+
+ if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
+ goto fail;
+
+ if (sign) {
+ GEN_INSN(F32TOI64, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc, u64_trunc_f32, res, &value, 1))
+ goto fail;
+ }
+ }
+ else {
+ if (!jit_emit_callnative(cc,
+ sign ? (void *)i64_trunc_f32_sat
+ : (void *)u64_trunc_f32_sat,
+ res, &value, 1))
+ goto fail;
+ }
+
+ PUSH_I64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat)
+{
+ JitReg value, res;
+
+ POP_F64(value);
+
+ res = jit_cc_new_reg_I64(cc);
+ if (!sat) {
+ JitReg min_fp = NEW_CONST(F64, sign ? F64_I64_S_MIN : F64_I64_U_MIN);
+ JitReg max_fp = NEW_CONST(F64, sign ? F64_I64_S_MAX : F64_I64_U_MAX);
+
+ if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
+ goto fail;
+
+ if (sign) {
+ GEN_INSN(F64TOI64, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc, u64_trunc_f64, res, &value, 1))
+ goto fail;
+ }
+ }
+ else {
+ if (!jit_emit_callnative(cc,
+ sign ? (void *)i64_trunc_f64_sat
+ : (void *)u64_trunc_f64_sat,
+ res, &value, 1))
+ goto fail;
+ }
+
+ PUSH_I64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f32_convert_i32(JitCompContext *cc, bool sign)
+{
+ JitReg value, res;
+
+ POP_I32(value);
+
+ res = jit_cc_new_reg_F32(cc);
+ if (sign) {
+ GEN_INSN(I32TOF32, res, value);
+ }
+ else {
+ GEN_INSN(U32TOF32, res, value);
+ }
+
+ PUSH_F32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f32_convert_i64(JitCompContext *cc, bool sign)
+{
+ JitReg value, res;
+
+ POP_I64(value);
+
+ res = jit_cc_new_reg_F32(cc);
+ if (sign) {
+ GEN_INSN(I64TOF32, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc, f32_convert_u64, res, &value, 1)) {
+ goto fail;
+ }
+ }
+
+ PUSH_F32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f32_demote_f64(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_F64(value);
+
+ res = jit_cc_new_reg_F32(cc);
+ GEN_INSN(F64TOF32, res, value);
+
+ PUSH_F32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f64_convert_i32(JitCompContext *cc, bool sign)
+{
+ JitReg value, res;
+
+ POP_I32(value);
+
+ res = jit_cc_new_reg_F64(cc);
+ if (sign)
+ GEN_INSN(I32TOF64, res, value);
+ else
+ GEN_INSN(U32TOF64, res, value);
+
+ PUSH_F64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f64_convert_i64(JitCompContext *cc, bool sign)
+{
+ JitReg value, res;
+
+ POP_I64(value);
+
+ res = jit_cc_new_reg_F64(cc);
+ if (sign) {
+ GEN_INSN(I64TOF64, res, value);
+ }
+ else {
+ if (!jit_emit_callnative(cc, f64_convert_u64, res, &value, 1)) {
+ goto fail;
+ }
+ }
+
+ PUSH_F64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f64_promote_f32(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_F32(value);
+
+ res = jit_cc_new_reg_F64(cc);
+ GEN_INSN(F32TOF64, res, value);
+
+ PUSH_F64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i64_reinterpret_f64(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_F64(value);
+
+ res = jit_cc_new_reg_I64(cc);
+ GEN_INSN(F64CASTI64, res, value);
+
+ PUSH_I64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_i32_reinterpret_f32(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_F32(value);
+
+ res = jit_cc_new_reg_I32(cc);
+ GEN_INSN(F32CASTI32, res, value);
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f64_reinterpret_i64(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_I64(value);
+
+ res = jit_cc_new_reg_F64(cc);
+ GEN_INSN(I64CASTF64, res, value);
+
+ PUSH_F64(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_f32_reinterpret_i32(JitCompContext *cc)
+{
+ JitReg value, res;
+
+ POP_I32(value);
+
+ res = jit_cc_new_reg_F32(cc);
+ GEN_INSN(I32CASTF32, res, value);
+
+ PUSH_F32(res);
+
+ return true;
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.h
new file mode 100644
index 000000000..28952fc61
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_conversion.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_CONVERSION_H_
+#define _JIT_EMIT_CONVERSION_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_i32_wrap_i64(JitCompContext *cc);
+
+bool
+jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat);
+
+bool
+jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat);
+
+bool
+jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign);
+
+bool
+jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth);
+
+bool
+jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth);
+
+bool
+jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat);
+
+bool
+jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat);
+
+bool
+jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign);
+
+bool
+jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign);
+
+bool
+jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx);
+
+bool
+jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign);
+
+bool
+jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign);
+
+bool
+jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx);
+
+bool
+jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx);
+
+bool
+jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx);
+
+bool
+jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx);
+
+bool
+jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_CONVERSION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.c
new file mode 100644
index 000000000..2addb5cde
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_exception.h"
+#include "../jit_frontend.h"
+
+bool
+jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
+ JitReg cond_br_if, JitBasicBlock *cond_br_else_block)
+{
+ JitInsn *insn = NULL;
+ JitIncomingInsn *incoming_insn;
+ JitReg else_label;
+
+ bh_assert(exception_id < EXCE_NUM);
+
+ if (jit_opcode >= JIT_OP_BEQ && jit_opcode <= JIT_OP_BLEU) {
+ bh_assert(cond_br_if == cc->cmp_reg);
+ else_label =
+ cond_br_else_block ? jit_basic_block_label(cond_br_else_block) : 0;
+ switch (jit_opcode) {
+ case JIT_OP_BEQ:
+ insn = GEN_INSN(BEQ, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BNE:
+ insn = GEN_INSN(BNE, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BGTS:
+ insn = GEN_INSN(BGTS, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BGES:
+ insn = GEN_INSN(BGES, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BLTS:
+ insn = GEN_INSN(BLTS, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BLES:
+ insn = GEN_INSN(BLES, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BGTU:
+ insn = GEN_INSN(BGTU, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BGEU:
+ insn = GEN_INSN(BGEU, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BLTU:
+ insn = GEN_INSN(BLTU, cond_br_if, 0, else_label);
+ break;
+ case JIT_OP_BLEU:
+ insn = GEN_INSN(BLEU, cond_br_if, 0, else_label);
+ break;
+ }
+ if (!insn) {
+ jit_set_last_error(cc, "generate cond br insn failed");
+ return false;
+ }
+ }
+ else if (jit_opcode == JIT_OP_JMP) {
+ insn = GEN_INSN(JMP, 0);
+ if (!insn) {
+ jit_set_last_error(cc, "generate jmp insn failed");
+ return false;
+ }
+ }
+
+ incoming_insn = jit_calloc(sizeof(JitIncomingInsn));
+ if (!incoming_insn) {
+ jit_set_last_error(cc, "allocate memory failed");
+ return false;
+ }
+
+ incoming_insn->insn = insn;
+ incoming_insn->next = cc->incoming_insns_for_exec_bbs[exception_id];
+ cc->incoming_insns_for_exec_bbs[exception_id] = incoming_insn;
+ return true;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.h
new file mode 100644
index 000000000..7aa393b78
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_exception.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_EXCEPTION_H_
+#define _JIT_EMIT_EXCEPTION_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
+ JitReg cond_br_if, JitBasicBlock *cond_br_else_block);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_EXCEPTION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.c
new file mode 100644
index 000000000..3ac9e3ed6
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_function.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_control.h"
+#include "../jit_frontend.h"
+#include "../jit_codegen.h"
+#include "../../interpreter/wasm_runtime.h"
+
+static bool
+emit_callnative(JitCompContext *cc, JitReg native_func_reg, JitReg res,
+ JitReg *params, uint32 param_count);
+
/* Pop the call arguments from the value stack and store them into the outs
   area of the current frame (which becomes the callee frame's local/param
   area). Arguments are popped last-first, so each value is written at the
   descending cell offset of its corresponding parameter. Also commits
   sp/ip so the callee sees a consistent frame. Returns false on stack
   underflow or an unknown value type. */
static bool
pre_call(JitCompContext *cc, const WASMType *func_type)
{
    JitReg value;
    uint32 i, outs_off;
    /* One past the end of the callee's parameter area: callee frame base
       (current frame size) + lp offset + total param size in cells * 4 */
    outs_off =
        cc->total_frame_size + offsetof(WASMInterpFrame, lp)
        + wasm_get_cell_num(func_type->types, func_type->param_count) * 4;

    for (i = 0; i < func_type->param_count; i++) {
        /* Walk params back-to-front to match value-stack pop order. */
        switch (func_type->types[func_type->param_count - 1 - i]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                POP_I32(value);
                outs_off -= 4;
                GEN_INSN(STI32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_I64:
                POP_I64(value);
                outs_off -= 8;
                GEN_INSN(STI64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_F32:
                POP_F32(value);
                outs_off -= 4;
                GEN_INSN(STF32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_F64:
                POP_F64(value);
                outs_off -= 8;
                GEN_INSN(STF64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }

    /* Commit sp as the callee may use it to store the results */
    gen_commit_sp_ip(cc->jit_frame);

    return true;
fail:
    return false;
}
+
/* Push the callee's results back onto the value stack after a call.
   Results live in the current frame's locals starting at cell index n
   (current stack depth); the first result may instead be delivered in
   @first_res (a register holding the call's return value) when non-zero.
   If @update_committed_sp is true, committed_sp is synced because the
   callee has already updated the frame's sp in memory. */
static bool
post_return(JitCompContext *cc, const WASMType *func_type, JitReg first_res,
            bool update_committed_sp)
{
    uint32 i, n;
    JitReg value;

    /* n = current stack depth in cells; results start at this offset. */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    for (i = 0; i < func_type->result_count; i++) {
        switch (func_type->types[func_type->param_count + i]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                if (i == 0 && first_res) {
                    /* First result was returned in a register. */
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I32);
                    value = first_res;
                }
                else {
                    /* Load the result from the frame's local area. */
                    value = jit_cc_new_reg_I32(cc);
                    GEN_INSN(LDI32, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_I32(value);
                n++;
                break;
            case VALUE_TYPE_I64:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I64);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_I64(cc);
                    GEN_INSN(LDI64, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_I64(value);
                /* 64-bit values occupy two cells. */
                n += 2;
                break;
            case VALUE_TYPE_F32:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F32);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_F32(cc);
                    GEN_INSN(LDF32, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_F32(value);
                n++;
                break;
            case VALUE_TYPE_F64:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F64);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_F64(cc);
                    GEN_INSN(LDF64, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_F64(value);
                n += 2;
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }

    if (update_committed_sp)
        /* Update the committed_sp as the callee has updated the frame sp */
        cc->jit_frame->committed_sp = cc->jit_frame->sp;

    return true;
fail:
    return false;
}
+
+static bool
+pre_load(JitCompContext *cc, JitReg *argvs, const WASMType *func_type)
+{
+ JitReg value;
+ uint32 i;
+
+ /* Prepare parameters for the function to call */
+ for (i = 0; i < func_type->param_count; i++) {
+ switch (func_type->types[func_type->param_count - 1 - i]) {
+ case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+#endif
+ POP_I32(value);
+ argvs[func_type->param_count - 1 - i] = value;
+ break;
+ case VALUE_TYPE_I64:
+ POP_I64(value);
+ argvs[func_type->param_count - 1 - i] = value;
+ break;
+ case VALUE_TYPE_F32:
+ POP_F32(value);
+ argvs[func_type->param_count - 1 - i] = value;
+ break;
+ case VALUE_TYPE_F64:
+ POP_F64(value);
+ argvs[func_type->param_count - 1 - i] = value;
+ break;
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+ }
+
+ gen_commit_sp_ip(cc->jit_frame);
+
+ return true;
+fail:
+ return false;
+}
+
+static JitReg
+create_first_res_reg(JitCompContext *cc, const WASMType *func_type)
+{
+ if (func_type->result_count) {
+ switch (func_type->types[func_type->param_count]) {
+ case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+#endif
+ return jit_cc_new_reg_I32(cc);
+ case VALUE_TYPE_I64:
+ return jit_cc_new_reg_I64(cc);
+ case VALUE_TYPE_F32:
+ return jit_cc_new_reg_F32(cc);
+ case VALUE_TYPE_F64:
+ return jit_cc_new_reg_F64(cc);
+ default:
+ bh_assert(0);
+ return 0;
+ }
+ }
+ return 0;
+}
+
+bool
+jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
+{
+ WASMModule *wasm_module = cc->cur_wasm_module;
+ WASMFunctionImport *func_import;
+ WASMFunction *func;
+ WASMType *func_type;
+ JitFrame *jit_frame = cc->jit_frame;
+ JitReg fast_jit_func_ptrs, jitted_code = 0;
+ JitReg native_func, *argvs = NULL, *argvs1 = NULL, func_params[5];
+ JitReg native_addr_ptr, module_inst_reg, ret, res;
+ uint32 jitted_func_idx, i;
+ uint64 total_size;
+ const char *signature = NULL;
+ /* Whether the argument is a pointer/str argument and
+ need to call jit_check_app_addr_and_convert */
+ bool is_pointer_arg;
+ bool return_value = false;
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ goto fail;
+#endif
+
+ if (func_idx < wasm_module->import_function_count) {
+ /* The function to call is an import function */
+ func_import = &wasm_module->import_functions[func_idx].u.function;
+ func_type = func_import->func_type;
+
+ /* Call fast_jit_invoke_native in some cases */
+ if (!func_import->func_ptr_linked /* import func hasn't been linked */
+ || func_import->call_conv_wasm_c_api /* linked by wasm_c_api */
+ || func_import->call_conv_raw /* registered as raw mode */
+ || func_type->param_count >= 5 /* registered as normal mode, but
+ jit_emit_callnative only supports
+ maximum 6 registers now
+ (include exec_nev) */) {
+ JitReg arg_regs[3];
+
+ if (!pre_call(cc, func_type)) {
+ goto fail;
+ }
+
+ /* Call fast_jit_invoke_native */
+ ret = jit_cc_new_reg_I32(cc);
+ arg_regs[0] = cc->exec_env_reg;
+ arg_regs[1] = NEW_CONST(I32, func_idx);
+ arg_regs[2] = cc->fp_reg;
+ if (!jit_emit_callnative(cc, fast_jit_invoke_native, ret, arg_regs,
+ 3)) {
+ goto fail;
+ }
+
+ /* Convert the return value from bool to uint32 */
+ GEN_INSN(AND, ret, ret, NEW_CONST(I32, 0xFF));
+
+ /* Check whether there is exception thrown */
+ GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
+ if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
+ cc->cmp_reg, NULL)) {
+ goto fail;
+ }
+
+ if (!post_return(cc, func_type, 0, true)) {
+ goto fail;
+ }
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ goto fail;
+#endif
+
+ return true;
+ }
+
+ /* Import function was registered as normal mode, and its argument count
+ is no more than 5, we directly call it */
+
+ signature = func_import->signature;
+ bh_assert(signature);
+
+ /* Allocate memory for argvs*/
+ total_size = sizeof(JitReg) * (uint64)(func_type->param_count);
+ if (total_size > 0) {
+ if (total_size >= UINT32_MAX
+ || !(argvs = jit_malloc((uint32)total_size))) {
+ goto fail;
+ }
+ }
+
+ /* Pop function params from stack and store them into argvs */
+ if (!pre_load(cc, argvs, func_type)) {
+ goto fail;
+ }
+
+ ret = jit_cc_new_reg_I32(cc);
+ func_params[0] = module_inst_reg = get_module_inst_reg(jit_frame);
+ func_params[4] = native_addr_ptr = jit_cc_new_reg_ptr(cc);
+ GEN_INSN(ADD, native_addr_ptr, cc->exec_env_reg,
+ NEW_CONST(PTR, offsetof(WASMExecEnv, jit_cache)));
+
+ /* Traverse each pointer/str argument, call
+ jit_check_app_addr_and_convert to check whether it is
+ in the range of linear memory and and convert it from
+ app offset into native address */
+ for (i = 0; i < func_type->param_count; i++) {
+
+ is_pointer_arg = false;
+
+ if (signature[i + 1] == '*') {
+ /* param is a pointer */
+ is_pointer_arg = true;
+ func_params[1] = NEW_CONST(I32, false); /* is_str = false */
+ func_params[2] = argvs[i];
+ if (signature[i + 2] == '~') {
+ /* pointer with length followed */
+ func_params[3] = argvs[i + 1];
+ }
+ else {
+ /* pointer with length followed */
+ func_params[3] = NEW_CONST(I32, 1);
+ }
+ }
+ else if (signature[i + 1] == '$') {
+ /* param is a string */
+ is_pointer_arg = true;
+ func_params[1] = NEW_CONST(I32, true); /* is_str = true */
+ func_params[2] = argvs[i];
+ func_params[3] = NEW_CONST(I32, 1);
+ }
+
+ if (is_pointer_arg) {
+ if (!jit_emit_callnative(cc, jit_check_app_addr_and_convert,
+ ret, func_params, 5)) {
+ goto fail;
+ }
+
+ /* Convert the return value from bool to uint32 */
+ GEN_INSN(AND, ret, ret, NEW_CONST(I32, 0xFF));
+ /* Check whether there is exception thrown */
+ GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
+ if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
+ cc->cmp_reg, NULL)) {
+ return false;
+ }
+
+ /* Load native addr from pointer of native addr,
+ or exec_env->jit_cache */
+ argvs[i] = jit_cc_new_reg_ptr(cc);
+ GEN_INSN(LDPTR, argvs[i], native_addr_ptr, NEW_CONST(I32, 0));
+ }
+ }
+
+ res = create_first_res_reg(cc, func_type);
+
+ /* Prepare arguments of the native function */
+ if (!(argvs1 =
+ jit_calloc(sizeof(JitReg) * (func_type->param_count + 1)))) {
+ goto fail;
+ }
+ argvs1[0] = cc->exec_env_reg;
+ for (i = 0; i < func_type->param_count; i++) {
+ argvs1[i + 1] = argvs[i];
+ }
+
+ /* Call the native function */
+ native_func = NEW_CONST(PTR, (uintptr_t)func_import->func_ptr_linked);
+ if (!emit_callnative(cc, native_func, res, argvs1,
+ func_type->param_count + 1)) {
+ jit_free(argvs1);
+ goto fail;
+ }
+ jit_free(argvs1);
+
+ /* Check whether there is exception thrown */
+ GEN_INSN(LDI8, ret, module_inst_reg,
+ NEW_CONST(I32, offsetof(WASMModuleInstance, cur_exception)));
+ GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
+ if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BNE,
+ cc->cmp_reg, NULL)) {
+ goto fail;
+ }
+
+ if (!post_return(cc, func_type, res, false)) {
+ goto fail;
+ }
+ }
+ else {
+ /* The function to call is a bytecode function */
+ func = wasm_module
+ ->functions[func_idx - wasm_module->import_function_count];
+ func_type = func->func_type;
+
+ /* jitted_code = func_ptrs[func_idx - import_function_count] */
+ fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
+ jitted_code = jit_cc_new_reg_ptr(cc);
+ jitted_func_idx = func_idx - wasm_module->import_function_count;
+ GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs,
+ NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
+
+ if (!pre_call(cc, func_type)) {
+ goto fail;
+ }
+
+ res = create_first_res_reg(cc, func_type);
+
+ GEN_INSN(CALLBC, res, 0, jitted_code, NEW_CONST(I32, func_idx));
+
+ if (!post_return(cc, func_type, res, true)) {
+ goto fail;
+ }
+ }
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (!jit_check_suspend_flags(cc))
+ goto fail;
+#endif
+
+ /* Clear part of memory regs and table regs as their values
+ may be changed in the function call */
+ if (cc->cur_wasm_module->possible_memory_grow)
+ clear_memory_regs(jit_frame);
+ clear_table_regs(jit_frame);
+
+ /* Ignore tail call currently */
+ (void)tail_call;
+
+ return_value = true;
+
+fail:
+ if (argvs)
+ jit_free(argvs);
+
+ return return_value;
+}
+
+static JitReg
+pack_argv(JitCompContext *cc)
+{
+ /* reuse the stack of the next frame */
+ uint32 stack_base;
+ JitReg argv;
+
+ stack_base = cc->total_frame_size + offsetof(WASMInterpFrame, lp);
+ argv = jit_cc_new_reg_ptr(cc);
+ GEN_INSN(ADD, argv, cc->fp_reg, NEW_CONST(PTR, stack_base));
+ if (jit_get_last_error(cc)) {
+ return (JitReg)0;
+ }
+ return argv;
+}
+
/*
 * Translate a wasm `call_indirect` opcode: look up the callee in table
 * @tbl_idx by the element index popped from the stack, validate the
 * element / function index / function type, then dispatch through one of
 * two basic blocks — block_import calls fast_jit_call_indirect for import
 * functions, block_nonimport CALLBCs the jitted code of bytecode
 * functions — both of which join at func_return where the results are
 * pushed back onto the stack.
 *
 * @return true on success, false on failure
 */
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx)
{
    WASMModule *wasm_module = cc->cur_wasm_module;
    JitBasicBlock *block_import, *block_nonimport, *func_return;
    JitReg elem_idx, native_ret, argv, arg_regs[6];
    JitFrame *jit_frame = cc->jit_frame;
    JitReg tbl_size, offset, offset_i32;
    JitReg func_import, func_idx, tbl_elems, func_count;
    JitReg func_type_indexes, func_type_idx, fast_jit_func_ptrs;
    JitReg offset1_i32, offset1, func_type_idx1, res;
    JitReg import_func_ptrs, jitted_code_idx, jitted_code;
    WASMType *func_type;
    uint32 n;

    POP_I32(elem_idx);

    /* Trap if elem_idx is outside the table's current size. */
    tbl_size = get_table_cur_size_reg(jit_frame, tbl_idx);

    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_size);
    if (!jit_emit_exception(cc, EXCE_UNDEFINED_ELEMENT, JIT_OP_BGEU,
                            cc->cmp_reg, NULL))
        goto fail;

    /* Load func_idx = table.elems[elem_idx] (byte offset = elem_idx * 4,
       widened to 64 bits on 64-bit targets). */
    if (UINTPTR_MAX == UINT64_MAX) {
        offset_i32 = jit_cc_new_reg_I32(cc);
        offset = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, offset_i32, elem_idx, NEW_CONST(I32, 2));
        GEN_INSN(I32TOI64, offset, offset_i32);
    }
    else {
        offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, offset, elem_idx, NEW_CONST(I32, 2));
    }
    func_idx = jit_cc_new_reg_I32(cc);
    tbl_elems = get_table_elems_reg(jit_frame, tbl_idx);
    GEN_INSN(LDI32, func_idx, tbl_elems, offset);

    /* -1 marks an uninitialized table slot. */
    GEN_INSN(CMP, cc->cmp_reg, func_idx, NEW_CONST(I32, -1));
    if (!jit_emit_exception(cc, EXCE_UNINITIALIZED_ELEMENT, JIT_OP_BEQ,
                            cc->cmp_reg, NULL))
        goto fail;

    func_count = NEW_CONST(I32, wasm_module->import_function_count
                                    + wasm_module->function_count);
    GEN_INSN(CMP, cc->cmp_reg, func_idx, func_count);
    /* NOTE(review): BGTU only traps when func_idx > func_count, so
       func_idx == func_count (one past the last valid index) slips
       through — looks like an off-by-one (BGEU expected); verify against
       upstream WAMR. */
    if (!jit_emit_exception(cc, EXCE_INVALID_FUNCTION_INDEX, JIT_OP_BGTU,
                            cc->cmp_reg, NULL))
        goto fail;

    /* check func_type */
    /* get func_type_idx from func_type_indexes */
    if (UINTPTR_MAX == UINT64_MAX) {
        offset1_i32 = jit_cc_new_reg_I32(cc);
        offset1 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, offset1_i32, func_idx, NEW_CONST(I32, 2));
        GEN_INSN(I32TOI64, offset1, offset1_i32);
    }
    else {
        offset1 = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, offset1, func_idx, NEW_CONST(I32, 2));
    }

    func_type_indexes = get_func_type_indexes_reg(jit_frame);
    func_type_idx = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, func_type_idx, func_type_indexes, offset1);

    /* Canonicalize the expected type index so structurally-equal types
       compare equal by index. */
    type_idx = wasm_get_smallest_type_idx(wasm_module->types,
                                          wasm_module->type_count, type_idx);
    func_type_idx1 = NEW_CONST(I32, type_idx);
    GEN_INSN(CMP, cc->cmp_reg, func_type_idx, func_type_idx1);
    if (!jit_emit_exception(cc, EXCE_INVALID_FUNCTION_TYPE_INDEX, JIT_OP_BNE,
                            cc->cmp_reg, NULL))
        goto fail;

    /* pop function arguments and store it to out area of callee stack frame */
    func_type = wasm_module->types[type_idx];
    if (!pre_call(cc, func_type)) {
        goto fail;
    }

    /* store elem_idx and func_idx to exec_env->jit_cache, so they survive
       across the basic-block split below (frame regs are cleared) */
    GEN_INSN(STI32, elem_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache)));
    GEN_INSN(STI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (!jit_check_suspend_flags(cc))
        goto fail;
#endif

    block_import = jit_cc_new_basic_block(cc, 0);
    block_nonimport = jit_cc_new_basic_block(cc, 0);
    func_return = jit_cc_new_basic_block(cc, 0);
    if (!block_import || !block_nonimport || !func_return) {
        goto fail;
    }

    /* Commit register values to locals and stacks */
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    /* Clear frame values */
    clear_values(jit_frame);

    /* jump to block_import or block_nonimport */
    GEN_INSN(CMP, cc->cmp_reg, func_idx,
             NEW_CONST(I32, cc->cur_wasm_module->import_function_count));
    GEN_INSN(BLTU, cc->cmp_reg, jit_basic_block_label(block_import),
             jit_basic_block_label(block_nonimport));

    /* block_import: call the import function via fast_jit_call_indirect */
    cc->cur_basic_block = block_import;

    /* Reload elem_idx/func_idx from the jit_cache stashed above. */
    elem_idx = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, elem_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache)));
    GEN_INSN(LDI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));

    argv = pack_argv(cc);
    if (!argv) {
        goto fail;
    }
    native_ret = jit_cc_new_reg_I32(cc);
    arg_regs[0] = cc->exec_env_reg;
    arg_regs[1] = NEW_CONST(I32, tbl_idx);
    arg_regs[2] = elem_idx;
    arg_regs[3] = NEW_CONST(I32, type_idx);
    arg_regs[4] = NEW_CONST(I32, func_type->param_cell_num);
    arg_regs[5] = argv;

    /* NOTE(review): func_import is loaded here but never used afterwards;
       fast_jit_call_indirect resolves the import itself — this LDPTR looks
       like a dead load, candidate for removal. */
    import_func_ptrs = get_import_func_ptrs_reg(jit_frame);
    func_import = jit_cc_new_reg_ptr(cc);
    if (UINTPTR_MAX == UINT64_MAX) {
        JitReg func_import_offset = jit_cc_new_reg_I32(cc);
        JitReg func_import_offset_i64 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, func_import_offset, func_idx, NEW_CONST(I32, 3));
        GEN_INSN(I32TOI64, func_import_offset_i64, func_import_offset);
        GEN_INSN(LDPTR, func_import, import_func_ptrs, func_import_offset_i64);
    }
    else {
        JitReg func_import_offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, func_import_offset, func_idx, NEW_CONST(I32, 2));
        GEN_INSN(LDPTR, func_import, import_func_ptrs, func_import_offset);
    }
    if (!jit_emit_callnative(cc, fast_jit_call_indirect, native_ret, arg_regs,
                             6)) {
        goto fail;
    }

    /* Convert bool to uint32 */
    GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));

    /* Check whether there is exception thrown */
    GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg,
                            NULL)) {
        return false;
    }

    /* Store res into current frame, so that post_return in
       block func_return can get the value */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                res = jit_cc_new_reg_I32(cc);
                GEN_INSN(LDI32, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STI32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_I64:
                res = jit_cc_new_reg_I64(cc);
                GEN_INSN(LDI64, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STI64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F32:
                res = jit_cc_new_reg_F32(cc);
                GEN_INSN(LDF32, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STF32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F64:
                res = jit_cc_new_reg_F64(cc);
                GEN_INSN(LDF64, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STF64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }

    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    clear_values(jit_frame);
    GEN_INSN(JMP, jit_basic_block_label(func_return));

    /* basic_block non_import: call the jitted bytecode function */
    cc->cur_basic_block = block_nonimport;

    GEN_INSN(LDI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));

    /* get jitted_code = fast_jit_func_ptrs[func_idx - import_count] */
    fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
    jitted_code_idx = jit_cc_new_reg_I32(cc);
    jitted_code = jit_cc_new_reg_ptr(cc);
    GEN_INSN(SUB, jitted_code_idx, func_idx,
             NEW_CONST(I32, cc->cur_wasm_module->import_function_count));
    if (UINTPTR_MAX == UINT64_MAX) {
        JitReg jitted_code_offset = jit_cc_new_reg_I32(cc);
        JitReg jitted_code_offset_64 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, jitted_code_offset, jitted_code_idx, NEW_CONST(I32, 3));
        GEN_INSN(I32TOI64, jitted_code_offset_64, jitted_code_offset);
        GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs, jitted_code_offset_64);
    }
    else {
        JitReg jitted_code_offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, jitted_code_offset, jitted_code_idx, NEW_CONST(I32, 2));
        GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs, jitted_code_offset);
    }

    res = 0;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                res = jit_cc_new_reg_I32(cc);
                break;
            case VALUE_TYPE_I64:
                res = jit_cc_new_reg_I64(cc);
                break;
            case VALUE_TYPE_F32:
                res = jit_cc_new_reg_F32(cc);
                break;
            case VALUE_TYPE_F64:
                res = jit_cc_new_reg_F64(cc);
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    GEN_INSN(CALLBC, res, 0, jitted_code, func_idx);
    /* Store res into current frame, so that post_return in
       block func_return can get the value */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                GEN_INSN(STI32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_I64:
                GEN_INSN(STI64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F32:
                GEN_INSN(STF32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F64:
                GEN_INSN(STF64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    /* commit and clear jit frame, then jump to block func_ret */
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    clear_values(jit_frame);
    GEN_INSN(JMP, jit_basic_block_label(func_return));

    /* translate block func_return */
    cc->cur_basic_block = func_return;
    if (!post_return(cc, func_type, 0, true)) {
        goto fail;
    }

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (!jit_check_suspend_flags(cc))
        goto fail;
#endif

    /* Clear part of memory regs and table regs as their values
       may be changed in the function call */
    if (cc->cur_wasm_module->possible_memory_grow)
        clear_memory_regs(cc->jit_frame);
    clear_table_regs(cc->jit_frame);
    return true;
fail:
    return false;
}
+
+#if WASM_ENABLE_REF_TYPES != 0
+bool
+jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type)
+{
+ PUSH_I32(NEW_CONST(I32, NULL_REF));
+ (void)ref_type;
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_ref_is_null(JitCompContext *cc)
+{
+ JitReg ref, res;
+
+ POP_I32(ref);
+
+ GEN_INSN(CMP, cc->cmp_reg, ref, NEW_CONST(I32, NULL_REF));
+ res = jit_cc_new_reg_I32(cc);
+ GEN_INSN(SELECTEQ, res, cc->cmp_reg, NEW_CONST(I32, 1), NEW_CONST(I32, 0));
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
+{
+ PUSH_I32(NEW_CONST(I32, func_idx));
+ return true;
+fail:
+ return false;
+}
+#endif
+
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* x86-64 implementation: emit a CALLNATIVE to @native_func_reg, moving the
   given virtual-register params into the SysV AMD64 argument hard
   registers (rdi/rsi/rdx/rcx/r8/r9 for integers, xmm0-xmm5 for floats)
   before the call, and moving the hard-register result back into @res
   afterwards when needed. Supports at most 6 parameters.
   NOTE(review): the arg-name tables could be `const char *const[]` —
   confirm jit_codegen_get_hreg_by_name accepts const char *. */
static bool
emit_callnative(JitCompContext *cc, JitReg native_func_reg, JitReg res,
                JitReg *params, uint32 param_count)
{
    JitInsn *insn;
    char *i64_arg_names[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
    char *f32_arg_names[] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" };
    char *f64_arg_names[] = { "xmm0_f64", "xmm1_f64", "xmm2_f64",
                              "xmm3_f64", "xmm4_f64", "xmm5_f64" };
    JitReg i64_arg_regs[6], f32_arg_regs[6], f64_arg_regs[6], res_reg = 0;
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg xmm0_hreg = jit_codegen_get_hreg_by_name("xmm0");
    uint32 i, i64_reg_idx, float_reg_idx;

    bh_assert(param_count <= 6);

    /* Resolve the argument hard registers once up front. */
    for (i = 0; i < 6; i++) {
        i64_arg_regs[i] = jit_codegen_get_hreg_by_name(i64_arg_names[i]);
        f32_arg_regs[i] = jit_codegen_get_hreg_by_name(f32_arg_names[i]);
        f64_arg_regs[i] = jit_codegen_get_hreg_by_name(f64_arg_names[i]);
    }

    /* Move each param into the next integer or float argument register;
       i32 values are widened to i64 on the way in. */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
                GEN_INSN(I32TOI64, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_I64:
                GEN_INSN(MOV, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F32:
                GEN_INSN(MOV, f32_arg_regs[float_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F64:
                GEN_INSN(MOV, f64_arg_regs[float_reg_idx++], params[i]);
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Pick the register that receives the call result: eax/xmm0 hard regs
       for I32/F32; for I64/F64 the virtual register itself is used
       directly as the CALLNATIVE result operand. */
    if (res) {
        switch (jit_reg_kind(res)) {
            case JIT_REG_KIND_I32:
                res_reg = eax_hreg;
                break;
            case JIT_REG_KIND_I64:
                res_reg = res;
                break;
            case JIT_REG_KIND_F32:
                res_reg = xmm0_hreg;
                break;
            case JIT_REG_KIND_F64:
                res_reg = res;
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    insn = GEN_INSN(CALLNATIVE, res_reg, native_func_reg, param_count);
    if (!insn) {
        return false;
    }

    /* Attach the argument hard registers as variadic operands of the
       CALLNATIVE insn (operands 0/1 are result and callee). */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
            case JIT_REG_KIND_I64:
                *(jit_insn_opndv(insn, i + 2)) = i64_arg_regs[i64_reg_idx++];
                break;
            case JIT_REG_KIND_F32:
                *(jit_insn_opndv(insn, i + 2)) = f32_arg_regs[float_reg_idx++];
                break;
            case JIT_REG_KIND_F64:
                *(jit_insn_opndv(insn, i + 2)) = f64_arg_regs[float_reg_idx++];
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Copy the result out of the hard register when one was used. */
    if (res && res != res_reg) {
        GEN_INSN(MOV, res, res_reg);
    }

    return true;
}
+#else
+static bool
+emit_callnative(JitCompContext *cc, JitRef native_func_reg, JitReg res,
+ JitReg *params, uint32 param_count)
+{
+ JitInsn *insn;
+ uint32 i;
+
+ bh_assert(param_count <= 6);
+
+ insn = GEN_INSN(CALLNATIVE, res, native_func_reg, param_count);
+ if (!insn)
+ return false;
+
+ for (i = 0; i < param_count; i++) {
+ *(jit_insn_opndv(insn, i + 2)) = params[i];
+ }
+ return true;
+}
+#endif
+
+bool
+jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
+ JitReg *params, uint32 param_count)
+{
+ return emit_callnative(cc, NEW_CONST(PTR, (uintptr_t)native_func), res,
+ params, param_count);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.h
new file mode 100644
index 000000000..7405f774c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_function.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_FUNCTION_H_
+#define _JIT_EMIT_FUNCTION_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call);
+
+bool
+jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
+ uint32 tbl_idx);
+
+bool
+jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type);
+
+bool
+jit_compile_op_ref_is_null(JitCompContext *cc);
+
+bool
+jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx);
+
+bool
+jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
+ JitReg *params, uint32 param_count);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_FUNCTION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.c
new file mode 100644
index 000000000..9635d4e57
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.c
@@ -0,0 +1,1200 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_memory.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_function.h"
+#include "../jit_frontend.h"
+#include "../jit_codegen.h"
+#include "../../interpreter/wasm_runtime.h"
+#include "jit_emit_control.h"
+
+#ifndef OS_ENABLE_HW_BOUND_CHECK
/* Return the frame register caching the bound-check boundary of memory
 * `mem_idx` for an access of `bytes` bytes (one dedicated boundary
 * register per supported access width: 1/2/4/8/16 bytes).
 * Returns 0 (an invalid register) for any other width. */
static JitReg
get_memory_boundary(JitCompContext *cc, uint32 mem_idx, uint32 bytes)
{
    JitReg memory_boundary;

    switch (bytes) {
        case 1:
        {
            memory_boundary =
                get_mem_bound_check_1byte_reg(cc->jit_frame, mem_idx);
            break;
        }
        case 2:
        {
            memory_boundary =
                get_mem_bound_check_2bytes_reg(cc->jit_frame, mem_idx);
            break;
        }
        case 4:
        {
            memory_boundary =
                get_mem_bound_check_4bytes_reg(cc->jit_frame, mem_idx);
            break;
        }
        case 8:
        {
            memory_boundary =
                get_mem_bound_check_8bytes_reg(cc->jit_frame, mem_idx);
            break;
        }
        case 16:
        {
            memory_boundary =
                get_mem_bound_check_16bytes_reg(cc->jit_frame, mem_idx);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    return memory_boundary;
fail:
    return 0;
}
+#endif
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
/* Mark a previously emitted load/store IR instruction as atomic by
 * setting the lowest bit of its flags byte. */
static void
set_load_or_store_atomic(JitInsn *load_or_store_inst)
{
    load_or_store_inst->flags_u8 |= 0x1;
}
+#endif
+
+#if UINTPTR_MAX == UINT64_MAX
/* 64-bit host: zero-extend the 32-bit wasm address, add the 64-bit
 * constant offset, and — unless hardware bound checking is enabled —
 * emit a trap when the combined offset exceeds `memory_boundary`.
 * The 64-bit addition cannot wrap because both operands originate from
 * 32-bit values. Returns the effective-offset register, or 0 on
 * failure. */
static JitReg
check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
                                 JitReg memory_boundary)
{
    JitReg long_addr, offset1;

    /* long_addr = (int64_t)addr */
    long_addr = jit_cc_new_reg_I64(cc);
    GEN_INSN(U32TOI64, long_addr, addr);

    /* offset1 = offset + long_addr */
    offset1 = jit_cc_new_reg_I64(cc);
    GEN_INSN(ADD, offset1, offset, long_addr);

#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* if (offset1 > memory_boundary) goto EXCEPTION */
    GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
    if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, JIT_OP_BGTU,
                            cc->cmp_reg, NULL)) {
        goto fail;
    }
#endif

    return offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
fail:
    return 0;
#endif
}
+#else
/* 32-bit host: compute offset1 = addr + offset in 32 bits and trap on
 * wrap-around (offset1 < addr); additionally trap when offset1 exceeds
 * `memory_boundary` unless hardware bound checking is enabled.
 * Returns the effective-offset register, or 0 on failure. */
static JitReg
check_and_seek_on_32bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
                                 JitReg memory_boundary)
{
    JitReg offset1;

    /* offset1 = offset + addr */
    offset1 = jit_cc_new_reg_I32(cc);
    GEN_INSN(ADD, offset1, offset, addr);

    /* if (offset1 < addr) goto EXCEPTION */
    GEN_INSN(CMP, cc->cmp_reg, offset1, addr);
    if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, JIT_OP_BLTU,
                            cc->cmp_reg, NULL)) {
        goto fail;
    }

#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* if (offset1 > memory_boundary) goto EXCEPTION */
    GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
    if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, JIT_OP_BGTU,
                            cc->cmp_reg, NULL)) {
        goto fail;
    }
#endif

    return offset1;
fail:
    return 0;
}
+#endif
+
/* Emit the bounds-check sequence for an access of `bytes` bytes at wasm
 * address register `addr` plus constant `offset`, and return the register
 * holding the combined effective offset (0 on failure).
 * With software bound checks this traps when the memory currently has
 * zero pages and when the access is out of bounds; with
 * OS_ENABLE_HW_BOUND_CHECK the boundary comparison is omitted
 * (presumably left to the hardware trap mechanism — see the guarded
 * branches below). Only the default memory (index 0) is handled. */
static JitReg
check_and_seek(JitCompContext *cc, JitReg addr, uint32 offset, uint32 bytes)
{
    JitReg memory_boundary = 0, offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* the default memory */
    uint32 mem_idx = 0;
#endif

#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* ---------- check ---------- */
    /* 1. shortcut if the memory size is 0 */
    if (cc->cur_wasm_module->memories != NULL
        && 0 == cc->cur_wasm_module->memories[mem_idx].init_page_count) {
        JitReg module_inst, cur_page_count;
        uint32 cur_page_count_offset =
            (uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
            + (uint32)offsetof(WASMMemoryInstance, cur_page_count);

        /* if (cur_mem_page_count == 0) goto EXCEPTION */
        module_inst = get_module_inst_reg(cc->jit_frame);
        cur_page_count = jit_cc_new_reg_I32(cc);
        GEN_INSN(LDI32, cur_page_count, module_inst,
                 NEW_CONST(I32, cur_page_count_offset));
        GEN_INSN(CMP, cc->cmp_reg, cur_page_count, NEW_CONST(I32, 0));
        if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                JIT_OP_BEQ, cc->cmp_reg, NULL)) {
            goto fail;
        }
    }

    /* 2. a complete boundary check */
    memory_boundary = get_memory_boundary(cc, mem_idx, bytes);
    if (!memory_boundary)
        goto fail;
#endif

#if UINTPTR_MAX == UINT64_MAX
    offset1 = check_and_seek_on_64bit_platform(cc, addr, NEW_CONST(I64, offset),
                                               memory_boundary);
    if (!offset1)
        goto fail;
#else
    offset1 = check_and_seek_on_32bit_platform(cc, addr, NEW_CONST(I32, offset),
                                               memory_boundary);
    if (!offset1)
        goto fail;
#endif

    return offset1;
fail:
    return 0;
}
+
/* Trap with EXCE_UNALIGNED_ATOMIC unless `offset1` is a multiple of the
 * natural access size (1 << align). Arithmetic is done at the host's
 * pointer width. Expects `cc`, `align` and a `fail` label in the
 * expanding scope. */
#if UINTPTR_MAX == UINT64_MAX
#define CHECK_ALIGNMENT(offset1)                                          \
    do {                                                                  \
        JitReg align_mask = NEW_CONST(I64, ((uint64)1 << align) - 1);     \
        JitReg AND_res = jit_cc_new_reg_I64(cc);                          \
        GEN_INSN(AND, AND_res, offset1, align_mask);                      \
        GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I64, 0));           \
        if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,    \
                                cc->cmp_reg, NULL))                       \
            goto fail;                                                    \
    } while (0)
#else
#define CHECK_ALIGNMENT(offset1)                                          \
    do {                                                                  \
        JitReg align_mask = NEW_CONST(I32, (1 << align) - 1);             \
        JitReg AND_res = jit_cc_new_reg_I32(cc);                          \
        GEN_INSN(AND, AND_res, offset1, align_mask);                      \
        GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I32, 0));           \
        if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,    \
                                cc->cmp_reg, NULL))                       \
            goto fail;                                                    \
    } while (0)
#endif
+
/* Translate the i32 load opcodes (i32.load, i32.load8_s/u,
 * i32.load16_s/u and their atomic variants): pop the address, emit
 * bounds (and, for atomics, alignment) checks, load `bytes` bytes with
 * sign or zero extension per `sign`, and push the i32 result. */
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    JitReg addr, offset1, value, memory_data;
    JitInsn *load_insn = NULL;

    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic) {
        CHECK_ALIGNMENT(offset1);
    }
#endif

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    value = jit_cc_new_reg_I32(cc);
    switch (bytes) {
        case 1:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
            }
            break;
        }
        case 2:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
            }
            break;
        }
        case 4:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
            }
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic && load_insn)
        set_load_or_store_atomic(load_insn);
#else
    (void)load_insn;
#endif

    PUSH_I32(value);
    return true;
fail:
    return false;
}
+
/* Translate the i64 load opcodes (i64.load, i64.load8/16/32_s/u and
 * their atomic variants): pop the address, emit bounds (and, for
 * atomics, alignment) checks, load `bytes` bytes with sign or zero
 * extension per `sign`, and push the i64 result. */
bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    JitReg addr, offset1, value, memory_data;
    JitInsn *load_insn = NULL;

    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic) {
        CHECK_ALIGNMENT(offset1);
    }
#endif

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    value = jit_cc_new_reg_I64(cc);
    switch (bytes) {
        case 1:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
            }
            break;
        }
        case 2:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
            }
            break;
        }
        case 4:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
            }
            break;
        }
        case 8:
        {
            if (sign) {
                load_insn = GEN_INSN(LDI64, value, memory_data, offset1);
            }
            else {
                load_insn = GEN_INSN(LDU64, value, memory_data, offset1);
            }
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic && load_insn)
        set_load_or_store_atomic(load_insn);
#else
    (void)load_insn;
#endif

    PUSH_I64(value);
    return true;
fail:
    return false;
}
+
/* Translate f32.load: pop the address, bounds-check a 4-byte access and
 * push the loaded 32-bit float. */
bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg addr, offset1, value, memory_data;

    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, 4);
    if (!offset1) {
        goto fail;
    }

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    value = jit_cc_new_reg_F32(cc);
    GEN_INSN(LDF32, value, memory_data, offset1);

    PUSH_F32(value);
    return true;
fail:
    return false;
}
+
/* Translate f64.load: pop the address, bounds-check an 8-byte access and
 * push the loaded 64-bit float. */
bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg addr, offset1, value, memory_data;

    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, 8);
    if (!offset1) {
        goto fail;
    }

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    value = jit_cc_new_reg_F64(cc);
    GEN_INSN(LDF64, value, memory_data, offset1);

    PUSH_F64(value);
    return true;
fail:
    return false;
}
+
/* Translate the i32 store opcodes (i32.store, i32.store8, i32.store16
 * and their atomic variants): pop value and address, emit bounds (and,
 * for atomics, alignment) checks and store the low `bytes` bytes of the
 * value. */
bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    JitReg value, addr, offset1, memory_data;
    JitInsn *store_insn = NULL;

    POP_I32(value);
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic) {
        CHECK_ALIGNMENT(offset1);
    }
#endif

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    switch (bytes) {
        case 1:
        {
            store_insn = GEN_INSN(STI8, value, memory_data, offset1);
            break;
        }
        case 2:
        {
            store_insn = GEN_INSN(STI16, value, memory_data, offset1);
            break;
        }
        case 4:
        {
            store_insn = GEN_INSN(STI32, value, memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic && store_insn)
        set_load_or_store_atomic(store_insn);
#else
    (void)store_insn;
#endif

    return true;
fail:
    return false;
}
+
/* Translate the i64 store opcodes (i64.store, i64.store8/16/32 and their
 * atomic variants): pop value and address, emit bounds (and, for
 * atomics, alignment) checks and store the low `bytes` bytes of the
 * value. */
bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    JitReg value, addr, offset1, memory_data;
    JitInsn *store_insn = NULL;

    POP_I64(value);
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic) {
        CHECK_ALIGNMENT(offset1);
    }
#endif

    /* Narrow a constant to a 32-bit immediate when only 1/2/4 bytes are
     * stored, matching the I32-kind operand of STI8/STI16/STI32. */
    if (jit_reg_is_const(value) && bytes < 8) {
        value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
    }

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    switch (bytes) {
        case 1:
        {
            store_insn = GEN_INSN(STI8, value, memory_data, offset1);
            break;
        }
        case 2:
        {
            store_insn = GEN_INSN(STI16, value, memory_data, offset1);
            break;
        }
        case 4:
        {
            store_insn = GEN_INSN(STI32, value, memory_data, offset1);
            break;
        }
        case 8:
        {
            store_insn = GEN_INSN(STI64, value, memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic && store_insn)
        set_load_or_store_atomic(store_insn);
#else
    (void)store_insn;
#endif

    return true;
fail:
    return false;
}
+
/* Translate f32.store: pop value and address, bounds-check a 4-byte
 * access and store the 32-bit float. */
bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg value, addr, offset1, memory_data;

    POP_F32(value);
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, 4);
    if (!offset1) {
        goto fail;
    }

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    GEN_INSN(STF32, value, memory_data, offset1);

    return true;
fail:
    return false;
}
+
/* Translate f64.store: pop value and address, bounds-check an 8-byte
 * access and store the 64-bit float. */
bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg value, addr, offset1, memory_data;

    POP_F64(value);
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, 8);
    if (!offset1) {
        goto fail;
    }

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    GEN_INSN(STF64, value, memory_data, offset1);

    return true;
fail:
    return false;
}
+
/* Translate memory.size: load cur_page_count from the memory instance
 * embedded after the module instance's global/table data and push it.
 * (mem_idx is currently unused — only the default memory is handled.) */
bool
jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx)
{
    JitReg module_inst, cur_page_count;
    uint32 cur_page_count_offset =
        (uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
        + (uint32)offsetof(WASMMemoryInstance, cur_page_count);

    module_inst = get_module_inst_reg(cc->jit_frame);
    cur_page_count = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, cur_page_count, module_inst,
             NEW_CONST(I32, cur_page_count_offset));

    PUSH_I32(cur_page_count);

    return true;
fail: /* reached via the error path hidden inside the PUSH_I32 macro */
    return false;
}
+
/* Translate memory.grow: read the current page count, call
 * wasm_enlarge_memory(inst, delta) natively, and push the previous page
 * count on success or -1 on failure. Cached memory registers are cleared
 * afterwards because growing may move the memory data. */
bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx)
{
    JitReg module_inst, grow_res, res;
    JitReg prev_page_count, inc_page_count, args[2];

    /* Get current page count */
    uint32 cur_page_count_offset =
        (uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
        + (uint32)offsetof(WASMMemoryInstance, cur_page_count);

    module_inst = get_module_inst_reg(cc->jit_frame);
    prev_page_count = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, prev_page_count, module_inst,
             NEW_CONST(I32, cur_page_count_offset));

    /* Call wasm_enlarge_memory */
    POP_I32(inc_page_count);

    grow_res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = inc_page_count;

    if (!jit_emit_callnative(cc, wasm_enlarge_memory, grow_res, args, 2)) {
        goto fail;
    }
    /* Convert bool to uint32 */
    GEN_INSN(AND, grow_res, grow_res, NEW_CONST(I32, 0xFF));

    /* return different values according to memory.grow result */
    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(CMP, cc->cmp_reg, grow_res, NEW_CONST(I32, 0));
    GEN_INSN(SELECTNE, res, cc->cmp_reg, prev_page_count,
             NEW_CONST(I32, (int32)-1));
    PUSH_I32(res);

    /* Ensure a refresh in next get memory related registers */
    clear_memory_regs(cc->jit_frame);

    return true;
fail:
    return false;
}
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+static int
+wasm_init_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 seg_idx,
+ uint32 len, uint32 mem_offset, uint32 data_offset)
+{
+ WASMMemoryInstance *mem_inst;
+ WASMDataSeg *data_segment;
+ uint32 mem_size;
+ uint8 *mem_addr, *data_addr;
+
+ /* if d + n > the length of mem.data */
+ mem_inst = inst->memories[mem_idx];
+ mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
+ if (mem_size < mem_offset || mem_size - mem_offset < len)
+ goto out_of_bounds;
+
+ /* if s + n > the length of data.data */
+ bh_assert(seg_idx < inst->module->data_seg_count);
+ data_segment = inst->module->data_segments[seg_idx];
+ if (data_segment->data_length < data_offset
+ || data_segment->data_length - data_offset < len)
+ goto out_of_bounds;
+
+ mem_addr = mem_inst->memory_data + mem_offset;
+ data_addr = data_segment->data + data_offset;
+ bh_memcpy_s(mem_addr, mem_size - mem_offset, data_addr, len);
+
+ return 0;
+out_of_bounds:
+ wasm_set_exception(inst, "out of bounds memory access");
+ return -1;
+}
+
/* Translate memory.init: pop len/src/dst, call the wasm_init_memory
 * runtime helper, and re-raise (EXCE_ALREADY_THROWN) when it returns a
 * negative value. */
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx)
{
    JitReg len, mem_offset, data_offset, res;
    JitReg args[6] = { 0 };

    POP_I32(len);
    POP_I32(data_offset);
    POP_I32(mem_offset);

    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, mem_idx);
    args[2] = NEW_CONST(I32, seg_idx);
    args[3] = len;
    args[4] = mem_offset;
    args[5] = data_offset;

    if (!jit_emit_callnative(cc, wasm_init_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
                            NULL))
        goto fail;

    return true;
fail:
    return false;
}
+
/* Translate data.drop inline (no runtime call): locate the data segment
 * through module->data_segments[seg_idx] and store 0 to its data_length
 * field. */
bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx)
{
    JitReg module = get_module_reg(cc->jit_frame);
    JitReg data_segments = jit_cc_new_reg_ptr(cc);
    JitReg data_segment = jit_cc_new_reg_ptr(cc);

    GEN_INSN(LDPTR, data_segments, module,
             NEW_CONST(I32, offsetof(WASMModule, data_segments)));
    GEN_INSN(LDPTR, data_segment, data_segments,
             NEW_CONST(I32, seg_idx * sizeof(WASMDataSeg *)));
    GEN_INSN(STI32, NEW_CONST(I32, 0), data_segment,
             NEW_CONST(I32, offsetof(WASMDataSeg, data_length)));

    return true;
}
+
+static int
+wasm_copy_memory(WASMModuleInstance *inst, uint32 src_mem_idx,
+ uint32 dst_mem_idx, uint32 len, uint32 src_offset,
+ uint32 dst_offset)
+{
+ WASMMemoryInstance *src_mem, *dst_mem;
+ uint32 src_mem_size, dst_mem_size;
+ uint8 *src_addr, *dst_addr;
+
+ src_mem = inst->memories[src_mem_idx];
+ dst_mem = inst->memories[dst_mem_idx];
+ src_mem_size = src_mem->cur_page_count * src_mem->num_bytes_per_page;
+ dst_mem_size = dst_mem->cur_page_count * dst_mem->num_bytes_per_page;
+
+ /* if s + n > the length of mem.data */
+ if (src_mem_size < src_offset || src_mem_size - src_offset < len)
+ goto out_of_bounds;
+
+ /* if d + n > the length of mem.data */
+ if (dst_mem_size < dst_offset || dst_mem_size - dst_offset < len)
+ goto out_of_bounds;
+
+ src_addr = src_mem->memory_data + src_offset;
+ dst_addr = dst_mem->memory_data + dst_offset;
+ /* allowing the destination and source to overlap */
+ bh_memmove_s(dst_addr, dst_mem_size - dst_offset, src_addr, len);
+
+ return 0;
+out_of_bounds:
+ wasm_set_exception(inst, "out of bounds memory access");
+ return -1;
+}
+
/* Translate memory.copy: pop len/src/dst, call the wasm_copy_memory
 * runtime helper, and re-raise (EXCE_ALREADY_THROWN) when it returns a
 * negative value. */
bool
jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
                           uint32 dst_mem_idx)
{
    JitReg len, src, dst, res;
    JitReg args[6] = { 0 };

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, src_mem_idx);
    args[2] = NEW_CONST(I32, dst_mem_idx);
    args[3] = len;
    args[4] = src;
    args[5] = dst;

    if (!jit_emit_callnative(cc, wasm_copy_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
                            NULL))
        goto fail;

    return true;
fail:
    return false;
}
+
+static int
+wasm_fill_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 len,
+ uint32 val, uint32 dst)
+{
+ WASMMemoryInstance *mem_inst;
+ uint32 mem_size;
+ uint8 *dst_addr;
+
+ mem_inst = inst->memories[mem_idx];
+ mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
+
+ if (mem_size < dst || mem_size - dst < len)
+ goto out_of_bounds;
+
+ dst_addr = mem_inst->memory_data + dst;
+ memset(dst_addr, val, len);
+
+ return 0;
+out_of_bounds:
+ wasm_set_exception(inst, "out of bounds memory access");
+ return -1;
+}
+
/* Translate memory.fill: pop len/val/dst, call the wasm_fill_memory
 * runtime helper, and re-raise (EXCE_ALREADY_THROWN) when it returns a
 * negative value. */
bool
jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx)
{
    JitReg res, len, val, dst;
    JitReg args[5] = { 0 };

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, mem_idx);
    args[2] = len;
    args[3] = val;
    args[4] = dst;

    if (!jit_emit_callnative(cc, wasm_fill_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
                            NULL))
        goto fail;

    return true;
fail:
    return false;
}
+#endif
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
/* Emit the atomic read-modify-write IR instruction AT_<op> matching the
 * access width: U8/U16 for narrow accesses, I32 or U32 for 4-byte
 * accesses depending on the operand type, I64 for 8-byte accesses.
 * Expects `insn`, `cc` and a `fail` label in the expanding scope. */
#define GEN_AT_RMW_INSN(op, op_type, bytes, result, value, memory_data,  \
                        offset1)                                         \
    do {                                                                 \
        switch (bytes) {                                                 \
            case 1:                                                      \
            {                                                            \
                insn = GEN_INSN(AT_##op##U8, result, value, memory_data, \
                                offset1);                                \
                break;                                                   \
            }                                                            \
            case 2:                                                      \
            {                                                            \
                insn = GEN_INSN(AT_##op##U16, result, value, memory_data, \
                                offset1);                                \
                break;                                                   \
            }                                                            \
            case 4:                                                      \
            {                                                            \
                if (op_type == VALUE_TYPE_I32)                           \
                    insn = GEN_INSN(AT_##op##I32, result, value, memory_data, \
                                    offset1);                            \
                else                                                     \
                    insn = GEN_INSN(AT_##op##U32, result, value, memory_data, \
                                    offset1);                            \
                break;                                                   \
            }                                                            \
            case 8:                                                      \
            {                                                            \
                insn = GEN_INSN(AT_##op##I64, result, value, memory_data, \
                                offset1);                                \
                break;                                                   \
            }                                                            \
            default:                                                     \
            {                                                            \
                bh_assert(0);                                            \
                goto fail;                                               \
            }                                                            \
        }                                                                \
    } while (0)
+
/* Translate the atomic read-modify-write opcodes (add/sub/and/or/xor/
 * xchg on i32/i64, including narrow variants). Implemented only for the
 * x86-64 target; on other targets the function falls through to the fail
 * path. The logical ops are lowered via a cmpxchg loop, so the generated
 * instruction pins rax/rbx (eax/ebx for i32). */
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes)
{
    JitReg addr, offset1, memory_data, value, result, eax_hreg, rax_hreg,
        ebx_hreg, rbx_hreg;
    JitInsn *insn = NULL;
    bool is_i32 = op_type == VALUE_TYPE_I32;
    bool is_logical_op = atomic_op == AtomicRMWBinOpAnd
                         || atomic_op == AtomicRMWBinOpOr
                         || atomic_op == AtomicRMWBinOpXor;

    /* currently we only implement atomic rmw on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)

    /* For atomic logical binary ops, it implicitly uses rax in cmpxchg
     * instruction and implicitly uses rbx for storing temp value in the
     * generated loop */
    eax_hreg = jit_codegen_get_hreg_by_name("eax");
    rax_hreg = jit_codegen_get_hreg_by_name("rax");
    ebx_hreg = jit_codegen_get_hreg_by_name("ebx");
    rbx_hreg = jit_codegen_get_hreg_by_name("rbx");

    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
    }
    else {
        POP_I64(value);
    }
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    CHECK_ALIGNMENT(offset1);

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    if (op_type == VALUE_TYPE_I32)
        result = jit_cc_new_reg_I32(cc);
    else
        result = jit_cc_new_reg_I64(cc);

    switch (atomic_op) {
        case AtomicRMWBinOpAdd:
        {
            GEN_AT_RMW_INSN(ADD, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpSub:
        {
            GEN_AT_RMW_INSN(SUB, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpAnd:
        {
            GEN_AT_RMW_INSN(AND, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpOr:
        {
            GEN_AT_RMW_INSN(OR, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpXor:
        {
            GEN_AT_RMW_INSN(XOR, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpXchg:
        {
            GEN_AT_RMW_INSN(XCHG, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    if (is_logical_op
        && (!insn
            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)
            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? ebx_hreg : rbx_hreg))) {
        jit_set_last_error(
            cc, "generate atomic logical insn or lock ra&rb hreg failed");
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32)
        PUSH_I32(result);
    else
        PUSH_I64(result);

    return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */

fail:
    return false;
}
+
/* Translate the atomic compare-exchange opcodes for i32/i64 and their
 * narrow variants. Implemented only for the x86-64 target; other targets
 * fall through to the fail path. The expected value is moved into
 * al/ax/eax/rax (as the cmpxchg instruction requires) and the value read
 * back from that register is pushed as the result. */
bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    JitReg addr, offset1, memory_data, value, expect, result;
    bool is_i32 = op_type == VALUE_TYPE_I32;
    /* currently we only implement atomic cmpxchg on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    /* cmpxchg will use register al/ax/eax/rax to store parameter expected
     * value, and the read result will also be stored to al/ax/eax/rax */
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitInsn *insn = NULL;

    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
    if (is_i32) {
        POP_I32(value);
        POP_I32(expect);
        result = jit_cc_new_reg_I32(cc);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
        result = jit_cc_new_reg_I64(cc);
    }
    POP_I32(addr);

    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    CHECK_ALIGNMENT(offset1);

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    GEN_INSN(MOV, is_i32 ? eax_hreg : rax_hreg, expect);
    switch (bytes) {
        case 1:
        {
            insn = GEN_INSN(AT_CMPXCHGU8, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        case 2:
        {
            insn = GEN_INSN(AT_CMPXCHGU16, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        case 4:
        {
            if (op_type == VALUE_TYPE_I32)
                insn =
                    GEN_INSN(AT_CMPXCHGI32, value, is_i32 ? eax_hreg : rax_hreg,
                             memory_data, offset1);
            else
                insn =
                    GEN_INSN(AT_CMPXCHGU32, value, is_i32 ? eax_hreg : rax_hreg,
                             memory_data, offset1);
            break;
        }
        case 8:
        {
            insn = GEN_INSN(AT_CMPXCHGI64, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    if (!insn
        || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)) {
        jit_set_last_error(cc, "generate cmpxchg insn or lock ra hreg failed");
        goto fail;
    }

    GEN_INSN(MOV, result, is_i32 ? eax_hreg : rax_hreg);

    if (is_i32)
        PUSH_I32(result);
    else
        PUSH_I64(result);

    return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */

fail:
    return false;
}
+
/* Translate memory.atomic.wait32/wait64: bounds- and alignment-check the
 * address, compute the native address, call wasm_runtime_atomic_wait()
 * and push its i32 result; a return of -1 means an exception was already
 * thrown, so re-raise it. A suspend check point is inserted afterwards
 * when the thread manager is enabled. */
bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);

    // Pop atomic.wait arguments
    JitReg timeout, expect, expect_64, addr;
    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        expect_64 = jit_cc_new_reg_I64(cc);
        GEN_INSN(I32TOI64, expect_64, expect);
    }
    else {
        POP_I64(expect_64);
    }
    POP_I32(addr);

    // Get referenced address and store it in `maddr`
    JitReg memory_data = get_memory_data_reg(cc->jit_frame, 0);
    JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1)
        goto fail;
    CHECK_ALIGNMENT(offset1);

    JitReg maddr = jit_cc_new_reg_ptr(cc);
    GEN_INSN(ADD, maddr, memory_data, offset1);

    // Prepare `wasm_runtime_atomic_wait` arguments
    JitReg res = jit_cc_new_reg_I32(cc);
    JitReg args[5] = { 0 };
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = maddr;
    args[2] = expect_64;
    args[3] = timeout;
    args[4] = NEW_CONST(I32, false);

    if (!jit_emit_callnative(cc, wasm_runtime_atomic_wait, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    // Handle return code
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, -1));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg,
                            NULL))
        goto fail;

    PUSH_I32(res);

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (!jit_check_suspend_flags(cc))
        goto fail;
#endif
    return true;
fail:
    return false;
}
+
/* Translate memory.atomic.notify: bounds- and alignment-check the
 * address, compute the native address, call wasm_runtime_atomic_notify()
 * and push the number of woken waiters; a negative return re-raises the
 * already-thrown exception. */
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
                              uint32 bytes)
{
    // Pop atomic.notify arguments
    JitReg notify_count, addr;
    POP_I32(notify_count);
    POP_I32(addr);

    // Get referenced address and store it in `maddr`
    JitReg memory_data = get_memory_data_reg(cc->jit_frame, 0);
    JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1)
        goto fail;
    CHECK_ALIGNMENT(offset1);

    JitReg maddr = jit_cc_new_reg_ptr(cc);
    GEN_INSN(ADD, maddr, memory_data, offset1);

    // Prepare `wasm_runtime_atomic_notify` arguments
    JitReg res = jit_cc_new_reg_I32(cc);
    JitReg args[3] = { 0 };
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = maddr;
    args[2] = notify_count;

    if (!jit_emit_callnative(cc, wasm_runtime_atomic_notify, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    // Handle return code
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
                            NULL))
        goto fail;

    PUSH_I32(res);
    return true;
fail:
    return false;
}
+
/* Translate atomic.fence into a single FENCE IR instruction (a full
 * memory barrier). */
bool
jit_compiler_op_atomic_fence(JitCompContext *cc)
{
    GEN_INSN(FENCE);
    return true;
}
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.h
new file mode 100644
index 000000000..6565cdc11
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_memory.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_MEMORY_H_
+#define _JIT_EMIT_MEMORY_H_
+
+#include "../jit_compiler.h"
+#if WASM_ENABLE_SHARED_MEMORY != 0
+#include "../../common/wasm_shared_memory.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
+ uint32 bytes, bool sign, bool atomic);
+
+bool
+jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
+ uint32 bytes, bool sign, bool atomic);
+
+bool
+jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset);
+
+bool
+jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset);
+
+bool
+jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
+ uint32 bytes, bool atomic);
+
+bool
+jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
+ uint32 bytes, bool atomic);
+
+bool
+jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset);
+
+bool
+jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset);
+
+bool
+jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx);
+
+bool
+jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx);
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+bool
+jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx);
+
+bool
+jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx);
+
+bool
+jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
+ uint32 dst_mem_idx);
+
+bool
+jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx);
+#endif
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+bool
+jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
+ uint32 align, uint32 offset, uint32 bytes);
+
+bool
+jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
+ uint32 offset, uint32 bytes);
+
+bool
+jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
+ uint32 offset, uint32 bytes);
+
+bool
+jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
+ uint32 bytes);
+
+bool
+jit_compiler_op_atomic_fence(JitCompContext *cc);
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_MEMORY_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.c
new file mode 100644
index 000000000..03491e691
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.c
@@ -0,0 +1,1707 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_numberic.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_control.h"
+#include "jit_emit_function.h"
+#include "../jit_frontend.h"
+#include "../jit_codegen.h"
+
/* Push/pop helpers that select the 32- or 64-bit operand-stack macro based
   on the 'is_i32' local of the enclosing function. */
#define PUSH_INT(v)      \
    do {                 \
        if (is_i32)      \
            PUSH_I32(v); \
        else             \
            PUSH_I64(v); \
    } while (0)

#define POP_INT(v)      \
    do {                \
        if (is_i32)     \
            POP_I32(v); \
        else            \
            POP_I64(v); \
    } while (0)

/* Float counterparts, keyed on the 'is_f32' local. */
#define PUSH_FLOAT(v)    \
    do {                 \
        if (is_f32)      \
            PUSH_F32(v); \
        else             \
            PUSH_F64(v); \
    } while (0)

#define POP_FLOAT(v)    \
    do {                \
        if (is_f32)     \
            POP_F32(v); \
        else            \
            POP_F64(v); \
    } while (0)

/* Pop one integer operand, evaluate 'op' (which may return 0 on failure),
   record 'err' via jit_set_last_error on failure, and push the result.
   Relies on a 'fail' label in the enclosing function. */
#define DEF_INT_UNARY_OP(op, err)           \
    do {                                    \
        JitReg res, operand;                \
        POP_INT(operand);                   \
        if (!(res = op)) {                  \
            if (err)                        \
                jit_set_last_error(cc, err);\
            goto fail;                      \
        }                                   \
        PUSH_INT(res);                      \
    } while (0)

/* Same as DEF_INT_UNARY_OP, but pops 'right' then 'left' (stack order). */
#define DEF_INT_BINARY_OP(op, err)          \
    do {                                    \
        JitReg res, left, right;            \
        POP_INT(right);                     \
        POP_INT(left);                      \
        if (!(res = op)) {                  \
            if (err)                        \
                jit_set_last_error(cc, err);\
            goto fail;                      \
        }                                   \
        PUSH_INT(res);                      \
    } while (0)

/* Float unary variant of DEF_INT_UNARY_OP. */
#define DEF_FP_UNARY_OP(op, err)            \
    do {                                    \
        JitReg res, operand;                \
        POP_FLOAT(operand);                 \
        if (!(res = op)) {                  \
            if (err)                        \
                jit_set_last_error(cc, err);\
            goto fail;                      \
        }                                   \
        PUSH_FLOAT(res);                    \
    } while (0)

/* Float binary variant of DEF_INT_BINARY_OP. */
#define DEF_FP_BINARY_OP(op, err)           \
    do {                                    \
        JitReg res, left, right;            \
        POP_FLOAT(right);                   \
        POP_FLOAT(left);                    \
        if (!(res = op)) {                  \
            if (err)                        \
                jit_set_last_error(cc, err);\
            goto fail;                      \
        }                                   \
        PUSH_FLOAT(res);                    \
    } while (0)
+
+static uint32
+clz32(uint32 type)
+{
+ uint32 num = 0;
+ if (type == 0)
+ return 32;
+ while (!(type & 0x80000000)) {
+ num++;
+ type <<= 1;
+ }
+ return num;
+}
+
+static uint64
+clz64(uint64 type)
+{
+ uint32 num = 0;
+ if (type == 0)
+ return 64;
+ while (!(type & 0x8000000000000000LL)) {
+ num++;
+ type <<= 1;
+ }
+ return num;
+}
+
+static uint32
+ctz32(uint32 type)
+{
+ uint32 num = 0;
+ if (type == 0)
+ return 32;
+ while (!(type & 1)) {
+ num++;
+ type >>= 1;
+ }
+ return num;
+}
+
+static uint64
+ctz64(uint64 type)
+{
+ uint32 num = 0;
+ if (type == 0)
+ return 64;
+ while (!(type & 1)) {
+ num++;
+ type >>= 1;
+ }
+ return num;
+}
+
+static uint32
+popcnt32(uint32 u)
+{
+ uint32 ret = 0;
+ while (u) {
+ u = (u & (u - 1));
+ ret++;
+ }
+ return ret;
+}
+
+static uint64
+popcnt64(uint64 u)
+{
+ uint32 ret = 0;
+ while (u) {
+ u = (u & (u - 1));
+ ret++;
+ }
+ return ret;
+}
+
/* Translate i32.clz: fold to a constant when the operand is compile-time
   known, otherwise emit a CLZ insn into a fresh I32 register. */
bool
jit_compile_op_i32_clz(JitCompContext *cc)
{
    JitReg value, res;

    POP_I32(value);
    if (jit_reg_is_const(value)) {
        uint32 i32 = jit_cc_get_const_I32(cc, value);
        PUSH_I32(NEW_CONST(I32, clz32(i32)));
        return true;
    }

    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(CLZ, res, value);
    PUSH_I32(res);
    return true;
fail:
    /* reached only via the POP/PUSH macros on operand-stack failure */
    return false;
}
+
+bool
+jit_compile_op_i32_ctz(JitCompContext *cc)
+{
+ JitReg value, res = jit_cc_new_reg_I32(cc);
+
+ POP_I32(value);
+ if (jit_reg_is_const(value)) {
+ uint32 i32 = jit_cc_get_const_I32(cc, value);
+ PUSH_I32(NEW_CONST(I32, ctz32(i32)));
+ return true;
+ }
+
+ res = jit_cc_new_reg_I32(cc);
+ GEN_INSN(CTZ, res, value);
+ PUSH_I32(res);
+ return true;
+fail:
+ return false;
+}
+
/* Translate i32.popcnt: constant-fold or emit a POPCNT insn. */
bool
jit_compile_op_i32_popcnt(JitCompContext *cc)
{
    JitReg value, res;

    POP_I32(value);
    if (jit_reg_is_const(value)) {
        uint32 i32 = jit_cc_get_const_I32(cc, value);
        PUSH_I32(NEW_CONST(I32, popcnt32(i32)));
        return true;
    }

    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(POPCNT, res, value);
    PUSH_I32(res);
    return true;
fail:
    return false;
}
+
/* Translate i64.clz: constant-fold or emit a CLZ insn on an I64 register. */
bool
jit_compile_op_i64_clz(JitCompContext *cc)
{
    JitReg value, res;

    POP_I64(value);
    if (jit_reg_is_const(value)) {
        uint64 i64 = jit_cc_get_const_I64(cc, value);
        PUSH_I64(NEW_CONST(I64, clz64(i64)));
        return true;
    }

    res = jit_cc_new_reg_I64(cc);
    GEN_INSN(CLZ, res, value);
    PUSH_I64(res);
    return true;
fail:
    return false;
}
+
/* Translate i64.ctz: constant-fold or emit a CTZ insn on an I64 register. */
bool
jit_compile_op_i64_ctz(JitCompContext *cc)
{
    JitReg value, res;

    POP_I64(value);
    if (jit_reg_is_const(value)) {
        uint64 i64 = jit_cc_get_const_I64(cc, value);
        PUSH_I64(NEW_CONST(I64, ctz64(i64)));
        return true;
    }

    res = jit_cc_new_reg_I64(cc);
    GEN_INSN(CTZ, res, value);
    PUSH_I64(res);
    return true;
fail:
    return false;
}
+
/* Translate i64.popcnt: constant-fold or emit a POPCNT insn. */
bool
jit_compile_op_i64_popcnt(JitCompContext *cc)
{
    JitReg value, res;

    POP_I64(value);
    if (jit_reg_is_const(value)) {
        uint64 i64 = jit_cc_get_const_I64(cc, value);
        PUSH_I64(NEW_CONST(I64, popcnt64(i64)));
        return true;
    }

    res = jit_cc_new_reg_I64(cc);
    GEN_INSN(POPCNT, res, value);
    PUSH_I64(res);
    return true;
fail:
    return false;
}
+
/* True when 'val' is the all-ones constant (-1) of the active width. */
#define IS_CONST_ALL_ONE(val, is_i32)                        \
    (jit_reg_is_const(val)                                   \
     && ((is_i32 && jit_cc_get_const_I32(cc, val) == -1)     \
         || (!is_i32 && jit_cc_get_const_I64(cc, val) == -1LL)))

/* True when 'val' is the zero constant of the active width (uses the
   enclosing function's 'is_i32'). */
#define IS_CONST_ZERO(val)                                   \
    (jit_reg_is_const(val)                                   \
     && ((is_i32 && jit_cc_get_const_I32(cc, val) == 0)      \
         || (!is_i32 && jit_cc_get_const_I64(cc, val) == 0)))

/* macros for integer binary operations (ibinop) */

/* Define the pure two-constant folder do_iNN_const_<opname>. */
#define __DEF_BI_INT_CONST_OPS(bits, opname, op)                              \
    static int##bits do_i##bits##_const_##opname(int##bits lhs, int##bits rhs) \
    {                                                                          \
        return lhs op rhs;                                                     \
    }

#define DEF_BI_INT_CONST_OPS(opname, op)   \
    __DEF_BI_INT_CONST_OPS(32, opname, op) \
    __DEF_BI_INT_CONST_OPS(64, opname, op)

/* Declare the one-constant simplifier compile_int_<opname>_consts; it
   returns 0 when no algebraic shortcut applies. */
#define DEF_UNI_INT_CONST_OPS(opname)              \
    static JitReg compile_int_##opname##_consts(   \
        JitCompContext *cc, JitReg left, JitReg right, bool is_i32)

typedef JitReg (*uni_const_handler)(JitCompContext *, JitReg, JitReg, bool);
typedef int32 (*bin_i32_consts_handler)(int32, int32);
typedef int64 (*bin_i64_consts_handler)(int64, int64);
+
+/* ibinopt for integer binary operations */
+static JitReg
+compile_op_ibinopt_const(JitCompContext *cc, JitReg left, JitReg right,
+ bool is_i32, uni_const_handler handle_one_const,
+ bin_i32_consts_handler handle_two_i32_const,
+ bin_i64_consts_handler handle_two_i64_const)
+{
+ JitReg res;
+
+ if (jit_reg_is_const(left) && jit_reg_is_const(right)) {
+ if (is_i32) {
+ int32 left_val = jit_cc_get_const_I32(cc, left);
+ int32 right_val = jit_cc_get_const_I32(cc, right);
+ res = NEW_CONST(I32, handle_two_i32_const(left_val, right_val));
+ }
+ else {
+ int64 left_val = jit_cc_get_const_I64(cc, left);
+ int64 right_val = jit_cc_get_const_I64(cc, right);
+ res = NEW_CONST(I64, handle_two_i64_const(left_val, right_val));
+ }
+ goto shortcut;
+ }
+
+ if (jit_reg_is_const(left) || jit_reg_is_const(right)) {
+ res = handle_one_const(cc, left, right, is_i32);
+ if (res)
+ goto shortcut;
+ }
+
+ return 0;
+shortcut:
+ return res;
+}
+
/* Convenience wrapper binding the three per-op const handlers together. */
#define CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, opname) \
    compile_op_ibinopt_const(cc, left, right, is_i32,                 \
                             compile_int_##opname##_consts,           \
                             do_i32_const_##opname, do_i64_const_##opname)
+
/* x + 0 == x identity when exactly one operand is a known constant. */
DEF_UNI_INT_CONST_OPS(add)
{
    /* If one of the operands is 0, just return the other */
    if (IS_CONST_ZERO(left))
        return right;
    if (IS_CONST_ZERO(right))
        return left;

    return 0;
}

/* Two-constant folders do_i32_const_add / do_i64_const_add. */
DEF_BI_INT_CONST_OPS(add, +)
+
+static JitReg
+compile_int_add(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, add);
+ if (res)
+ goto shortcut;
+
+ /* Build add */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(ADD, res, left, right);
+
+shortcut:
+ return res;
+}
+
/* x - 0 == x identity (0 - x is NOT simplified: it would need a NEG). */
DEF_UNI_INT_CONST_OPS(sub)
{
    /* If the right operand is 0, just return the left */
    if (IS_CONST_ZERO(right))
        return left;

    return 0;
}

/* Two-constant folders do_i32_const_sub / do_i64_const_sub. */
DEF_BI_INT_CONST_OPS(sub, -)
+
+static JitReg
+compile_int_sub(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, sub);
+ if (res)
+ goto shortcut;
+
+ /* Build sub */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(SUB, res, left, right);
+
+shortcut:
+ return res;
+}
+
/* x * 0 == 0 identity when either operand is a known zero. */
DEF_UNI_INT_CONST_OPS(mul)
{
    /* If one of the operands is 0, just return constant 0 */
    if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right))
        return is_i32 ? NEW_CONST(I32, 0) : NEW_CONST(I64, 0);

    return 0;
}
+
+static int32
+do_i32_const_mul(int32 lhs, int32 rhs)
+{
+ return (int32)((uint64)lhs * (uint64)rhs);
+}
+
+static int64
+do_i64_const_mul(int64 lhs, int64 rhs)
+{
+ return (int64)((uint64)lhs * (uint64)rhs);
+}
+
+static JitReg
+compile_int_mul(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, mul);
+ if (res)
+ goto shortcut;
+
+ /* Build mul */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(MUL, res, left, right);
+
+shortcut:
+ return res;
+}
+
/* Emit div/rem IR assuming the trapping cases are already excluded: the
 * caller (compile_int_div) has guarded divisor == 0 and the signed
 * INT_MIN / -1 overflow, so the constant folding and the hardware
 * division below cannot trap.  Pushes the result onto the value stack.
 *
 * On x86-64 the dividend must live in eax/rax and the remainder lands in
 * edx/rdx, so the result is moved out of the appropriate hard register
 * and both hregs are locked across the insn.  The incoming 'res' register
 * is replaced on that path.  Returns false on emission failure.
 */
static bool
compile_int_div_no_check(JitCompContext *cc, IntArithmetic arith_op,
                         bool is_i32, JitReg left, JitReg right, JitReg res)
{
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg edx_hreg = jit_codegen_get_hreg_by_name("edx");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitReg rdx_hreg = jit_codegen_get_hreg_by_name("rdx");
#endif

    /* Both operands known: fold at compile time and push directly. */
    if (jit_reg_is_const(right) && jit_reg_is_const(left)) {
        if (INT_DIV_S == arith_op || INT_REM_S == arith_op) {
            if (is_i32) {
                int32 lhs = jit_cc_get_const_I32(cc, left);
                int32 rhs = jit_cc_get_const_I32(cc, right);
                if (INT_DIV_S == arith_op) {
                    res = NEW_CONST(I32, lhs / rhs);
                }
                else {
                    res = NEW_CONST(I32, lhs % rhs);
                }
                PUSH_I32(res);
                return true;
            }
            else {
                int64 lhs = jit_cc_get_const_I64(cc, left);
                int64 rhs = jit_cc_get_const_I64(cc, right);
                if (INT_DIV_S == arith_op) {
                    res = NEW_CONST(I64, lhs / rhs);
                }
                else {
                    res = NEW_CONST(I64, lhs % rhs);
                }
                PUSH_I64(res);
                return true;
            }
        }
        else {
            /* Unsigned variants: fold in unsigned arithmetic. */
            if (is_i32) {
                uint32 lhs = (uint32)jit_cc_get_const_I32(cc, left);
                uint32 rhs = (uint32)jit_cc_get_const_I32(cc, right);
                if (INT_DIV_U == arith_op) {
                    res = NEW_CONST(I32, lhs / rhs);
                }
                else {
                    res = NEW_CONST(I32, lhs % rhs);
                }
                PUSH_I32(res);
                return true;
            }
            else {
                uint64 lhs = (uint64)jit_cc_get_const_I64(cc, left);
                uint64 rhs = (uint64)jit_cc_get_const_I64(cc, right);
                if (INT_DIV_U == arith_op) {
                    res = NEW_CONST(I64, lhs / rhs);
                }
                else {
                    res = NEW_CONST(I64, lhs % rhs);
                }
                PUSH_I64(res);
                return true;
            }
        }
    }

    switch (arith_op) {
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
        case INT_DIV_S:
        case INT_DIV_U:
        {
            JitInsn *insn = NULL, *insn1 = NULL;

            /* Dividend goes through eax/rax; quotient comes back there. */
            if (is_i32) {
                GEN_INSN(MOV, eax_hreg, left);
                if (arith_op == INT_DIV_S)
                    insn = GEN_INSN(DIV_S, eax_hreg, eax_hreg, right);
                else
                    insn = GEN_INSN(DIV_U, eax_hreg, eax_hreg, right);
            }
            else {
                GEN_INSN(MOV, rax_hreg, left);
                if (arith_op == INT_DIV_S)
                    insn = GEN_INSN(DIV_S, rax_hreg, rax_hreg, right);
                else
                    insn = GEN_INSN(DIV_U, rax_hreg, rax_hreg, right);
            }

            if (!insn) {
                goto fail;
            }
            /* x86 DIV/IDIV clobbers edx as well; lock both hregs. */
            if (!jit_lock_reg_in_insn(cc, insn, eax_hreg)
                || !jit_lock_reg_in_insn(cc, insn, edx_hreg)) {
                goto fail;
            }

            /* Copy the quotient out of the hard register. */
            if (is_i32) {
                res = jit_cc_new_reg_I32(cc);
                insn1 = jit_insn_new_MOV(res, eax_hreg);
            }
            else {
                res = jit_cc_new_reg_I64(cc);
                insn1 = jit_insn_new_MOV(res, rax_hreg);
            }

            if (!insn1) {
                jit_set_last_error(cc, "generate insn failed");
                goto fail;
            }

            jit_insn_insert_after(insn, insn1);
            break;
        }
        case INT_REM_S:
        case INT_REM_U:
        {
            JitInsn *insn = NULL, *insn1 = NULL;

            /* Remainder is produced in edx/rdx. */
            if (is_i32) {
                GEN_INSN(MOV, eax_hreg, left);
                if (arith_op == INT_REM_S)
                    insn = GEN_INSN(REM_S, edx_hreg, eax_hreg, right);
                else
                    insn = GEN_INSN(REM_U, edx_hreg, eax_hreg, right);
            }
            else {
                GEN_INSN(MOV, rax_hreg, left);
                if (arith_op == INT_REM_S)
                    insn = GEN_INSN(REM_S, rdx_hreg, rax_hreg, right);
                else
                    insn = GEN_INSN(REM_U, rdx_hreg, rax_hreg, right);
            }

            if (!insn) {
                goto fail;
            }
            if (!jit_lock_reg_in_insn(cc, insn, eax_hreg)
                || !jit_lock_reg_in_insn(cc, insn, edx_hreg)) {
                goto fail;
            }

            /* Copy the remainder out of the hard register. */
            if (is_i32) {
                res = jit_cc_new_reg_I32(cc);
                insn1 = jit_insn_new_MOV(res, edx_hreg);
            }
            else {
                res = jit_cc_new_reg_I64(cc);
                insn1 = jit_insn_new_MOV(res, rdx_hreg);
            }

            if (!insn1) {
                jit_set_last_error(cc, "generate insn failed");
                goto fail;
            }

            jit_insn_insert_after(insn, insn1);
            break;
        }
#else
        /* Non-x86 targets: plain three-operand insns, no hreg dance. */
        case INT_DIV_S:
            GEN_INSN(DIV_S, res, left, right);
            break;
        case INT_DIV_U:
            GEN_INSN(DIV_U, res, left, right);
            break;
        case INT_REM_S:
            GEN_INSN(REM_S, res, left, right);
            break;
        case INT_REM_U:
            GEN_INSN(REM_U, res, left, right);
            break;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
        default:
            bh_assert(0);
            return false;
    }

    if (is_i32)
        PUSH_I32(res);
    else
        PUSH_I64(res);
    return true;
fail:
    return false;
}
+
/* Translate i32/i64 div/rem with the wasm-mandated trap semantics:
 * divide-by-zero and signed INT_MIN / -1 overflow raise exceptions.
 * A constant divisor lets us specialize (trap immediately for 0, pass
 * through for 1, NEG/overflow-check for -1); otherwise runtime checks
 * are emitted before delegating to compile_int_div_no_check.
 */
static bool
compile_int_div(JitCompContext *cc, IntArithmetic arith_op, bool is_i32,
                uint8 **p_frame_ip)
{
    JitReg left, right, res;

    bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
              || arith_op == INT_REM_S || arith_op == INT_REM_U);

    if (is_i32) {
        POP_I32(right);
        POP_I32(left);
        res = jit_cc_new_reg_I32(cc);
    }
    else {
        POP_I64(right);
        POP_I64(left);
        res = jit_cc_new_reg_I64(cc);
    }

    if (jit_reg_is_const(right)) {
        int64 right_val = is_i32 ? (int64)jit_cc_get_const_I32(cc, right)
                                 : jit_cc_get_const_I64(cc, right);

        switch (right_val) {
            case 0:
            {
                /* Directly throw exception if divided by zero */
                if (!(jit_emit_exception(cc, EXCE_INTEGER_DIVIDE_BY_ZERO,
                                         JIT_OP_JMP, 0, NULL)))
                    goto fail;

                /* Code after an unconditional trap is unreachable. */
                return jit_handle_next_reachable_block(cc, p_frame_ip);
            }
            case 1:
            {
                /* x / 1 == x; x % 1 == 0 — no insn needed. */
                if (arith_op == INT_DIV_S || arith_op == INT_DIV_U) {
                    if (is_i32)
                        PUSH_I32(left);
                    else
                        PUSH_I64(left);
                }
                else {
                    if (is_i32)
                        PUSH_I32(NEW_CONST(I32, 0));
                    else
                        PUSH_I64(NEW_CONST(I64, 0));
                }
                return true;
            }
            case -1:
            {
                if (arith_op == INT_DIV_S) {
                    if (is_i32)
                        GEN_INSN(CMP, cc->cmp_reg, left,
                                 NEW_CONST(I32, INT32_MIN));
                    else
                        GEN_INSN(CMP, cc->cmp_reg, left,
                                 NEW_CONST(I64, INT64_MIN));

                    /* Throw integer overflow exception if left is
                       INT32_MIN or INT64_MIN */
                    if (!(jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW,
                                             JIT_OP_BEQ, cc->cmp_reg, NULL)))
                        goto fail;

                    /* Push -(left) to stack */
                    GEN_INSN(NEG, res, left);
                    if (is_i32)
                        PUSH_I32(res);
                    else
                        PUSH_I64(res);
                    return true;
                }
                else if (arith_op == INT_REM_S) {
                    /* x % -1 == 0 for every x, including INT_MIN. */
                    if (is_i32)
                        PUSH_I32(NEW_CONST(I32, 0));
                    else
                        PUSH_I64(NEW_CONST(I64, 0));
                    return true;
                }
                else {
                    /* Build default div and rem */
                    return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                    right, res);
                }
            }
            default:
            {
                /* Non-trapping constant divisor: no checks needed. */
                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
            }
        }
    }
    else {
        JitReg cmp1 = jit_cc_new_reg_I32(cc);
        JitReg cmp2 = jit_cc_new_reg_I32(cc);

        GEN_INSN(CMP, cc->cmp_reg, right,
                 is_i32 ? NEW_CONST(I32, 0) : NEW_CONST(I64, 0));
        /* Throw integer divided by zero exception if right is zero */
        if (!(jit_emit_exception(cc, EXCE_INTEGER_DIVIDE_BY_ZERO, JIT_OP_BEQ,
                                 cc->cmp_reg, NULL)))
            goto fail;

        switch (arith_op) {
            case INT_DIV_S:
            {
                /* Check integer overflow: materialize the two conditions
                   as 0/1 values and AND them, since only the pair
                   (left == INT_MIN) && (right == -1) traps. */
                GEN_INSN(CMP, cc->cmp_reg, left,
                         is_i32 ? NEW_CONST(I32, INT32_MIN)
                                : NEW_CONST(I64, INT64_MIN));
                GEN_INSN(SELECTEQ, cmp1, cc->cmp_reg, NEW_CONST(I32, 1),
                         NEW_CONST(I32, 0));
                GEN_INSN(CMP, cc->cmp_reg, right,
                         is_i32 ? NEW_CONST(I32, -1) : NEW_CONST(I64, -1LL));
                GEN_INSN(SELECTEQ, cmp2, cc->cmp_reg, NEW_CONST(I32, 1),
                         NEW_CONST(I32, 0));
                GEN_INSN(AND, cmp1, cmp1, cmp2);
                GEN_INSN(CMP, cc->cmp_reg, cmp1, NEW_CONST(I32, 1));
                /* Throw integer overflow exception if left is INT32_MIN or
                   INT64_MIN, and right is -1 */
                if (!(jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BEQ,
                                         cc->cmp_reg, NULL)))
                    goto fail;

                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
            }
            case INT_REM_S:
            {
                /* Force left to 0 when right == -1 so the hardware rem
                   never sees INT_MIN % -1 (result is 0 either way). */
                JitReg left1 =
                    is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);

                GEN_INSN(CMP, cc->cmp_reg, right,
                         is_i32 ? NEW_CONST(I32, -1) : NEW_CONST(I64, -1LL));
                /* Don't generate `SELECTEQ left, cmp_reg, 0, left` since
                   left might be const, use left1 instead */
                if (is_i32)
                    GEN_INSN(SELECTEQ, left1, cc->cmp_reg, NEW_CONST(I32, 0),
                             left);
                else
                    GEN_INSN(SELECTEQ, left1, cc->cmp_reg, NEW_CONST(I64, 0),
                             left);
                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left1,
                                                right, res);
            }
            default:
            {
                /* Unsigned div/rem cannot overflow; zero already checked. */
                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
            }
        }
    }

fail:
    return false;
}
+
/* Dispatch an integer arithmetic opcode: add/sub/mul go through the
 * constant-aware emitters via DEF_INT_BINARY_OP (which pops operands and
 * pushes the result), div/rem need the trap guards in compile_int_div.
 * The DEF_INT_BINARY_OP macro jumps to 'fail' on error.
 */
static bool
compile_op_int_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                          bool is_i32, uint8 **p_frame_ip)
{
    switch (arith_op) {
        case INT_ADD:
            DEF_INT_BINARY_OP(compile_int_add(cc, left, right, is_i32),
                              "compile int add fail.");
            return true;
        case INT_SUB:
            DEF_INT_BINARY_OP(compile_int_sub(cc, left, right, is_i32),
                              "compile int sub fail.");
            return true;
        case INT_MUL:
            DEF_INT_BINARY_OP(compile_int_mul(cc, left, right, is_i32),
                              "compile int mul fail.");
            return true;
        case INT_DIV_S:
        case INT_DIV_U:
        case INT_REM_S:
        case INT_REM_U:
            return compile_int_div(cc, arith_op, is_i32, p_frame_ip);
        default:
            bh_assert(0);
            return false;
    }

fail:
    return false;
}
+
/* Public entry: i32 add/sub/mul/div/rem. */
bool
jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(cc, arith_op, true, p_frame_ip);
}

/* Public entry: i64 add/sub/mul/div/rem. */
bool
jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(cc, arith_op, false, p_frame_ip);
}
+
/* AND identities: x & 0 == 0, x & ~0 == x. */
DEF_UNI_INT_CONST_OPS(and)
{
    JitReg res;
    if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right)) {
        res = is_i32 ? NEW_CONST(I32, 0) : NEW_CONST(I64, 0);
        goto shortcut;
    }

    if (IS_CONST_ALL_ONE(left, is_i32)) {
        res = right;
        goto shortcut;
    }

    if (IS_CONST_ALL_ONE(right, is_i32)) {
        res = left;
        goto shortcut;
    }

    return 0;
shortcut:
    return res;
}

/* Two-constant folders do_i32_const_and / do_i64_const_and. */
DEF_BI_INT_CONST_OPS(and, &)
+
+static JitReg
+compile_int_and(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ /* shortcuts */
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, and);
+ if (res)
+ goto shortcut;
+
+ /* do and */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(AND, res, left, right);
+
+shortcut:
+ return res;
+}
+
/* OR identities: x | 0 == x, x | ~0 == ~0. */
DEF_UNI_INT_CONST_OPS(or)
{
    JitReg res;

    if (IS_CONST_ZERO(left)) {
        res = right;
        goto shortcut;
    }

    if (IS_CONST_ZERO(right)) {
        res = left;
        goto shortcut;
    }

    if (IS_CONST_ALL_ONE(left, is_i32) || IS_CONST_ALL_ONE(right, is_i32)) {
        res = is_i32 ? NEW_CONST(I32, -1) : NEW_CONST(I64, -1LL);
        goto shortcut;
    }

    return 0;
shortcut:
    return res;
}

/* Two-constant folders do_i32_const_or / do_i64_const_or. */
DEF_BI_INT_CONST_OPS(or, |)
+
+static JitReg
+compile_int_or(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ /* shortcuts */
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, or);
+ if (res)
+ goto shortcut;
+
+ /* do or */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(OR, res, left, right);
+
+shortcut:
+ return res;
+}
+
/* XOR identity: x ^ 0 == x. */
DEF_UNI_INT_CONST_OPS(xor)
{
    if (IS_CONST_ZERO(left))
        return right;

    if (IS_CONST_ZERO(right))
        return left;

    return 0;
}

/* Two-constant folders do_i32_const_xor / do_i64_const_xor. */
DEF_BI_INT_CONST_OPS(xor, ^)
+
+static JitReg
+compile_int_xor(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
+{
+ JitReg res;
+
+ /* shortcuts */
+ res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, xor);
+ if (res)
+ goto shortcut;
+
+ /* do xor */
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(XOR, res, left, right);
+
+shortcut:
+ return res;
+}
+
+static bool
+compile_op_int_bitwise(JitCompContext *cc, IntBitwise arith_op, bool is_i32)
+{
+ JitReg left, right, res;
+
+ POP_INT(right);
+ POP_INT(left);
+
+ switch (arith_op) {
+ case INT_AND:
+ {
+ res = compile_int_and(cc, left, right, is_i32);
+ break;
+ }
+ case INT_OR:
+ {
+ res = compile_int_or(cc, left, right, is_i32);
+ break;
+ }
+ case INT_XOR:
+ {
+ res = compile_int_xor(cc, left, right, is_i32);
+ break;
+ }
+ default:
+ {
+ bh_assert(0);
+ goto fail;
+ }
+ }
+
+ PUSH_INT(res);
+ return true;
+fail:
+ return false;
+}
+
/* Public entry: i32 and/or/xor. */
bool
jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(cc, bitwise_op, true);
}

/* Public entry: i64 and/or/xor. */
bool
jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(cc, bitwise_op, false);
}
+
/* One-constant handler for shl: x << 0 == x and 0 << n == 0.  Unlike the
   arithmetic handlers, a constant shift count also emits the insn here
   (the count is already masked by compile_int_shift_modulo). */
DEF_UNI_INT_CONST_OPS(shl)
{
    if (IS_CONST_ZERO(right) || IS_CONST_ZERO(left)) {
        return left;
    }

    if (jit_reg_is_const(right)) {
        JitReg res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, res, left, right);
        return res;
    }
    return 0;
}

/* shr_s: also x >> n == x when x is all-ones (sign fill preserves it). */
DEF_UNI_INT_CONST_OPS(shrs)
{
    if (IS_CONST_ZERO(right) || IS_CONST_ZERO(left)
        || IS_CONST_ALL_ONE(left, is_i32)) {
        return left;
    }

    if (jit_reg_is_const(right)) {
        JitReg res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        GEN_INSN(SHRS, res, left, right);
        return res;
    }
    return 0;
}

/* shr_u: x >> 0 == x and 0 >> n == 0. */
DEF_UNI_INT_CONST_OPS(shru)
{
    if (IS_CONST_ZERO(right) || IS_CONST_ZERO(left)) {
        return left;
    }

    if (jit_reg_is_const(right)) {
        JitReg res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        GEN_INSN(SHRU, res, left, right);
        return res;
    }
    return 0;
}
+
+static int32
+do_i32_const_shl(int32 lhs, int32 rhs)
+{
+ return (int32)((uint32)lhs << (uint32)rhs);
+}
+
+static int64
+do_i64_const_shl(int64 lhs, int64 rhs)
+{
+ return (int32)((uint64)lhs << (uint64)rhs);
+}
+
/* Arithmetic (sign-propagating) right-shift folders.  NOTE(review): >> on
   a negative signed value is implementation-defined in C; presumably all
   supported compilers implement it as arithmetic shift — confirm. */
DEF_BI_INT_CONST_OPS(shrs, >>)

/* Logical right-shift folder: shift as unsigned so zeros are filled in. */
static int32
do_i32_const_shru(int32 lhs, int32 rhs)
{
    return (uint32)lhs >> rhs;
}

static int64
do_i64_const_shru(int64 lhs, int64 rhs)
{
    return (uint64)lhs >> rhs;
}
+
/* Shift kinds handled by compile_int_shift_modulo. */
typedef enum { SHL, SHRS, SHRU, ROTL, ROTR } SHIFT_OP;

/* Reduce a shift count modulo the operand width (wasm semantics: only the
 * low 5/6 bits of the count are used).  Constant counts are masked at
 * compile time; variable counts get an AND insn, except for rotates,
 * where the rotation result is unaffected by the extra high bits.
 */
static JitReg
compile_int_shift_modulo(JitCompContext *cc, JitReg rhs, bool is_i32,
                         SHIFT_OP op)
{
    JitReg res;

    if (jit_reg_is_const(rhs)) {
        if (is_i32) {
            int32 val = jit_cc_get_const_I32(cc, rhs);
            val = val & 0x1f;
            res = NEW_CONST(I32, val);
        }
        else {
            int64 val = jit_cc_get_const_I64(cc, rhs);
            val = val & 0x3f;
            res = NEW_CONST(I64, val);
        }
    }
    else {
        if (op == ROTL || op == ROTR) {
            /* No need to generate AND insn as the result
               is same for rotate shift */
            res = rhs;
        }
        else if (is_i32) {
            res = jit_cc_new_reg_I32(cc);
            GEN_INSN(AND, res, rhs, NEW_CONST(I32, 0x1f));
        }
        else {
            res = jit_cc_new_reg_I64(cc);
            GEN_INSN(AND, res, rhs, NEW_CONST(I64, 0x3f));
        }
    }

    return res;
}
+
+static JitReg
+mov_left_to_reg(JitCompContext *cc, bool is_i32, JitReg left)
+{
+ JitReg res = left;
+ /* left needs to be a variable */
+ if (jit_reg_is_const(left)) {
+ res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
+ GEN_INSN(MOV, res, left);
+ }
+ return res;
+}
+
/* Emit i32/i64 shl.  On x86-64 the variable shift count must live in
 * ecx/rcx, so the count is moved there and the hreg locked across the
 * insn.  NOTE(review): the 64-bit path locks ecx_hreg (not rcx_hreg);
 * presumably both names map to the same physical register — confirm.
 */
static JitReg
compile_int_shl(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg ecx_hreg = jit_codegen_get_hreg_by_name("ecx");
    JitReg rcx_hreg = jit_codegen_get_hreg_by_name("rcx");
    JitInsn *insn = NULL;
#endif

    /* Reduce the count mod 32/64 first, then try constant shortcuts. */
    right = compile_int_shift_modulo(cc, right, is_i32, SHL);

    res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, shl);
    if (res)
        goto shortcut;

    left = mov_left_to_reg(cc, is_i32, left);

    res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    GEN_INSN(MOV, is_i32 ? ecx_hreg : rcx_hreg, right);
    insn = GEN_INSN(SHL, res, left, is_i32 ? ecx_hreg : rcx_hreg);
    if (jit_get_last_error(cc) || !jit_lock_reg_in_insn(cc, insn, ecx_hreg)) {
        goto fail;
    }
#else
    GEN_INSN(SHL, res, left, right);
    if (jit_get_last_error(cc)) {
        goto fail;
    }
#endif

shortcut:
    return res;
fail:
    return (JitReg)0;
}
+
/* Emit i32/i64 shr_s (arithmetic right shift).  Mirrors compile_int_shl;
 * see the note there about the ecx/rcx hreg handling on x86-64.
 */
static JitReg
compile_int_shrs(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg ecx_hreg = jit_codegen_get_hreg_by_name("ecx");
    JitReg rcx_hreg = jit_codegen_get_hreg_by_name("rcx");
    JitInsn *insn = NULL;
#endif

    right = compile_int_shift_modulo(cc, right, is_i32, SHRS);

    res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, shrs);
    if (res)
        goto shortcut;

    left = mov_left_to_reg(cc, is_i32, left);

    res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    GEN_INSN(MOV, is_i32 ? ecx_hreg : rcx_hreg, right);
    insn = GEN_INSN(SHRS, res, left, is_i32 ? ecx_hreg : rcx_hreg);
    if (jit_get_last_error(cc) || !jit_lock_reg_in_insn(cc, insn, ecx_hreg)) {
        goto fail;
    }
#else
    GEN_INSN(SHRS, res, left, right);
    if (jit_get_last_error(cc)) {
        goto fail;
    }
#endif

shortcut:
    return res;
fail:
    return (JitReg)0;
}
+
/* Emit i32/i64 shr_u (logical right shift).  Mirrors compile_int_shl;
 * see the note there about the ecx/rcx hreg handling on x86-64.
 */
static JitReg
compile_int_shru(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg ecx_hreg = jit_codegen_get_hreg_by_name("ecx");
    JitReg rcx_hreg = jit_codegen_get_hreg_by_name("rcx");
    JitInsn *insn = NULL;
#endif

    right = compile_int_shift_modulo(cc, right, is_i32, SHRU);

    res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, shru);
    if (res)
        goto shortcut;

    left = mov_left_to_reg(cc, is_i32, left);

    res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    GEN_INSN(MOV, is_i32 ? ecx_hreg : rcx_hreg, right);
    insn = GEN_INSN(SHRU, res, left, is_i32 ? ecx_hreg : rcx_hreg);
    if (jit_get_last_error(cc) || !jit_lock_reg_in_insn(cc, insn, ecx_hreg)) {
        goto fail;
    }
#else
    GEN_INSN(SHRU, res, left, right);
    if (jit_get_last_error(cc)) {
        goto fail;
    }
#endif

shortcut:
    return res;
fail:
    return (JitReg)0;
}
+
/* rotl identities: rotating 0 or all-ones, or rotating by 0, is a no-op.
   A constant count emits the ROTL insn directly. */
DEF_UNI_INT_CONST_OPS(rotl)
{
    if (IS_CONST_ZERO(right) || IS_CONST_ZERO(left)
        || IS_CONST_ALL_ONE(left, is_i32))
        return left;

    if (jit_reg_is_const(right)) {
        JitReg res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        GEN_INSN(ROTL, res, left, right);
        return res;
    }

    return 0;
}
+
+static int32
+do_i32_const_rotl(int32 lhs, int32 rhs)
+{
+ uint32 n = (uint32)lhs;
+ uint32 d = (uint32)rhs;
+ return (n << d) | (n >> (32 - d));
+}
+
+static int64
+do_i64_const_rotl(int64 lhs, int64 rhs)
+{
+ uint64 n = (uint64)lhs;
+ uint64 d = (uint64)rhs;
+ return (n << d) | (n >> (64 - d));
+}
+
/* Emit i32/i64 rotl.  Mirrors compile_int_shl; the count is not masked
 * with an AND insn for variable counts because rotation ignores the high
 * bits anyway.  NOTE(review): the 64-bit path locks ecx_hreg (not
 * rcx_hreg); presumably both map to the same physical register — confirm.
 */
static JitReg
compile_int_rotl(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg ecx_hreg = jit_codegen_get_hreg_by_name("ecx");
    JitReg rcx_hreg = jit_codegen_get_hreg_by_name("rcx");
    JitInsn *insn = NULL;
#endif

    right = compile_int_shift_modulo(cc, right, is_i32, ROTL);

    res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, rotl);
    if (res)
        goto shortcut;

    left = mov_left_to_reg(cc, is_i32, left);

    res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    GEN_INSN(MOV, is_i32 ? ecx_hreg : rcx_hreg, right);
    insn = GEN_INSN(ROTL, res, left, is_i32 ? ecx_hreg : rcx_hreg);
    if (jit_get_last_error(cc) || !jit_lock_reg_in_insn(cc, insn, ecx_hreg)) {
        goto fail;
    }
#else
    GEN_INSN(ROTL, res, left, right);
    if (jit_get_last_error(cc)) {
        goto fail;
    }
#endif

shortcut:
    return res;
fail:
    return (JitReg)0;
}
+
/* rotr identities: rotating 0 or all-ones, or rotating by 0, is a no-op.
   A constant count emits the ROTR insn directly. */
DEF_UNI_INT_CONST_OPS(rotr)
{
    if (IS_CONST_ZERO(right) || IS_CONST_ZERO(left)
        || IS_CONST_ALL_ONE(left, is_i32))
        return left;

    if (jit_reg_is_const(right)) {
        JitReg res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        GEN_INSN(ROTR, res, left, right);
        return res;
    }

    return 0;
}
+
+static int32
+do_i32_const_rotr(int32 lhs, int32 rhs)
+{
+ uint32 n = (uint32)lhs;
+ uint32 d = (uint32)rhs;
+ return (n >> d) | (n << (32 - d));
+}
+
+static int64
+do_i64_const_rotr(int64 lhs, int64 rhs)
+{
+ uint64 n = (uint64)lhs;
+ uint64 d = (uint64)rhs;
+ return (n >> d) | (n << (64 - d));
+}
+
/* Emit i32/i64 rotr.  Mirrors compile_int_rotl; see the notes there about
 * count masking and the ecx/rcx hreg handling on x86-64.
 */
static JitReg
compile_int_rotr(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    JitReg ecx_hreg = jit_codegen_get_hreg_by_name("ecx");
    JitReg rcx_hreg = jit_codegen_get_hreg_by_name("rcx");
    JitInsn *insn = NULL;
#endif

    right = compile_int_shift_modulo(cc, right, is_i32, ROTR);

    res = CHECK_AND_PROCESS_INT_CONSTS(cc, left, right, is_i32, rotr);
    if (res)
        goto shortcut;

    left = mov_left_to_reg(cc, is_i32, left);

    res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    GEN_INSN(MOV, is_i32 ? ecx_hreg : rcx_hreg, right);
    insn = GEN_INSN(ROTR, res, left, is_i32 ? ecx_hreg : rcx_hreg);
    if (jit_get_last_error(cc) || !jit_lock_reg_in_insn(cc, insn, ecx_hreg)) {
        goto fail;
    }
#else
    GEN_INSN(ROTR, res, left, right);
    if (jit_get_last_error(cc)) {
        goto fail;
    }
#endif

shortcut:
    return res;
fail:
    return (JitReg)0;
}
+
+static bool
+compile_op_int_shift(JitCompContext *cc, IntShift shift_op, bool is_i32)
+{
+ JitReg left, right, res;
+
+ POP_INT(right);
+ POP_INT(left);
+
+ switch (shift_op) {
+ case INT_SHL:
+ {
+ res = compile_int_shl(cc, left, right, is_i32);
+ break;
+ }
+ case INT_SHR_S:
+ {
+ res = compile_int_shrs(cc, left, right, is_i32);
+ break;
+ }
+ case INT_SHR_U:
+ {
+ res = compile_int_shru(cc, left, right, is_i32);
+ break;
+ }
+ case INT_ROTL:
+ {
+ res = compile_int_rotl(cc, left, right, is_i32);
+ break;
+ }
+ case INT_ROTR:
+ {
+ res = compile_int_rotr(cc, left, right, is_i32);
+ break;
+ }
+ default:
+ {
+ bh_assert(0);
+ goto fail;
+ }
+ }
+
+ PUSH_INT(res);
+ return true;
+fail:
+ return false;
+}
+
/* Public entry: i32 shl/shr_s/shr_u/rotl/rotr. */
bool
jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op)
{
    return compile_op_int_shift(cc, shift_op, true);
}

/* Public entry: i64 shl/shr_s/shr_u/rotl/rotr. */
bool
jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op)
{
    return compile_op_int_shift(cc, shift_op, false);
}
+
+static float32
+negf(float32 f32)
+{
+ return -f32;
+}
+
+static float64
+neg(float64 f64)
+{
+ return -f64;
+}
+
/* Translate the unary float math opcodes (abs/neg/ceil/floor/trunc/
 * nearest/sqrt) by calling the matching libm routine natively instead of
 * emitting inline IR.  NOTE(review): 'nearest' maps to rint/rintf, which
 * follows the current rounding mode; presumably round-to-nearest-even is
 * in effect as wasm requires — confirm.
 */
static bool
compile_op_float_math(JitCompContext *cc, FloatMath math_op, bool is_f32)
{
    JitReg value, res;
    void *func = NULL;

    if (is_f32)
        res = jit_cc_new_reg_F32(cc);
    else
        res = jit_cc_new_reg_F64(cc);

    if (is_f32)
        POP_F32(value);
    else
        POP_F64(value);

    switch (math_op) {
        case FLOAT_ABS:
            /* TODO: andps 0x7fffffffffffffff */
            func = is_f32 ? (void *)fabsf : (void *)fabs;
            break;
        case FLOAT_NEG:
            /* TODO: xorps 0x8000000000000000 */
            func = is_f32 ? (void *)negf : (void *)neg;
            break;
        case FLOAT_CEIL:
            func = is_f32 ? (void *)ceilf : (void *)ceil;
            break;
        case FLOAT_FLOOR:
            func = is_f32 ? (void *)floorf : (void *)floor;
            break;
        case FLOAT_TRUNC:
            func = is_f32 ? (void *)truncf : (void *)trunc;
            break;
        case FLOAT_NEAREST:
            func = is_f32 ? (void *)rintf : (void *)rint;
            break;
        case FLOAT_SQRT:
            func = is_f32 ? (void *)sqrtf : (void *)sqrt;
            break;
        default:
            bh_assert(0);
            goto fail;
    }

    /* Emit a native call: res = func(value). */
    if (!jit_emit_callnative(cc, func, res, &value, 1)) {
        goto fail;
    }

    if (is_f32)
        PUSH_F32(res);
    else
        PUSH_F64(res);

    return true;
fail:
    return false;
}
+
/* Public entry: f32 unary math opcodes. */
bool
jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op)
{
    return compile_op_float_math(cc, math_op, true);
}

/* Public entry: f64 unary math opcodes. */
bool
jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op)
{
    return compile_op_float_math(cc, math_op, false);
}
+
+static float32
+f32_min(float32 a, float32 b)
+{
+ if (isnan(a) || isnan(b))
+ return NAN;
+ else if (a == 0 && a == b)
+ return signbit(a) ? a : b;
+ else
+ return a > b ? b : a;
+}
+
+static float32
+f32_max(float32 a, float32 b)
+{
+ if (isnan(a) || isnan(b))
+ return NAN;
+ else if (a == 0 && a == b)
+ return signbit(a) ? b : a;
+ else
+ return a > b ? a : b;
+}
+
+static float64
+f64_min(float64 a, float64 b)
+{
+ if (isnan(a) || isnan(b))
+ return NAN;
+ else if (a == 0 && a == b)
+ return signbit(a) ? a : b;
+ else
+ return a > b ? b : a;
+}
+
+static float64
+f64_max(float64 a, float64 b)
+{
+ if (isnan(a) || isnan(b))
+ return NAN;
+ else if (a == 0 && a == b)
+ return signbit(a) ? b : a;
+ else
+ return a > b ? a : b;
+}
+
+static bool
+compile_op_float_min_max(JitCompContext *cc, FloatArithmetic arith_op,
+ bool is_f32, JitReg lhs, JitReg rhs, JitReg *out)
+{
+ JitReg res, args[2];
+ void *func;
+
+ res = is_f32 ? jit_cc_new_reg_F32(cc) : jit_cc_new_reg_F64(cc);
+ if (arith_op == FLOAT_MIN)
+ func = is_f32 ? (void *)f32_min : (void *)f64_min;
+ else
+ func = is_f32 ? (void *)f32_max : (void *)f64_max;
+
+ args[0] = lhs;
+ args[1] = rhs;
+ if (!jit_emit_callnative(cc, func, res, args, 2))
+ return false;
+
+ *out = res;
+ return true;
+}
+
/* Translate the binary float arithmetic opcodes.  add/sub/mul/div map to
 * plain insns (DIV_S doubles as the float division opcode); min/max need
 * native calls for NaN/-0 semantics.  NOTE(review): for min/max the 'res'
 * allocated up front is discarded and replaced by the helper's register —
 * a small redundant allocation, left as-is.
 */
static bool
compile_op_float_arithmetic(JitCompContext *cc, FloatArithmetic arith_op,
                            bool is_f32)
{
    JitReg lhs, rhs, res;

    if (is_f32) {
        POP_F32(rhs);
        POP_F32(lhs);
        res = jit_cc_new_reg_F32(cc);
    }
    else {
        POP_F64(rhs);
        POP_F64(lhs);
        res = jit_cc_new_reg_F64(cc);
    }

    switch (arith_op) {
        case FLOAT_ADD:
        {
            GEN_INSN(ADD, res, lhs, rhs);
            break;
        }
        case FLOAT_SUB:
        {
            GEN_INSN(SUB, res, lhs, rhs);
            break;
        }
        case FLOAT_MUL:
        {
            GEN_INSN(MUL, res, lhs, rhs);
            break;
        }
        case FLOAT_DIV:
        {
            /* there is no unsigned float divide; DIV_S is the float div */
            GEN_INSN(DIV_S, res, lhs, rhs);
            break;
        }
        case FLOAT_MIN:
        case FLOAT_MAX:
        {
            if (!compile_op_float_min_max(cc, arith_op, is_f32, lhs, rhs, &res))
                goto fail;
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    if (is_f32)
        PUSH_F32(res);
    else
        PUSH_F64(res);

    return true;
fail:
    return false;
}
+
/* Public entry: f32 add/sub/mul/div/min/max. */
bool
jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(cc, arith_op, true);
}

/* Public entry: f64 add/sub/mul/div/min/max. */
bool
jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(cc, arith_op, false);
}
+
/* Translate f32.copysign by calling libm copysignf natively: result has
   the magnitude of args[0] and the sign of args[1]. */
bool
jit_compile_op_f32_copysign(JitCompContext *cc)
{
    JitReg res;
    JitReg args[2] = { 0 };

    /* stack order: magnitude pushed first, sign source second */
    POP_F32(args[1]);
    POP_F32(args[0]);

    res = jit_cc_new_reg_F32(cc);
    if (!jit_emit_callnative(cc, copysignf, res, args, 2))
        goto fail;

    PUSH_F32(res);

    return true;
fail:
    return false;
}

/* Translate f64.copysign via libm copysign. */
bool
jit_compile_op_f64_copysign(JitCompContext *cc)
{
    JitReg res;
    JitReg args[2] = { 0 };

    POP_F64(args[1]);
    POP_F64(args[0]);

    res = jit_cc_new_reg_F64(cc);
    if (!jit_emit_callnative(cc, copysign, res, args, 2))
        goto fail;

    PUSH_F64(res);

    return true;
fail:
    return false;
}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.h
new file mode 100644
index 000000000..e73c3ebad
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_numberic.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_NUMBERIC_H_
+#define _JIT_EMIT_NUMBERIC_H_
+
+#include "../jit_compiler.h"
+#include "../jit_frontend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_i32_clz(JitCompContext *cc);
+
+bool
+jit_compile_op_i32_ctz(JitCompContext *cc);
+
+bool
+jit_compile_op_i32_popcnt(JitCompContext *cc);
+
+bool
+jit_compile_op_i64_clz(JitCompContext *cc);
+
+bool
+jit_compile_op_i64_ctz(JitCompContext *cc);
+
+bool
+jit_compile_op_i64_popcnt(JitCompContext *cc);
+
+bool
+jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
+ uint8 **p_frame_ip);
+
+bool
+jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
+ uint8 **p_frame_ip);
+
+bool
+jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op);
+
+bool
+jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op);
+
+bool
+jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op);
+
+bool
+jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op);
+
+bool
+jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op);
+
+bool
+jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op);
+
+bool
+jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);
+
+bool
+jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);
+
+bool
+jit_compile_op_f32_copysign(JitCompContext *cc);
+
+bool
+jit_compile_op_f64_copysign(JitCompContext *cc);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_NUMBERIC_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.c
new file mode 100644
index 000000000..df0b23a7a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_parametric.h"
+#include "../jit_frontend.h"
+
+/* Pop one value from the compiler's model of the WASM operand stack.
+   is_32bit selects the accepted type class (i32/f32/ref/v128 vs i64/f64).
+   On success the popped JIT register is stored to *p_value and the WASM
+   value type to *p_type; either pointer may be NULL if not wanted. */
+static bool
+pop_value_from_wasm_stack(JitCompContext *cc, bool is_32bit, JitReg *p_value,
+                          uint8 *p_type)
+{
+    JitValue *jit_value;
+    JitReg value;
+    uint8 type;
+
+    /* Require a current block and at least one value on its value stack. */
+    if (!jit_block_stack_top(&cc->block_stack)) {
+        jit_set_last_error(cc, "WASM block stack underflow.");
+        return false;
+    }
+    if (!jit_block_stack_top(&cc->block_stack)->value_stack.value_list_end) {
+        jit_set_last_error(cc, "WASM data stack underflow.");
+        return false;
+    }
+
+    jit_value = jit_value_stack_pop(
+        &jit_block_stack_top(&cc->block_stack)->value_stack);
+    type = jit_value->type;
+
+    if (p_type != NULL) {
+        *p_type = jit_value->type;
+    }
+
+    /* The popped JitValue node is owned by us and no longer needed. */
+    wasm_runtime_free(jit_value);
+
+    /* is_32: i32, f32, ref.func, ref.extern, v128 */
+    if (is_32bit
+        && !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32
+#if WASM_ENABLE_REF_TYPES != 0
+             || type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF
+#endif
+             || type == VALUE_TYPE_V128)) {
+        jit_set_last_error(cc, "invalid WASM stack data type.");
+        return false;
+    }
+    /* !is_32: i64, f64 */
+    if (!is_32bit && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) {
+        jit_set_last_error(cc, "invalid WASM stack data type.");
+        return false;
+    }
+
+    /* Pop the matching register from the jit_frame so both stacks stay
+       in sync. */
+    switch (type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_FUNCREF:
+        case VALUE_TYPE_EXTERNREF:
+#endif
+            value = pop_i32(cc->jit_frame);
+            break;
+        case VALUE_TYPE_I64:
+            value = pop_i64(cc->jit_frame);
+            break;
+        case VALUE_TYPE_F32:
+            value = pop_f32(cc->jit_frame);
+            break;
+        case VALUE_TYPE_F64:
+            value = pop_f64(cc->jit_frame);
+            break;
+        default:
+            bh_assert(0);
+            return false;
+    }
+
+    if (p_value != NULL) {
+        *p_value = value;
+    }
+    return true;
+}
+
+/* Compile drop: discard the top stack value of the expected width.
+   No target code is generated; only the compile-time stacks change. */
+bool
+jit_compile_op_drop(JitCompContext *cc, bool is_drop_32)
+{
+    if (!pop_value_from_wasm_stack(cc, is_drop_32, NULL, NULL))
+        return false;
+    return true;
+}
+
+/* Compile select: pop the i32 condition and two same-typed operands and
+   emit CMP + SELECTNE, so the result is val1 when the condition is
+   non-zero and val2 otherwise. */
+bool
+jit_compile_op_select(JitCompContext *cc, bool is_select_32)
+{
+    JitReg val1, val2, cond, selected;
+    uint8 val1_type, val2_type;
+
+    POP_I32(cond);
+
+    /* val2 is on top of the stack, val1 below it. */
+    if (!pop_value_from_wasm_stack(cc, is_select_32, &val2, &val2_type)
+        || !pop_value_from_wasm_stack(cc, is_select_32, &val1, &val1_type)) {
+        return false;
+    }
+
+    if (val1_type != val2_type) {
+        jit_set_last_error(cc, "invalid stack values with different type");
+        return false;
+    }
+
+    /* Allocate the result register with the operands' type. */
+    switch (val1_type) {
+        case VALUE_TYPE_I32:
+            selected = jit_cc_new_reg_I32(cc);
+            break;
+        case VALUE_TYPE_I64:
+            selected = jit_cc_new_reg_I64(cc);
+            break;
+        case VALUE_TYPE_F32:
+            selected = jit_cc_new_reg_F32(cc);
+            break;
+        case VALUE_TYPE_F64:
+            selected = jit_cc_new_reg_F64(cc);
+            break;
+        default:
+            bh_assert(0);
+            return false;
+    }
+
+    GEN_INSN(CMP, cc->cmp_reg, cond, NEW_CONST(I32, 0));
+    GEN_INSN(SELECTNE, selected, cc->cmp_reg, val1, val2);
+    PUSH(selected, val1_type);
+    return true;
+fail:
+    return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.h
new file mode 100644
index 000000000..40025ed21
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_parametric.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_PARAMETRIC_H_
+#define _JIT_EMIT_PARAMETRIC_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_drop(JitCompContext *cc, bool is_drop_32);
+
+bool
+jit_compile_op_select(JitCompContext *cc, bool is_select_32);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_PARAMETRIC_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.c
new file mode 100644
index 000000000..9fb61931f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_table.h"
+#include "jit_emit_exception.h"
+#include "jit_emit_function.h"
+#include "../../interpreter/wasm_runtime.h"
+#include "../jit_frontend.h"
+
+#if WASM_ENABLE_REF_TYPES != 0
+/* Compile elem.drop: store true into the is_dropped flag of table
+   segment tbl_seg_idx in the module's table_segments array. */
+bool
+jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx)
+{
+    JitReg module, tbl_segs;
+
+    module = get_module_reg(cc->jit_frame);
+
+    /* tbl_segs = module->table_segments */
+    tbl_segs = jit_cc_new_reg_ptr(cc);
+    GEN_INSN(LDPTR, tbl_segs, module,
+             NEW_CONST(I32, offsetof(WASMModule, table_segments)));
+
+    /* tbl_segs[tbl_seg_idx].is_dropped = true */
+    GEN_INSN(STI32, NEW_CONST(I32, true), tbl_segs,
+             NEW_CONST(I32, tbl_seg_idx * sizeof(WASMTableSeg)
+                                + offsetof(WASMTableSeg, is_dropped)));
+    return true;
+}
+
+/* Compile table.get: bounds-check elem_idx against the table's current
+   size, then load the 32-bit element at elems[elem_idx] and push it. */
+bool
+jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx)
+{
+    JitReg elem_idx, tbl_sz, tbl_elems, elem_idx_long, offset, res;
+
+    POP_I32(elem_idx);
+
+    /* if (elem_idx >= tbl_sz) goto exception; */
+    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
+    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
+    if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_TABLE_ACCESS, JIT_OP_BGEU,
+                            cc->cmp_reg, NULL))
+        goto fail;
+
+    /* Widen the index to 64 bits before computing the byte offset. */
+    elem_idx_long = jit_cc_new_reg_I64(cc);
+    GEN_INSN(I32TOI64, elem_idx_long, elem_idx);
+
+    /* offset = elem_idx * sizeof(uint32) */
+    offset = jit_cc_new_reg_I64(cc);
+    GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));
+
+    res = jit_cc_new_reg_I32(cc);
+    tbl_elems = get_table_elems_reg(cc->jit_frame, tbl_idx);
+    GEN_INSN(LDI32, res, tbl_elems, offset);
+    PUSH_I32(res);
+
+    return true;
+fail:
+    return false;
+}
+
+/* Compile table.set: bounds-check elem_idx against the table's current
+   size, then store the 32-bit value into elems[elem_idx]. */
+bool
+jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx)
+{
+    JitReg elem_idx, elem_val, tbl_sz, tbl_elems, elem_idx_long, offset;
+
+    POP_I32(elem_val);
+    POP_I32(elem_idx);
+
+    /* if (elem_idx >= tbl_sz) goto exception; */
+    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
+    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
+    if (!jit_emit_exception(cc, EXCE_OUT_OF_BOUNDS_TABLE_ACCESS, JIT_OP_BGEU,
+                            cc->cmp_reg, NULL))
+        goto fail;
+
+    /* Widen the index to 64 bits before computing the byte offset. */
+    elem_idx_long = jit_cc_new_reg_I64(cc);
+    GEN_INSN(I32TOI64, elem_idx_long, elem_idx);
+
+    /* offset = elem_idx * sizeof(uint32) */
+    offset = jit_cc_new_reg_I64(cc);
+    GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));
+
+    tbl_elems = get_table_elems_reg(cc->jit_frame, tbl_idx);
+    GEN_INSN(STI32, elem_val, tbl_elems, offset);
+
+    return true;
+fail:
+    return false;
+}
+
+/* Native helper behind table.init: copy len function indexes from table
+   segment elem_idx (starting at src) into table tbl_idx (starting at
+   dst).  Returns 0 on success; on a range violation sets the "out of
+   bounds table access" exception on the instance and returns -1. */
+static int
+wasm_init_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 elem_idx,
+                uint32 dst, uint32 len, uint32 src)
+{
+    WASMTableInstance *tbl;
+    uint32 tbl_sz;
+    WASMTableSeg *elem;
+    uint32 elem_len;
+
+    /* Destination range [dst, dst+len) must fit in the table. */
+    tbl = inst->tables[tbl_idx];
+    tbl_sz = tbl->cur_size;
+    if (dst > tbl_sz || tbl_sz - dst < len)
+        goto out_of_bounds;
+
+    /* Source range [src, src+len) must fit in the segment. */
+    elem = inst->module->table_segments + elem_idx;
+    elem_len = elem->function_count;
+    if (src > elem_len || elem_len - src < len)
+        goto out_of_bounds;
+
+    bh_memcpy_s((uint8 *)tbl + offsetof(WASMTableInstance, elems)
+                    + dst * sizeof(uint32),
+                (uint32)((tbl_sz - dst) * sizeof(uint32)),
+                elem->func_indexes + src, (uint32)(len * sizeof(uint32)));
+
+    return 0;
+out_of_bounds:
+    wasm_set_exception(inst, "out of bounds table access");
+    return -1;
+}
+
+/* Compile table.init: pop len/src/dst and emit a native call to
+   wasm_init_table(); a negative return value means an exception was
+   already set on the instance, so re-throw it. */
+bool
+jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
+                          uint32 tbl_seg_idx)
+{
+    JitReg len, src, dst, res;
+    JitReg args[6] = { 0 };
+
+    POP_I32(len);
+    POP_I32(src);
+    POP_I32(dst);
+
+    /* Argument order matches wasm_init_table(inst, tbl, seg, dst, len, src). */
+    res = jit_cc_new_reg_I32(cc);
+    args[0] = get_module_inst_reg(cc->jit_frame);
+    args[1] = NEW_CONST(I32, tbl_idx);
+    args[2] = NEW_CONST(I32, tbl_seg_idx);
+    args[3] = dst;
+    args[4] = len;
+    args[5] = src;
+
+    if (!jit_emit_callnative(cc, wasm_init_table, res, args,
+                             sizeof(args) / sizeof(args[0])))
+        goto fail;
+
+    /* if (res < 0) throw the exception already set by the helper. */
+    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
+    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
+                            NULL))
+        goto fail;
+
+    return true;
+fail:
+    return false;
+}
+
+/* Native helper behind table.copy: move len elements from src_tbl_idx
+   (starting at src_offset) to dst_tbl_idx (starting at dst_offset).
+   Uses bh_memmove_s because the two regions may overlap when the source
+   and destination are the same table.  Returns 0 on success; on a range
+   violation sets the exception and returns -1. */
+static int
+wasm_copy_table(WASMModuleInstance *inst, uint32 src_tbl_idx,
+                uint32 dst_tbl_idx, uint32 dst_offset, uint32 len,
+                uint32 src_offset)
+{
+    WASMTableInstance *src_tbl, *dst_tbl;
+    uint32 src_tbl_sz, dst_tbl_sz;
+
+    /* Source range [src_offset, src_offset+len) must fit. */
+    src_tbl = inst->tables[src_tbl_idx];
+    src_tbl_sz = src_tbl->cur_size;
+    if (src_offset > src_tbl_sz || src_tbl_sz - src_offset < len)
+        goto out_of_bounds;
+
+    /* Destination range [dst_offset, dst_offset+len) must fit. */
+    dst_tbl = inst->tables[dst_tbl_idx];
+    dst_tbl_sz = dst_tbl->cur_size;
+    if (dst_offset > dst_tbl_sz || dst_tbl_sz - dst_offset < len)
+        goto out_of_bounds;
+
+    bh_memmove_s((uint8 *)dst_tbl + offsetof(WASMTableInstance, elems)
+                     + dst_offset * sizeof(uint32),
+                 (uint32)((dst_tbl_sz - dst_offset) * sizeof(uint32)),
+                 (uint8 *)src_tbl + offsetof(WASMTableInstance, elems)
+                     + src_offset * sizeof(uint32),
+                 (uint32)(len * sizeof(uint32)));
+
+    return 0;
+out_of_bounds:
+    wasm_set_exception(inst, "out of bounds table access");
+    return -1;
+}
+
+/* Compile table.copy: pop len/src/dst and emit a native call to
+   wasm_copy_table(); a negative return value means an exception was
+   already set on the instance, so re-throw it. */
+bool
+jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
+                          uint32 dst_tbl_idx)
+{
+    JitReg len, src, dst, res;
+    JitReg args[6] = { 0 };
+
+    POP_I32(len);
+    POP_I32(src);
+    POP_I32(dst);
+
+    /* Argument order matches wasm_copy_table(inst, src_tbl, dst_tbl,
+       dst, len, src). */
+    res = jit_cc_new_reg_I32(cc);
+    args[0] = get_module_inst_reg(cc->jit_frame);
+    args[1] = NEW_CONST(I32, src_tbl_idx);
+    args[2] = NEW_CONST(I32, dst_tbl_idx);
+    args[3] = dst;
+    args[4] = len;
+    args[5] = src;
+
+    if (!jit_emit_callnative(cc, wasm_copy_table, res, args,
+                             sizeof(args) / sizeof(args[0])))
+        goto fail;
+
+    /* if (res < 0) throw the exception already set by the helper. */
+    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
+    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
+                            NULL))
+        goto fail;
+
+    return true;
+fail:
+    return false;
+}
+
+/* Compile table.size: push the register holding the table's current
+   size.  (fail label is only reachable through the PUSH_I32 macro.) */
+bool
+jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx)
+{
+    JitReg res;
+
+    res = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
+    PUSH_I32(res);
+
+    return true;
+fail:
+    return false;
+}
+
+/* Compile table.grow: call wasm_enlarge_table() natively, push the old
+   table size on success or -1 on failure, then clear cached table
+   registers so they are reloaded after the possible reallocation. */
+bool
+jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx)
+{
+    JitReg tbl_sz, n, val, enlarge_ret, res;
+    JitReg args[4] = { 0 };
+
+    POP_I32(n);
+    POP_I32(val);
+
+    /* Capture the size before growing; it is the success return value. */
+    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
+
+    enlarge_ret = jit_cc_new_reg_I32(cc);
+    args[0] = get_module_inst_reg(cc->jit_frame);
+    args[1] = NEW_CONST(I32, tbl_idx);
+    args[2] = n;
+    args[3] = val;
+
+    if (!jit_emit_callnative(cc, wasm_enlarge_table, enlarge_ret, args,
+                             sizeof(args) / sizeof(args[0])))
+        goto fail;
+
+    /* Convert bool to uint32 */
+    GEN_INSN(AND, enlarge_ret, enlarge_ret, NEW_CONST(I32, 0xFF));
+
+    /* res = (enlarge_ret == 1) ? old size : -1 */
+    res = jit_cc_new_reg_I32(cc);
+    GEN_INSN(CMP, cc->cmp_reg, enlarge_ret, NEW_CONST(I32, 1));
+    GEN_INSN(SELECTEQ, res, cc->cmp_reg, tbl_sz, NEW_CONST(I32, -1));
+    PUSH_I32(res);
+
+    /* Ensure a refresh in next get memory related registers */
+    clear_table_regs(cc->jit_frame);
+    return true;
+fail:
+    return false;
+}
+
+/* Native helper behind table.fill: set len elements of table tbl_idx,
+   starting at dst, to val.  Returns 0 on success; on a range violation
+   sets the "out of bounds table access" exception and returns -1. */
+static int
+wasm_fill_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 dst,
+                uint32 val, uint32 len)
+{
+    WASMTableInstance *tbl;
+    uint32 tbl_sz;
+
+    tbl = inst->tables[tbl_idx];
+    tbl_sz = tbl->cur_size;
+
+    /* Range [dst, dst+len) must fit in the table. */
+    if (dst > tbl_sz || tbl_sz - dst < len)
+        goto out_of_bounds;
+
+    for (; len != 0; dst++, len--) {
+        tbl->elems[dst] = val;
+    }
+
+    return 0;
+out_of_bounds:
+    wasm_set_exception(inst, "out of bounds table access");
+    return -1;
+}
+
+/* Compile table.fill: pop len/val/dst and emit a native call to
+   wasm_fill_table(); a negative return value means an exception was
+   already set on the instance, so re-throw it. */
+bool
+jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx)
+{
+    JitReg len, val, dst, res;
+    JitReg args[5] = { 0 };
+
+    POP_I32(len);
+    POP_I32(val);
+    POP_I32(dst);
+
+    /* Argument order matches wasm_fill_table(inst, tbl, dst, val, len). */
+    res = jit_cc_new_reg_I32(cc);
+    args[0] = get_module_inst_reg(cc->jit_frame);
+    args[1] = NEW_CONST(I32, tbl_idx);
+    args[2] = dst;
+    args[3] = val;
+    args[4] = len;
+
+    if (!jit_emit_callnative(cc, wasm_fill_table, res, args,
+                             sizeof(args) / sizeof(args[0])))
+        goto fail;
+
+    /* if (res < 0) throw the exception already set by the helper. */
+    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
+    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
+                            NULL))
+        goto fail;
+
+    return true;
+fail:
+    return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.h
new file mode 100644
index 000000000..acfb655f2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_table.h
@@ -0,0 +1,47 @@
+
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_TABLE_H_
+#define _JIT_EMIT_TABLE_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if WASM_ENABLE_REF_TYPES != 0
+bool
+jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx);
+
+bool
+jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx);
+
+bool
+jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx);
+
+bool
+jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
+ uint32 tbl_seg_idx);
+
+bool
+jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
+ uint32 dst_tbl_idx);
+
+bool
+jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx);
+
+bool
+jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx);
+
+bool
+jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx);
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.c
new file mode 100644
index 000000000..ffbf06ab1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_emit_variable.h"
+#include "jit_emit_exception.h"
+#include "../jit_frontend.h"
+
+/* Fail compilation (goto fail) if idx is not a valid local index, i.e.
+   idx >= param_count + local_count of the current function. */
+#define CHECK_LOCAL(idx)                                                    \
+    do {                                                                    \
+        if (idx                                                             \
+            >= wasm_func->func_type->param_count + wasm_func->local_count) { \
+            jit_set_last_error(cc, "local index out of range");             \
+            goto fail;                                                      \
+        }                                                                   \
+    } while (0)
+
+/* Return the WASM value type of local local_idx: parameters are typed
+   by the function signature, remaining locals by local_types[]. */
+static uint8
+get_local_type(const WASMFunction *wasm_func, uint32 local_idx)
+{
+    uint32 param_count = wasm_func->func_type->param_count;
+    return local_idx < param_count
+               ? wasm_func->func_type->types[local_idx]
+               : wasm_func->local_types[local_idx - param_count];
+}
+
+/* Compile local.get: read the local's register from the JIT frame at
+   its local_offset and push it with the local's WASM type. */
+bool
+jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx)
+{
+    WASMFunction *wasm_func = cc->cur_wasm_func;
+    uint16 *local_offsets = wasm_func->local_offsets;
+    uint16 local_offset;
+    uint8 local_type;
+    JitReg value = 0;
+
+    CHECK_LOCAL(local_idx);
+
+    local_offset = local_offsets[local_idx];
+    local_type = get_local_type(wasm_func, local_idx);
+
+    /* Dispatch on the local's type to pick the matching accessor. */
+    switch (local_type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_EXTERNREF:
+        case VALUE_TYPE_FUNCREF:
+#endif
+            value = local_i32(cc->jit_frame, local_offset);
+
+            break;
+        case VALUE_TYPE_I64:
+            value = local_i64(cc->jit_frame, local_offset);
+            break;
+        case VALUE_TYPE_F32:
+            value = local_f32(cc->jit_frame, local_offset);
+            break;
+        case VALUE_TYPE_F64:
+            value = local_f64(cc->jit_frame, local_offset);
+            break;
+        default:
+            bh_assert(0);
+            break;
+    }
+
+    PUSH(value, local_type);
+    return true;
+fail:
+    return false;
+}
+
+/* Compile local.set: pop a value of the local's type and store it into
+   the JIT frame slot at the local's local_offset. */
+bool
+jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx)
+{
+    WASMFunction *wasm_func = cc->cur_wasm_func;
+    uint16 *local_offsets = wasm_func->local_offsets;
+    uint16 local_offset;
+    uint8 local_type;
+    JitReg value;
+
+    CHECK_LOCAL(local_idx);
+
+    local_offset = local_offsets[local_idx];
+    local_type = get_local_type(wasm_func, local_idx);
+
+    /* Dispatch on the local's type to pick the matching pop/setter. */
+    switch (local_type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_EXTERNREF:
+        case VALUE_TYPE_FUNCREF:
+#endif
+            POP_I32(value);
+            set_local_i32(cc->jit_frame, local_offset, value);
+            break;
+        case VALUE_TYPE_I64:
+            POP_I64(value);
+            set_local_i64(cc->jit_frame, local_offset, value);
+            break;
+        case VALUE_TYPE_F32:
+            POP_F32(value);
+            set_local_f32(cc->jit_frame, local_offset, value);
+            break;
+        case VALUE_TYPE_F64:
+            POP_F64(value);
+            set_local_f64(cc->jit_frame, local_offset, value);
+            break;
+        default:
+            bh_assert(0);
+            break;
+    }
+
+    return true;
+fail:
+    return false;
+}
+
+/* Compile local.tee: like local.set, but the value is pushed back onto
+   the operand stack after being stored to the local. */
+bool
+jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx)
+{
+    WASMFunction *wasm_func = cc->cur_wasm_func;
+    uint16 *local_offsets = wasm_func->local_offsets;
+    uint16 local_offset;
+    uint8 local_type;
+    JitReg value = 0;
+
+    CHECK_LOCAL(local_idx);
+
+    local_offset = local_offsets[local_idx];
+    local_type = get_local_type(wasm_func, local_idx);
+
+    /* Pop, store to the local, then push the same register back. */
+    switch (local_type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_EXTERNREF:
+        case VALUE_TYPE_FUNCREF:
+#endif
+            POP_I32(value);
+            set_local_i32(cc->jit_frame, local_offset, value);
+            PUSH_I32(value);
+            break;
+        case VALUE_TYPE_I64:
+            POP_I64(value);
+            set_local_i64(cc->jit_frame, local_offset, value);
+            PUSH_I64(value);
+            break;
+        case VALUE_TYPE_F32:
+            POP_F32(value);
+            set_local_f32(cc->jit_frame, local_offset, value);
+            PUSH_F32(value);
+            break;
+        case VALUE_TYPE_F64:
+            POP_F64(value);
+            set_local_f64(cc->jit_frame, local_offset, value);
+            PUSH_F64(value);
+            break;
+        default:
+            bh_assert(0);
+            goto fail;
+    }
+
+    return true;
+fail:
+    return false;
+}
+
+/* Return the WASM value type of global global_idx: imported globals
+   precede the module's own globals in the index space. */
+static uint8
+get_global_type(const WASMModule *module, uint32 global_idx)
+{
+    if (global_idx < module->import_global_count) {
+        const WASMGlobalImport *import_global =
+            &((module->import_globals + global_idx)->u.global);
+        return import_global->type;
+    }
+    else {
+        const WASMGlobal *global =
+            module->globals + (global_idx - module->import_global_count);
+        return global->type;
+    }
+}
+
+/* Compile global.get: load the global's value from the module instance
+   at its precomputed data offset and push it with the global's type. */
+bool
+jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx)
+{
+    uint32 data_offset;
+    uint8 global_type = 0;
+    JitReg value = 0;
+
+    bh_assert(global_idx < cc->cur_wasm_module->import_global_count
+                               + cc->cur_wasm_module->global_count);
+
+    /* Offset of this global's storage inside the module instance. */
+    data_offset =
+        jit_frontend_get_global_data_offset(cc->cur_wasm_module, global_idx);
+    global_type = get_global_type(cc->cur_wasm_module, global_idx);
+
+    /* Emit a typed load from module_inst + data_offset. */
+    switch (global_type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_EXTERNREF:
+        case VALUE_TYPE_FUNCREF:
+#endif
+        {
+            value = jit_cc_new_reg_I32(cc);
+            GEN_INSN(LDI32, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_I64:
+        {
+            value = jit_cc_new_reg_I64(cc);
+            GEN_INSN(LDI64, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_F32:
+        {
+            value = jit_cc_new_reg_F32(cc);
+            GEN_INSN(LDF32, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_F64:
+        {
+            value = jit_cc_new_reg_F64(cc);
+            GEN_INSN(LDF64, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        default:
+        {
+            jit_set_last_error(cc, "unexpected global type");
+            goto fail;
+        }
+    }
+
+    PUSH(value, global_type);
+
+    return true;
+fail:
+    return false;
+}
+
+/* Compile global.set: pop a value of the global's type and store it into
+   the module instance at the global's data offset.  When the global is
+   the auxiliary stack pointer (is_aux_stack), first emit bound/bottom
+   checks that raise aux-stack overflow/underflow exceptions. */
+bool
+jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
+                          bool is_aux_stack)
+{
+    uint32 data_offset;
+    uint8 global_type = 0;
+    JitReg value = 0;
+
+    bh_assert(global_idx < cc->cur_wasm_module->import_global_count
+                               + cc->cur_wasm_module->global_count);
+
+    /* Offset of this global's storage inside the module instance. */
+    data_offset =
+        jit_frontend_get_global_data_offset(cc->cur_wasm_module, global_idx);
+    global_type = get_global_type(cc->cur_wasm_module, global_idx);
+
+    switch (global_type) {
+        case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+        case VALUE_TYPE_EXTERNREF:
+        case VALUE_TYPE_FUNCREF:
+#endif
+        {
+            POP_I32(value);
+            if (is_aux_stack) {
+                /* The aux stack grows downward: the new top must stay
+                   strictly between bound (lowest) and bottom (highest). */
+                JitReg aux_stack_bound = get_aux_stack_bound_reg(cc->jit_frame);
+                JitReg aux_stack_bottom =
+                    get_aux_stack_bottom_reg(cc->jit_frame);
+                GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bound);
+                if (!(jit_emit_exception(cc, EXCE_AUX_STACK_OVERFLOW,
+                                         JIT_OP_BLEU, cc->cmp_reg, NULL)))
+                    goto fail;
+                GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bottom);
+                if (!(jit_emit_exception(cc, EXCE_AUX_STACK_UNDERFLOW,
+                                         JIT_OP_BGTU, cc->cmp_reg, NULL)))
+                    goto fail;
+            }
+            GEN_INSN(STI32, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_I64:
+        {
+            POP_I64(value);
+            GEN_INSN(STI64, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_F32:
+        {
+            POP_F32(value);
+            GEN_INSN(STF32, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        case VALUE_TYPE_F64:
+        {
+            POP_F64(value);
+            GEN_INSN(STF64, value, get_module_inst_reg(cc->jit_frame),
+                     NEW_CONST(I32, data_offset));
+            break;
+        }
+        default:
+        {
+            jit_set_last_error(cc, "unexpected global type");
+            goto fail;
+        }
+    }
+
+    return true;
+fail:
+    return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.h
new file mode 100644
index 000000000..80a10511d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/fast-jit/fe/jit_emit_variable.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_EMIT_VARIABLE_H_
+#define _JIT_EMIT_VARIABLE_H_
+
+#include "../jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx);
+
+bool
+jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx);
+
+bool
+jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx);
+
+bool
+jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx);
+
+bool
+jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
+ bool is_aux_stack);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _JIT_EMIT_VARIABLE_H_ */