Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd')
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c  |  418
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h  |  92
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c  |  144
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h  |  35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c  |  133
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h  |  35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c  |  144
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h  |  23
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c  |  138
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h  |  39
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c  |  157
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h  |  45
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c  |  229
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h  |  43
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c  |  135
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h  |  27
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c  |  743
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h  |  90
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c  |  388
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h  |  107
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c  |  406
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h  |  91
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c  |  331
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h  |  49
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c  |  81
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h  |  33
26 files changed, 4156 insertions(+), 0 deletions(-)
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c
new file mode 100644
index 000000000..4f43c35a9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_access_lanes.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+bool
+aot_compile_simd_shuffle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *frame_ip)
+{
+ LLVMValueRef vec1, vec2, mask, result;
+ uint8 imm[16] = { 0 };
+ int values[16];
+ unsigned i;
+
+ wasm_runtime_read_v128(frame_ip, (uint64 *)imm, (uint64 *)(imm + 8));
+ for (i = 0; i < 16; i++) {
+ values[i] = imm[i];
+ }
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "vec1"))) {
+ goto fail;
+ }
+
+ /* build a vector <16 x i32> */
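+    /* LLVMBuildShuffleVector requires a compile-time constant mask, so
+       the 16 immediate lane indices become a constant <16 x i32> vector */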
+ if (!(mask = simd_build_const_integer_vector(comp_ctx, I32_TYPE, values,
+ 16))) {
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
+ "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+
+fail:
+ return false;
+}
+
+/*TODO: llvm.experimental.vector.*/
+/* shufflevector is not an option, since it requires the mask to be a
+   constant */
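+/* the x86 lowering uses pshufb, which zeroes any destination lane whose
+   mask byte has the high bit set; out-of-range indices (>= 16) are
+   therefore rewritten to 0x80 before the call so the result matches
+   Wasm's i8x16.swizzle semantics */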
+bool
+aot_compile_simd_swizzle_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, mask, max_lanes, condition, mask_lanes, result;
+ LLVMTypeRef param_types[2];
+
+ if (!(mask = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "mask"))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vec"))) {
+ goto fail;
+ }
+
+ /* icmp uge <16 x i8> mask, <16, 16, 16, 16, ...> */
+ if (!(max_lanes = simd_build_splat_const_integer_vector(comp_ctx, INT8_TYPE,
+ 16, 16))) {
+ goto fail;
+ }
+
+    /* a mask byte with its high bit set makes pshufb write zero
+       instead of picking a lane from the vector */
+ /* select <16 x i1> %condition, <16 x i8> <0x80, 0x80, ...>,
+ <16 x i8> %mask */
+ if (!(mask_lanes = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 0x80, 16))) {
+ goto fail;
+ }
+
+ if (!(condition = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, mask,
+ max_lanes, "compare_with_16"))) {
+ HANDLE_FAILURE("LLVMBuldICmp");
+ goto fail;
+ }
+
+ if (!(mask = LLVMBuildSelect(comp_ctx->builder, condition, mask_lanes, mask,
+ "mask"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ param_types[0] = V128_i8x16_TYPE;
+ param_types[1] = V128_i8x16_TYPE;
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, "llvm.x86.ssse3.pshuf.b.128", V128_i8x16_TYPE,
+ param_types, 2, vector, mask))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+aot_compile_simd_swizzle_common(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, mask, default_lane_value, condition, max_lane_id,
+ result, idx, id, replace_with_zero, elem, elem_or_zero, undef;
+ uint8 i;
+
+ if (!(mask = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "mask"))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vec"))) {
+ goto fail;
+ }
+
+ if (!(undef = LLVMGetUndef(V128_i8x16_TYPE))) {
+ HANDLE_FAILURE("LLVMGetUndef");
+ goto fail;
+ }
+
+ /* icmp uge <16 x i8> mask, <16, 16, 16, 16, ...> */
+ if (!(max_lane_id = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 16, 16))) {
+ goto fail;
+ }
+
+ if (!(condition = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, mask,
+ max_lane_id, "out_of_range"))) {
+ HANDLE_FAILURE("LLVMBuldICmp");
+ goto fail;
+ }
+
+    /* if the lane id is out of range (>= 16), set it to 0 */
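+    /* %condition still flags the out-of-range lanes, so the extracted
+       element can be forced to zero in the per-lane loop below */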
+ if (!(default_lane_value = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 0, 16))) {
+ goto fail;
+ }
+
+ if (!(idx = LLVMBuildSelect(comp_ctx->builder, condition,
+ default_lane_value, mask, "mask"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (!(id = LLVMBuildExtractElement(comp_ctx->builder, idx, I8_CONST(i),
+ "id"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(replace_with_zero =
+ LLVMBuildExtractElement(comp_ctx->builder, condition,
+ I8_CONST(i), "replace_with_zero"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(elem = LLVMBuildExtractElement(comp_ctx->builder, vector, id,
+ "vector[mask[i]]"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(elem_or_zero =
+ LLVMBuildSelect(comp_ctx->builder, replace_with_zero,
+ I8_CONST(0), elem, "elem_or_zero"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ if (!(undef =
+ LLVMBuildInsertElement(comp_ctx->builder, undef, elem_or_zero,
+ I8_CONST(i), "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+ }
+
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, undef, V128_i64x2_TYPE,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ if (is_target_x86(comp_ctx)) {
+ return aot_compile_simd_swizzle_x86(comp_ctx, func_ctx);
+ }
+ else {
+ return aot_compile_simd_swizzle_common(comp_ctx, func_ctx);
+ }
+}
+
+static bool
+aot_compile_simd_extract(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id, bool need_extend, bool is_signed,
+ LLVMTypeRef vector_type, LLVMTypeRef result_type,
+ unsigned aot_value_type)
+{
+ LLVMValueRef vector, lane, result;
+
+ if (!(lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* bitcast <2 x i64> %0 to <vector_type> */
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ goto fail;
+ }
+
+ /* extractelement <vector_type> %vector, i8 lane_id*/
+ if (!(result = LLVMBuildExtractElement(comp_ctx->builder, vector, lane,
+ "element"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
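+    /* i8x16/i16x8 extract_lane_s/u widen the scalar to i32; 32/64-bit
+       integer and float lanes are pushed unchanged */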
+ if (need_extend) {
+ if (is_signed) {
+ /* sext <element_type> %element to <result_type> */
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, result, result_type,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+ }
+ else {
+            /* zext <element_type> %element to <result_type> */
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, result_type,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+ }
+ }
+
+ PUSH(result, aot_value_type);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
+ is_signed, V128_i8x16_TYPE, I32_TYPE,
+ VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
+ is_signed, V128_i16x8_TYPE, I32_TYPE,
+ VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_i32x4_TYPE, I32_TYPE, VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_i64x2_TYPE, I64_TYPE, VALUE_TYPE_I64);
+}
+
+bool
+aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_f32x4_TYPE, F32_TYPE, VALUE_TYPE_F32);
+}
+
+bool
+aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_f64x2_TYPE, F64_TYPE, VALUE_TYPE_F64);
+}
+
+static bool
+aot_compile_simd_replace(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id, unsigned new_value_type,
+ LLVMTypeRef vector_type, bool need_reduce,
+ LLVMTypeRef element_type)
+{
+ LLVMValueRef vector, new_value, lane, result;
+
+ POP(new_value, new_value_type);
+
+ if (!(lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ goto fail;
+ }
+
+ /* trunc <new_value_type> to <element_type> */
+ if (need_reduce) {
+ if (!(new_value = LLVMBuildTrunc(comp_ctx->builder, new_value,
+ element_type, "element"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ goto fail;
+ }
+ }
+
+ /* insertelement <vector_type> %vector, <element_type> %element,
+ i32 lane */
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder, vector, new_value,
+ lane, "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i8x16_TYPE, true, INT8_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i16x8_TYPE, true, INT16_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i32x4_TYPE, false, I32_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I64,
+ V128_i64x2_TYPE, false, I64_TYPE);
+}
+
+bool
+aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_F32,
+ V128_f32x4_TYPE, false, F32_TYPE);
+}
+
+bool
+aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_F64,
+ V128_f64x2_TYPE, false, F64_TYPE);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h
new file mode 100644
index 000000000..75ca71ced
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_ACCESS_LANES_H_
+#define _SIMD_ACCESS_LANES_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_shuffle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *frame_ip);
+
+bool
+aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed);
+
+bool
+aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed);
+
+bool
+aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_load8_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load16_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load32_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load64_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_ACCESS_LANES_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c
new file mode 100644
index 000000000..675ffbcfe
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bit_shifts.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_shift {
+ e_shift_i8x16,
+ e_shift_i16x8,
+ e_shift_i32x4,
+ e_shift_i64x2,
+};
+
+static bool
+simd_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op, enum integer_shift itype)
+{
+ LLVMValueRef vector, offset, result = NULL;
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMTypeRef element_type[] = { INT8_TYPE, INT16_TYPE, I32_TYPE, I64_TYPE };
+
+ LLVMValueRef undef[] = { LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef), LLVM_CONST(i64x2_undef) };
+ LLVMValueRef mask[] = { LLVM_CONST(i8x16_vec_zero),
+ LLVM_CONST(i16x8_vec_zero),
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero) };
+ LLVMValueRef lane_bits[] = {
+ LLVM_CONST(i32_eight),
+ LLVMConstInt(I32_TYPE, 16, true),
+ LLVMConstInt(I32_TYPE, 32, true),
+ LLVMConstInt(I32_TYPE, 64, true),
+ };
+
+ POP_I32(offset);
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vec"))) {
+ return false;
+ }
+
+ /* offset mod LaneBits */
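+    /* the Wasm spec defines the shift count modulo the lane width,
+       e.g. i8x16.shl with a count of 9 shifts each lane by 1 */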
+ if (!lane_bits[itype]
+ || !(offset = LLVMBuildSRem(comp_ctx->builder, offset, lane_bits[itype],
+ "offset_fix"))) {
+ HANDLE_FAILURE("LLVMBuildSRem");
+ return false;
+ }
+
+ /* change type */
+ if (itype < e_shift_i32x4) {
+ offset = LLVMBuildTrunc(comp_ctx->builder, offset, element_type[itype],
+ "offset_trunc");
+ }
+ else if (itype == e_shift_i64x2) {
+ offset = LLVMBuildZExt(comp_ctx->builder, offset, element_type[itype],
+ "offset_ext");
+ }
+
+ if (!offset) {
+ HANDLE_FAILURE("LLVMBuildZext/LLVMBuildTrunc");
+ return false;
+ }
+
+ /* splat to a vector */
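+    /* standard LLVM splat idiom: insert the scalar into lane 0 of an
+       undef vector, then shuffle with an all-zero mask to broadcast it
+       to every lane */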
+ if (!(offset =
+ LLVMBuildInsertElement(comp_ctx->builder, undef[itype], offset,
+ I32_ZERO, "offset_vector_base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ if (!(offset =
+ LLVMBuildShuffleVector(comp_ctx->builder, offset, undef[itype],
+ mask[itype], "offset_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ switch (shift_op) {
+ case INT_SHL:
+ {
+ result = LLVMBuildShl(comp_ctx->builder, vector, offset, "shl");
+ break;
+ }
+ case INT_SHR_S:
+ {
+ result = LLVMBuildAShr(comp_ctx->builder, vector, offset, "ashr");
+ break;
+ }
+ case INT_SHR_U:
+ {
+ result = LLVMBuildLShr(comp_ctx->builder, vector, offset, "lshr");
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildShl/LLVMBuildLShr/LLVMBuildAShr");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i32x4);
+}
+
+bool
+aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i64x2);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h
new file mode 100644
index 000000000..06e86cad0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BIT_SHIFTS_H_
+#define _SIMD_BIT_SHIFTS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BIT_SHIFTS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c
new file mode 100644
index 000000000..67d965426
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bitmask_extracts.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_bitmask_type {
+ e_bitmask_i8x16,
+ e_bitmask_i16x8,
+ e_bitmask_i32x4,
+ e_bitmask_i64x2,
+};
+
+/* TODO: should use a cleverer intrinsic */
+static bool
+simd_build_bitmask(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx,
+ enum integer_bitmask_type itype)
+{
+ LLVMValueRef vector, mask, result;
+ uint8 i;
+ LLVMTypeRef vector_ext_type;
+
+ uint32 lanes[] = { 16, 8, 4, 2 };
+ uint32 lane_bits[] = { 8, 16, 32, 64 };
+ LLVMTypeRef element_type[] = { INT8_TYPE, INT16_TYPE, I32_TYPE, I64_TYPE };
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ int32 mask_element[16] = { 0 };
+ const char *intrinsic[] = {
+ "llvm.vector.reduce.or.v16i64",
+ "llvm.vector.reduce.or.v8i64",
+ "llvm.vector.reduce.or.v4i64",
+ "llvm.vector.reduce.or.v2i64",
+ };
+
+ LLVMValueRef ashr_distance;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vec"))) {
+ goto fail;
+ }
+
+    /* fill every bit in a lane with its sign bit */
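+    /* after the arithmetic shift every lane is all-ones or all-zeros;
+       widening to i64, masking lane i with (1 << i) and OR-reducing
+       makes bit i of the result mirror lane i's sign bit */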
+ if (!(ashr_distance = simd_build_splat_const_integer_vector(
+ comp_ctx, element_type[itype], lane_bits[itype] - 1,
+ lanes[itype]))) {
+ goto fail;
+ }
+
+ if (!(vector = LLVMBuildAShr(comp_ctx->builder, vector, ashr_distance,
+ "vec_ashr"))) {
+ HANDLE_FAILURE("LLVMBuildAShr");
+ goto fail;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I64_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ goto fail;
+ }
+
+ if (e_bitmask_i64x2 != itype) {
+        if (!(vector = LLVMBuildSExt(comp_ctx->builder, vector, vector_ext_type,
+                                     "sext_to_i64"))) {
+            HANDLE_FAILURE("LLVMBuildSExt");
+            goto fail;
+        }
+ }
+
+ for (i = 0; i < 16; i++) {
+ mask_element[i] = 0x1 << i;
+ }
+
+ if (!(mask = simd_build_const_integer_vector(comp_ctx, I64_TYPE,
+ mask_element, lanes[itype]))) {
+ goto fail;
+ }
+
+ if (!(vector =
+ LLVMBuildAnd(comp_ctx->builder, vector, mask, "mask_bits"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic[itype],
+ I64_TYPE, &vector_ext_type, 1, vector))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildTrunc(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i32x4);
+}
+
+bool
+aot_compile_simd_i64x2_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i64x2);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h
new file mode 100644
index 000000000..aac4cc2ce
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BITMASK_EXTRACTS_H_
+#define _SIMD_BITMASK_EXTRACTS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BITMASK_EXTRACTS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c
new file mode 100644
index 000000000..66aef3637
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bitwise_ops.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+v128_bitwise_two_component(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Bitwise bitwise_op)
+{
+ LLVMValueRef vector1, vector2, result;
+
+ POP_V128(vector2);
+ POP_V128(vector1);
+
+ switch (bitwise_op) {
+ case V128_AND:
+ if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
+ "and"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+ break;
+ case V128_OR:
+ if (!(result =
+ LLVMBuildOr(comp_ctx->builder, vector1, vector2, "or"))) {
+                HANDLE_FAILURE("LLVMBuildOr");
+ goto fail;
+ }
+ break;
+ case V128_XOR:
+ if (!(result = LLVMBuildXor(comp_ctx->builder, vector1, vector2,
+ "xor"))) {
+                HANDLE_FAILURE("LLVMBuildXor");
+ goto fail;
+ }
+ break;
+ case V128_ANDNOT:
+ {
+ /* v128.and(a, v128.not(b)) */
+ if (!(vector2 = LLVMBuildNot(comp_ctx->builder, vector2, "not"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
+ "and"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+
+ break;
+ }
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ PUSH_V128(result);
+ return true;
+fail:
+ return false;
+}
+
+static bool
+v128_bitwise_not(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, result;
+
+ POP_V128(vector);
+
+ if (!(result = LLVMBuildNot(comp_ctx->builder, vector, "not"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+ return true;
+fail:
+ return false;
+}
+
+/* v128.or(v128.and(v1, c), v128.and(v2, v128.not(c))) */
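+/* per bit: take the bit from v1 where the mask c is 1, else from v2 */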
+static bool
+v128_bitwise_bitselect(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector1, vector2, vector3, result;
+
+ POP_V128(vector3);
+ POP_V128(vector2);
+ POP_V128(vector1);
+
+ if (!(vector1 =
+ LLVMBuildAnd(comp_ctx->builder, vector1, vector3, "a_and_c"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ goto fail;
+ }
+
+ if (!(vector3 = LLVMBuildNot(comp_ctx->builder, vector3, "not_c"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ if (!(vector2 =
+ LLVMBuildAnd(comp_ctx->builder, vector2, vector3, "b_and_c"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildOr(comp_ctx->builder, vector1, vector2, "a_or_b"))) {
+ HANDLE_FAILURE("LLVMBuildOr");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, V128Bitwise bitwise_op)
+{
+ switch (bitwise_op) {
+ case V128_AND:
+ case V128_OR:
+ case V128_XOR:
+ case V128_ANDNOT:
+ return v128_bitwise_two_component(comp_ctx, func_ctx, bitwise_op);
+ case V128_NOT:
+ return v128_bitwise_not(comp_ctx, func_ctx);
+ case V128_BITSELECT:
+ return v128_bitwise_bitselect(comp_ctx, func_ctx);
+ default:
+ bh_assert(0);
+ return false;
+ }
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h
new file mode 100644
index 000000000..ddf81c0b7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BITWISE_OPS_H_
+#define _SIMD_BITWISE_OPS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, V128Bitwise bitwise_op);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BITWISE_OPS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c
new file mode 100644
index 000000000..4607d680a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bool_reductions.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_all_true {
+ e_int_all_true_v16i8,
+ e_int_all_true_v8i16,
+ e_int_all_true_v4i32,
+ e_int_all_true_v2i64,
+};
+
+static bool
+simd_all_true(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_all_true itype)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef vector_i1_type;
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ uint32 lanes[] = { 16, 8, 4, 2 };
+ const char *intrinsic[] = {
+ "llvm.vector.reduce.and.v16i1",
+ "llvm.vector.reduce.and.v8i1",
+ "llvm.vector.reduce.and.v4i1",
+ "llvm.vector.reduce.and.v2i1",
+ };
+ LLVMValueRef zero[] = {
+ LLVM_CONST(i8x16_vec_zero),
+ LLVM_CONST(i16x8_vec_zero),
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero),
+ };
+
+ if (!(vector_i1_type = LLVMVectorType(INT1_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vector"))) {
+ goto fail;
+ }
+
+ /* compare with zero */
+ if (!(result = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, vector,
+ zero[itype], "ne_zero"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ goto fail;
+ }
+
+    /* AND-reduce the <N x i1> lane flags: the result is 1 only
+       if every lane is non-zero */
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic[itype],
+ INT1_TYPE, &vector_i1_type, 1, result))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v16i8);
+}
+
+bool
+aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v8i16);
+}
+
+bool
+aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v4i32);
+}
+
+bool
+aot_compile_simd_i64x2_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v2i64);
+}
+
+bool
+aot_compile_simd_v128_any_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMTypeRef vector_type;
+ LLVMValueRef vector, result;
+
+ if (!(vector_type = LLVMVectorType(INT1_TYPE, 128))) {
+ return false;
+ }
+
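+    /* reinterpret the 128 bits as <128 x i1> and OR-reduce them:
+       the result is 1 iff any bit of the v128 is set */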
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ goto fail;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, "llvm.vector.reduce.or.v128i1", INT1_TYPE,
+ &vector_type, 1, vector))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h
new file mode 100644
index 000000000..649d5a5e2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BOOL_REDUCTIONS_H_
+#define _SIMD_BOOL_REDUCTIONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_v128_any_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BOOL_REDUCTIONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c
new file mode 100644
index 000000000..95bcdfdb0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+
+LLVMValueRef
+simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMTypeRef vec_type,
+ const char *name)
+{
+ LLVMValueRef number;
+
+ POP_V128(number);
+
+ if (!(number =
+ LLVMBuildBitCast(comp_ctx->builder, number, vec_type, name))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ return number;
+fail:
+ return NULL;
+}
+
+bool
+simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMValueRef vector,
+ const char *name)
+{
+ if (!(vector = LLVMBuildBitCast(comp_ctx->builder, vector, V128_i64x2_TYPE,
+ name))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ /* push result into the stack */
+ PUSH_V128(vector);
+
+ return true;
+fail:
+ return false;
+}
+
+LLVMValueRef
+simd_lane_id_to_llvm_value(AOTCompContext *comp_ctx, uint8 lane_id)
+{
+ LLVMValueRef lane_indexes[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+
+ return lane_id < 16 ? lane_indexes[lane_id] : NULL;
+}
+
+LLVMValueRef
+simd_build_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int *element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ for (i = 0; i < length; i++) {
+ if (!(elements[i] =
+ LLVMConstInt(element_type, element_value[i], true))) {
+ HANDLE_FAILURE("LLVMConstInst");
+ goto fail;
+ }
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
+
+LLVMValueRef
+simd_build_splat_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int64 element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL, element;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ if (!(element = LLVMConstInt(element_type, element_value, true))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ for (i = 0; i < length; i++) {
+ elements[i] = element;
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
+
+LLVMValueRef
+simd_build_splat_const_float_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const float element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL, element;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ if (!(element = LLVMConstReal(element_type, element_value))) {
+ HANDLE_FAILURE("LLVMConstReal");
+ goto fail;
+ }
+
+ for (i = 0; i < length; i++) {
+ elements[i] = element;
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h
new file mode 100644
index 000000000..c7a08dbc7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_COMMON_H_
+#define _SIMD_COMMON_H_
+
+#include "../aot_compiler.h"
+
+static inline bool
+is_target_x86(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "x86_64", 6)
+ || !strncmp(comp_ctx->target_arch, "i386", 4);
+}
+
+LLVMValueRef
+simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMTypeRef vec_type,
+ const char *name);
+
+bool
+simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMValueRef vector,
+ const char *name);
+
+LLVMValueRef
+simd_lane_id_to_llvm_value(AOTCompContext *comp_ctx, uint8 lane_id);
+
+LLVMValueRef
+simd_build_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int *element_value, uint32 length);
+
+LLVMValueRef
+simd_build_splat_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int64 element_value, uint32 length);
+
+LLVMValueRef
+simd_build_splat_const_float_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const float element_value, uint32 length);
+#endif /* _SIMD_COMMON_H_ */
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c
new file mode 100644
index 000000000..8a87ab25b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_comparisons.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+float_cond_2_predicate(FloatCond cond, LLVMRealPredicate *out)
+{
+ switch (cond) {
+ case FLOAT_EQ:
+ *out = LLVMRealOEQ;
+ break;
+ case FLOAT_NE:
+ *out = LLVMRealUNE;
+ break;
+ case FLOAT_LT:
+ *out = LLVMRealOLT;
+ break;
+ case FLOAT_GT:
+ *out = LLVMRealOGT;
+ break;
+ case FLOAT_LE:
+ *out = LLVMRealOLE;
+ break;
+ case FLOAT_GE:
+ *out = LLVMRealOGE;
+ break;
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+int_cond_2_predicate(IntCond cond, LLVMIntPredicate *out)
+{
+ switch (cond) {
+ case INT_EQZ:
+ case INT_EQ:
+ *out = LLVMIntEQ;
+ break;
+ case INT_NE:
+ *out = LLVMIntNE;
+ break;
+ case INT_LT_S:
+ *out = LLVMIntSLT;
+ break;
+ case INT_LT_U:
+ *out = LLVMIntULT;
+ break;
+ case INT_GT_S:
+ *out = LLVMIntSGT;
+ break;
+ case INT_GT_U:
+ *out = LLVMIntUGT;
+ break;
+ case INT_LE_S:
+ *out = LLVMIntSLE;
+ break;
+ case INT_LE_U:
+ *out = LLVMIntULE;
+ break;
+ case INT_GE_S:
+ *out = LLVMIntSGE;
+ break;
+ case INT_GE_U:
+ *out = LLVMIntUGE;
+ break;
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+integer_vector_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+                       IntCond cond, LLVMTypeRef vector_type)
+{
+ LLVMValueRef vec1, vec2, result;
+ LLVMIntPredicate int_pred;
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec1"))) {
+ goto fail;
+ }
+
+ if (!int_cond_2_predicate(cond, &int_pred)) {
+ HANDLE_FAILURE("int_cond_2_predicate");
+ goto fail;
+ }
+ /* icmp <N x iX> %vec1, %vec2 */
+ if (!(result =
+ LLVMBuildICmp(comp_ctx->builder, int_pred, vec1, vec2, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ goto fail;
+ }
+
+ /* sext <N x i1> %result to <N x iX> */
+ if (!(result =
+ LLVMBuildSExt(comp_ctx->builder, result, vector_type, "ext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+
+ /* bitcast <N x iX> %result to <2 x i64> */
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "result"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+    return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+    return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+    return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+    return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i64x2_TYPE);
+}
+
+static bool
+float_vector_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond, LLVMTypeRef vector_type,
+ LLVMTypeRef result_type)
+{
+ LLVMValueRef vec1, vec2, result;
+ LLVMRealPredicate real_pred;
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec1"))) {
+ goto fail;
+ }
+
+ if (!float_cond_2_predicate(cond, &real_pred)) {
+ HANDLE_FAILURE("float_cond_2_predicate");
+ goto fail;
+ }
+ /* fcmp <N x iX> %vec1, %vec2 */
+ if (!(result =
+ LLVMBuildFCmp(comp_ctx->builder, real_pred, vec1, vec2, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildFCmp");
+ goto fail;
+ }
+
+ /* sext <N x i1> %result to <N x iX> */
+ if (!(result =
+ LLVMBuildSExt(comp_ctx->builder, result, result_type, "ext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+
+ /* bitcast <N x iX> %result to <2 x i64> */
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "result"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond)
+{
+ return float_vector_compare(comp_ctx, func_ctx, cond, V128_f32x4_TYPE,
+ V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond)
+{
+ return float_vector_compare(comp_ctx, func_ctx, cond, V128_f64x2_TYPE,
+ V128_i64x2_TYPE);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h
new file mode 100644
index 000000000..322ebefb2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_COMPARISONS_H_
+#define _SIMD_COMPARISONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond);
+
+bool
+aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_COMPARISONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c
new file mode 100644
index 000000000..ceb09e370
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+#include "simd_construct_values.h"
+#include "../aot_emit_exception.h"
+#include "../interpreter/wasm_opcode.h"
+#include "../../aot/aot_runtime.h"
+
+bool
+aot_compile_simd_v128_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *imm_bytes)
+{
+ uint64 imm1, imm2;
+ LLVMValueRef first_long, agg1, second_long, agg2;
+
+ wasm_runtime_read_v128(imm_bytes, &imm1, &imm2);
+
+    /* %agg1 = insertelement <2 x i64> undef, i64 %imm1, i32 0 */
+ if (!(first_long = I64_CONST(imm1))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(agg1 =
+ LLVMBuildInsertElement(comp_ctx->builder, LLVM_CONST(i64x2_undef),
+ first_long, I32_ZERO, "agg1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+    /* %agg2 = insertelement <2 x i64> %agg1, i64 %imm2, i32 1 */
+ if (!(second_long = I64_CONST(imm2))) {
+ HANDLE_FAILURE("LLVMGetUndef");
+ goto fail;
+ }
+
+ if (!(agg2 = LLVMBuildInsertElement(comp_ctx->builder, agg1, second_long,
+ I32_ONE, "agg2"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ PUSH_V128(agg2);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode)
+{
+ uint32 opcode_index = opcode - SIMD_i8x16_splat;
+ LLVMValueRef value = NULL, base, new_vector;
+ LLVMValueRef undefs[] = {
+ LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef), LLVM_CONST(i64x2_undef),
+ LLVM_CONST(f32x4_undef), LLVM_CONST(f64x2_undef),
+ };
+ LLVMValueRef masks[] = {
+ LLVM_CONST(i32x16_zero), LLVM_CONST(i32x8_zero), LLVM_CONST(i32x4_zero),
+ LLVM_CONST(i32x2_zero), LLVM_CONST(i32x4_zero), LLVM_CONST(i32x2_zero),
+ };
+
+ switch (opcode) {
+ case SIMD_i8x16_splat:
+ {
+ LLVMValueRef input;
+ POP_I32(input);
+ /* trunc i32 %input to i8 */
+ value =
+ LLVMBuildTrunc(comp_ctx->builder, input, INT8_TYPE, "trunc");
+ break;
+ }
+ case SIMD_i16x8_splat:
+ {
+ LLVMValueRef input;
+ POP_I32(input);
+ /* trunc i32 %input to i16 */
+ value =
+ LLVMBuildTrunc(comp_ctx->builder, input, INT16_TYPE, "trunc");
+ break;
+ }
+ case SIMD_i32x4_splat:
+ {
+ POP_I32(value);
+ break;
+ }
+ case SIMD_i64x2_splat:
+ {
+ POP(value, VALUE_TYPE_I64);
+ break;
+ }
+ case SIMD_f32x4_splat:
+ {
+ POP(value, VALUE_TYPE_F32);
+ break;
+ }
+ case SIMD_f64x2_splat:
+ {
+ POP(value, VALUE_TYPE_F64);
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+
+ if (!value) {
+ goto fail;
+ }
+
+ /* insertelement <n x ty> undef, ty %value, i32 0 */
+ if (!(base = LLVMBuildInsertElement(comp_ctx->builder, undefs[opcode_index],
+ value, I32_ZERO, "base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ /* shufflevector <ty1> %base, <ty2> undef, <n x i32> zeroinitializer */
+ if (!(new_vector = LLVMBuildShuffleVector(
+ comp_ctx->builder, base, undefs[opcode_index],
+ masks[opcode_index], "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, new_vector, "result");
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h
new file mode 100644
index 000000000..8cd50c88b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_CONSTRUCT_VALUES_H_
+#define _SIMD_CONSTRUCT_VALUES_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *imm_bytes);
+
+bool
+aot_compile_simd_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 splat_opcode);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_CONSTRUCT_VALUES_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c
new file mode 100644
index 000000000..e9d30bfcb
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_conversions.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_numberic.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_integer_narrow_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
+                        const char *intrinsic)
+{
+ LLVMValueRef vector1, vector2, result;
+ LLVMTypeRef param_types[2] = { in_vector_type, in_vector_type };
+
+ if (!(vector2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type, "vec2"))
+ || !(vector1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type, "vec1"))) {
+ return false;
+ }
+
+    if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ out_vector_type, param_types, 2,
+ vector1, vector2))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+enum integer_sat_type {
+ e_sat_i16x8 = 0,
+ e_sat_i32x4,
+ e_sat_i64x2,
+ e_sat_i32x8,
+};
+
+static LLVMValueRef
+simd_saturate(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_sat_type itype, LLVMValueRef vector,
+ LLVMValueRef min, LLVMValueRef max, bool is_signed)
+{
+ LLVMValueRef result;
+ LLVMTypeRef vector_type;
+
+ LLVMTypeRef param_types[][2] = {
+ { V128_i16x8_TYPE, V128_i16x8_TYPE },
+ { V128_i32x4_TYPE, V128_i32x4_TYPE },
+ { V128_i64x2_TYPE, V128_i64x2_TYPE },
+ { 0 },
+ };
+
+ const char *smin_intrinsic[] = {
+ "llvm.smin.v8i16",
+ "llvm.smin.v4i32",
+ "llvm.smin.v2i64",
+ "llvm.smin.v8i32",
+ };
+
+ const char *umin_intrinsic[] = {
+ "llvm.umin.v8i16",
+ "llvm.umin.v4i32",
+ "llvm.umin.v2i64",
+ "llvm.umin.v8i32",
+ };
+
+ const char *smax_intrinsic[] = {
+ "llvm.smax.v8i16",
+ "llvm.smax.v4i32",
+ "llvm.smax.v2i64",
+ "llvm.smax.v8i32",
+ };
+
+ const char *umax_intrinsic[] = {
+ "llvm.umax.v8i16",
+ "llvm.umax.v4i32",
+ "llvm.umax.v2i64",
+ "llvm.umax.v8i32",
+ };
+
+ if (e_sat_i32x8 == itype) {
+ if (!(vector_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return NULL;
+ }
+
+ param_types[itype][0] = vector_type;
+ param_types[itype][1] = vector_type;
+ }
+
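+    /* clamp each lane into [min, max]: (s|u)min against max first,
+       then (s|u)max against min */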
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx,
+ is_signed ? smin_intrinsic[itype] : umin_intrinsic[itype],
+ param_types[itype][0], param_types[itype], 2, vector, max))
+ || !(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx,
+ is_signed ? smax_intrinsic[itype] : umax_intrinsic[itype],
+ param_types[itype][0], param_types[itype], 2, result, min))) {
+ return NULL;
+ }
+
+ return result;
+}
+
+static bool
+simd_integer_narrow_common(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_sat_type itype, bool is_signed)
+{
+ LLVMValueRef vec1, vec2, min, max, mask, result;
+ LLVMTypeRef in_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i64x2_TYPE };
+ LLVMTypeRef min_max_type[] = { INT16_TYPE, I32_TYPE, I64_TYPE };
+ LLVMTypeRef trunc_type[3] = { 0 };
+ uint8 length[] = { 8, 4, 2 };
+
+ int64 smin[] = { 0xff80, 0xffFF8000, 0xffFFffFF80000000 };
+ int64 umin[] = { 0x0, 0x0, 0x0 };
+ int64 smax[] = { 0x007f, 0x00007fff, 0x000000007fFFffFF };
+ int64 umax[] = { 0x00ff, 0x0000ffff, 0x00000000ffFFffFF };
+
+ LLVMValueRef mask_element[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+
+ if (!(trunc_type[0] = LLVMVectorType(INT8_TYPE, 8))
+ || !(trunc_type[1] = LLVMVectorType(INT16_TYPE, 4))
+ || !(trunc_type[2] = LLVMVectorType(I32_TYPE, 2))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec2"))
+ || !(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec1"))) {
+ return false;
+ }
+
+ if (!(max = simd_build_splat_const_integer_vector(
+ comp_ctx, min_max_type[itype],
+ is_signed ? smax[itype] : umax[itype], length[itype]))
+ || !(min = simd_build_splat_const_integer_vector(
+ comp_ctx, min_max_type[itype],
+ is_signed ? smin[itype] : umin[itype], length[itype]))) {
+ return false;
+ }
+
+ /* sat */
+ if (!(vec1 = simd_saturate(comp_ctx, func_ctx, itype, vec1, min, max,
+ is_signed))
+ || !(vec2 = simd_saturate(comp_ctx, func_ctx, itype, vec2, min,
+ max, is_signed))) {
+ return false;
+ }
+
+ /* trunc */
+ if (!(vec1 = LLVMBuildTrunc(comp_ctx->builder, vec1, trunc_type[itype],
+ "vec1_trunc"))
+ || !(vec2 = LLVMBuildTrunc(comp_ctx->builder, vec2, trunc_type[itype],
+ "vec2_trunc"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ return false;
+ }
+
+ /* combine */
+ if (!(mask = LLVMConstVector(mask_element, (length[itype] << 1)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
+ "vec_shuffle"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ if (is_target_x86(comp_ctx)) {
+ return simd_integer_narrow_x86(
+ comp_ctx, func_ctx, V128_i16x8_TYPE, V128_i8x16_TYPE,
+ is_signed ? "llvm.x86.sse2.packsswb.128"
+ : "llvm.x86.sse2.packuswb.128");
+ }
+ else {
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i16x8,
+ is_signed);
+ }
+}
+
+bool
+aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ if (is_target_x86(comp_ctx)) {
+ return simd_integer_narrow_x86(comp_ctx, func_ctx, V128_i32x4_TYPE,
+ V128_i16x8_TYPE,
+ is_signed ? "llvm.x86.sse2.packssdw.128"
+ : "llvm.x86.sse41.packusdw");
+ }
+ else {
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i32x4,
+ is_signed);
+ }
+}
+
+bool
+aot_compile_simd_i32x4_narrow_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ /* TODO: x86 intrinsics */
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i64x2,
+ is_signed);
+}
+
+enum integer_extend_type {
+ e_ext_i8x16,
+ e_ext_i16x8,
+ e_ext_i32x4,
+};
+
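+
+/* pick the requested half of `vector` with a shuffle against undef, then
+ * sign- or zero-extend the selected lanes to twice their width */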
+static LLVMValueRef
+simd_integer_extension(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_extend_type itype, LLVMValueRef vector,
+ bool lower_half, bool is_signed)
+{
+ LLVMValueRef mask, sub_vector, result;
+ LLVMValueRef bits[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+ LLVMTypeRef out_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i64x2_TYPE };
+ LLVMValueRef undef[] = { LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef) };
+ uint32 sub_vector_length[] = { 8, 4, 2 };
+
+ if (!(mask = lower_half ? LLVMConstVector(bits, sub_vector_length[itype])
+ : LLVMConstVector(bits + sub_vector_length[itype],
+ sub_vector_length[itype]))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return NULL;
+ }
+
+ /* retrieve the low or high half */
+ if (!(sub_vector = LLVMBuildShuffleVector(comp_ctx->builder, vector,
+ undef[itype], mask, "half"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return NULL;
+ }
+
+ if (is_signed) {
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
+ out_vector_type[itype], "sext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return NULL;
+ }
+ }
+ else {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
+ out_vector_type[itype], "zext"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return NULL;
+ }
+ }
+
+ return result;
+}
+
+static bool
+simd_integer_extension_wrapper(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ enum integer_extend_type itype, bool lower_half,
+ bool is_signed)
+{
+ LLVMValueRef vector, result;
+
+ LLVMTypeRef in_vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec"))) {
+ return false;
+ }
+
+ if (!(result = simd_integer_extension(comp_ctx, func_ctx, itype, vector,
+ lower_half, is_signed))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extend_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i8x16,
+ lower_half, is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_extend_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i16x8,
+ lower_half, is_signed);
+}
+
+bool
+aot_compile_simd_i64x2_extend_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i32x4,
+ lower_half, is_signed);
+}
+
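+
+/* llvm.fpto{s,u}i.sat.* already matches wasm trunc_sat semantics: NaN
+ * becomes 0 and out-of-range values clamp to the integer limits */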
+static LLVMValueRef
+simd_trunc_sat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const char *intrinsics, LLVMTypeRef in_vector_type,
+ LLVMTypeRef out_vector_type)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[] = { in_vector_type };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, in_vector_type,
+ "vector"))) {
+ return NULL;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsics,
+ out_vector_type, param_types, 1,
+ vector))) {
+ return NULL;
+ }
+
+ return result;
+}
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef result;
+ if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
+ is_signed ? "llvm.fptosi.sat.v4i32.v4f32"
+ : "llvm.fptoui.sat.v4i32.v4f32",
+ V128_f32x4_TYPE, V128_i32x4_TYPE))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef result, zero, mask;
+ LLVMTypeRef out_vector_type;
+ LLVMValueRef lanes[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two),
+ LLVM_CONST(i32_three),
+ };
+
+ if (!(out_vector_type = LLVMVectorType(I32_TYPE, 2))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
+ is_signed ? "llvm.fptosi.sat.v2i32.v2f64"
+ : "llvm.fptoui.sat.v2i32.v2f64",
+ V128_f64x2_TYPE, out_vector_type))) {
+ return false;
+ }
+
+ if (!(zero = LLVMConstNull(out_vector_type))) {
+ HANDLE_FAILURE("LLVMConstNull");
+ return false;
+ }
+
+ /* v2i32 -> v4i32 */
+ if (!(mask = LLVMConstVector(lanes, 4))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, zero, mask,
+ "extend"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+static LLVMValueRef
+simd_integer_convert(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_signed, LLVMValueRef vector,
+ LLVMTypeRef out_vector_type)
+{
+ LLVMValueRef result;
+ result = is_signed ? LLVMBuildSIToFP(comp_ctx->builder, vector,
+ out_vector_type, "converted")
+ : LLVMBuildUIToFP(comp_ctx->builder, vector,
+ out_vector_type, "converted");
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildSIToFP/LLVMBuildUIToFP");
+ }
+
+ return result;
+}
+
+bool
+aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i32x4_TYPE, "vec"))) {
+ return false;
+ }
+
+ if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
+ V128_f32x4_TYPE))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f64x2_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef vector, mask, result;
+ LLVMValueRef lanes[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_one),
+ };
+ LLVMTypeRef out_vector_type;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i32x4_TYPE, "vec"))) {
+ return false;
+ }
+
+ if (!(out_vector_type = LLVMVectorType(F64_TYPE, 4))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
+ out_vector_type))) {
+ return false;
+ }
+
+ /* v4f64 -> v2f64 */
+ if (!(mask = LLVMConstVector(lanes, 2))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, result,
+ mask, "trunc"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
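+
+/* extended pairwise addition: split the input into its even- and
+ * odd-indexed lanes, widen both halves, and add them, so that
+ * result[i] = ext(in[2 * i]) + ext(in[2 * i + 1]) */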
+static bool
+simd_extadd_pairwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
+ bool is_signed)
+{
+ LLVMValueRef vector, even_mask, odd_mask, sub_vector_even, sub_vector_odd,
+ result;
+
+ LLVMValueRef even_element[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_two), LLVM_CONST(i32_four),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_eight), LLVM_CONST(i32_ten),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_fourteen),
+ };
+
+ LLVMValueRef odd_element[] = {
+ LLVM_CONST(i32_one), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_five), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_nine), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_thirteen), LLVM_CONST(i32_fifteen),
+ };
+
+ /* assumes the only cases are i16x8 from i8x16 and i32x4 from i16x8 */
+ uint8 mask_length = V128_i16x8_TYPE == out_vector_type ? 8 : 4;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, in_vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(even_mask = LLVMConstVector(even_element, mask_length))
+ || !(odd_mask = LLVMConstVector(odd_element, mask_length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ /* shuffle a <16xi8> vector to two <8xi8> vectors */
+ if (!(sub_vector_even = LLVMBuildShuffleVector(
+ comp_ctx->builder, vector, vector, even_mask, "pick_even"))
+ || !(sub_vector_odd = LLVMBuildShuffleVector(
+ comp_ctx->builder, vector, vector, odd_mask, "pick_odd"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ /* sext/zext <8xi8> to <8xi16> */
+ if (is_signed) {
+ if (!(sub_vector_even =
+ LLVMBuildSExt(comp_ctx->builder, sub_vector_even,
+ out_vector_type, "even_sext"))
+ || !(sub_vector_odd =
+ LLVMBuildSExt(comp_ctx->builder, sub_vector_odd,
+ out_vector_type, "odd_sext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+ }
+ else {
+ if (!(sub_vector_even =
+ LLVMBuildZExt(comp_ctx->builder, sub_vector_even,
+ out_vector_type, "even_zext"))
+ || !(sub_vector_odd =
+ LLVMBuildZExt(comp_ctx->builder, sub_vector_odd,
+ out_vector_type, "odd_zext"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, sub_vector_even,
+ sub_vector_odd, "sum"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extadd_pairwise_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed)
+{
+ return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ V128_i16x8_TYPE, is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_extadd_pairwise_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed)
+{
+ return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, is_signed);
+}
+
+bool
+aot_compile_simd_i16x8_q15mulr_sat(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef lhs, rhs, pad, offset, min, max, result;
+ LLVMTypeRef vector_ext_type;
+
+ if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i16x8_TYPE, "lhs"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
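+
+ /* widen to i32 lanes so the full 16x16-bit product plus the rounding
+ * bias fits without overflow */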
+ if (!(lhs = LLVMBuildSExt(comp_ctx->builder, lhs, vector_ext_type,
+ "lhs_v8i32"))
+ || !(rhs = LLVMBuildSExt(comp_ctx->builder, rhs, vector_ext_type,
+ "rhs_v8i32"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+
+ /* rounding bias 0x4000 and shift amount 15 */
+ if (!(pad = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0x4000, 8))
+ || !(offset = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 15, 8))) {
+ return false;
+ }
+
+ /* TODO: look for an x86 intrinsic for an integer "fused multiply-and-add" */
+ /* S.SignedSaturate((x * y + 0x4000) >> 15) */
+ if (!(result = LLVMBuildMul(comp_ctx->builder, lhs, rhs, "mul"))) {
+ HANDLE_FAILURE("LLVMBuildMul");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, result, pad, "add"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAShr(comp_ctx->builder, result, offset, "ashr"))) {
+ HANDLE_FAILURE("LLVMBuildAShr");
+ return false;
+ }
+
+ if (!(min = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0xffff8000, 8))
+ || !(max = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0x00007fff, 8))) {
+ return false;
+ }
+
+ /* saturating before the trunc lets the backend fold the clamp into a pack */
+ if (!(result = simd_saturate(comp_ctx, func_ctx, e_sat_i32x8, result, min,
+ max, true))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, V128_i16x8_TYPE,
+ "down_to_v8i16"))) {
+ HANDLE_FAILURE("LLVMBuidlTrunc");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+enum integer_extmul_type {
+ e_i16x8_extmul_i8x16,
+ e_i32x4_extmul_i16x8,
+ e_i64x2_extmul_i32x4,
+};
+
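+
+/* extended multiplication: extend the chosen half of each operand, then
+ * multiply in the doubled lane width so no product bits are lost */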
+static bool
+simd_integer_extmul(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool lower_half, bool is_signed,
+ enum integer_extmul_type itype)
+{
+ LLVMValueRef vec1, vec2, result;
+ enum integer_extend_type ext_type[] = {
+ e_ext_i8x16,
+ e_ext_i16x8,
+ e_ext_i32x4,
+ };
+ LLVMTypeRef in_vector_type[] = {
+ V128_i8x16_TYPE,
+ V128_i16x8_TYPE,
+ V128_i32x4_TYPE,
+ };
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec1"))
+ || !(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec2"))) {
+ return false;
+ }
+
+ if (!(vec1 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
+ vec1, lower_half, is_signed))
+ || !(vec2 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
+ vec2, lower_half, is_signed))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildMul(comp_ctx->builder, vec1, vec2, "product"))) {
+ HANDLE_FAILURE("LLVMBuildMul");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extmul_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i16x8_extmul_i8x16);
+}
+
+bool
+aot_compile_simd_i32x4_extmul_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i32x4_extmul_i16x8);
+}
+
+bool
+aot_compile_simd_i64x2_extmul_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i64x2_extmul_i32x4);
+}
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h
new file mode 100644
index 000000000..87b8bd684
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_CONVERSIONS_H_
+#define _SIMD_CONVERSIONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_narrow_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_extend_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extend_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i64x2_extend_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_f64x2_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_extadd_pairwise_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extadd_pairwise_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i16x8_q15mulr_sat(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_extmul_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extmul_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i64x2_extmul_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_CONVERSIONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c
new file mode 100644
index 000000000..d850fe8f7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_floating_point.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_numberic.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_v128_float_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result = NULL;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ switch (arith_op) {
+ case FLOAT_ADD:
+ result = LLVMBuildFAdd(comp_ctx->builder, lhs, rhs, "sum");
+ break;
+ case FLOAT_SUB:
+ result = LLVMBuildFSub(comp_ctx->builder, lhs, rhs, "difference");
+ break;
+ case FLOAT_MUL:
+ result = LLVMBuildFMul(comp_ctx->builder, lhs, rhs, "product");
+ break;
+ case FLOAT_DIV:
+ result = LLVMBuildFDiv(comp_ctx->builder, lhs, rhs, "quotient");
+ break;
+ default:
+ return false;
+ }
+
+ if (!result) {
+ HANDLE_FAILURE(
+ "LLVMBuildFAdd/LLVMBuildFSub/LLVMBuildFMul/LLVMBuildFDiv");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f64x2_TYPE);
+}
+
+static bool
+simd_v128_float_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildFNeg(comp_ctx->builder, vector, "neg"))) {
+ HANDLE_FAILURE("LLVMBuildFNeg");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_float_neg(comp_ctx, func_ctx, V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_float_neg(comp_ctx, func_ctx, V128_f64x2_TYPE);
+}
+
+static bool
+simd_float_intrinsic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsic)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[1] = { vector_type };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 1, vector))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.fabs.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.fabs.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.round.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.round.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.sqrt.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.sqrt.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.ceil.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.ceil.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.floor.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.floor.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.trunc.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.trunc.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.rint.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.rint.v2f64");
+}
+
+static bool
+simd_float_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMRealPredicate op = FLOAT_MIN == arith_op ? LLVMRealULT : LLVMRealUGT;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildFCmp");
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+/* TODO: suggest non-IA platforms check "llvm.minimum.*" and
+ * "llvm.maximum.*" first */
+bool
+aot_compile_simd_f32x4_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
+ V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
+ V128_f64x2_TYPE);
+}
+
+static bool
+simd_float_pmin_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsic)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMTypeRef param_types[2];
+
+ param_types[0] = vector_type;
+ param_types[1] = vector_type;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 2, lhs, rhs))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_pmin_max(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ run_min ? "llvm.minnum.v4f32"
+ : "llvm.maxnum.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_pmin_max(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ run_min ? "llvm.minnum.v2f64"
+ : "llvm.maxnum.v2f64");
+}
+
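+
+/* f32x4.demote_f64x2_zero: truncate each f64 lane to f32 one at a time
+ * and insert both into a zeroed f32x4, leaving the upper lanes zero */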
+bool
+aot_compile_simd_f64x2_demote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, elem_0, elem_1, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_f64x2_TYPE, "vector"))) {
+ return false;
+ }
+
+ if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_zero), "elem_0"))
+ || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_one), "elem_1"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ /* fptrunc <f64> elem to <f32> */
+ if (!(elem_0 = LLVMBuildFPTrunc(comp_ctx->builder, elem_0, F32_TYPE,
+ "elem_0_trunc"))
+ || !(elem_1 = LLVMBuildFPTrunc(comp_ctx->builder, elem_1, F32_TYPE,
+ "elem_1_trunc"))) {
+ HANDLE_FAILURE("LLVMBuildFPTrunc");
+ return false;
+ }
+
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
+ LLVM_CONST(f32x4_vec_zero), elem_0,
+ LLVM_CONST(i32_zero), "new_vector_0"))
+ || !(result =
+ LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
+ LLVM_CONST(i32_one), "new_vector_1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_promote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, elem_0, elem_1, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_f32x4_TYPE, "vector"))) {
+ return false;
+ }
+
+ if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_zero), "elem_0"))
+ || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_one), "elem_1"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ /* fpext <f32> elem to <f64> */
+ if (!(elem_0 =
+ LLVMBuildFPExt(comp_ctx->builder, elem_0, F64_TYPE, "elem_0_ext"))
+ || !(elem_1 = LLVMBuildFPExt(comp_ctx->builder, elem_1, F64_TYPE,
+ "elem_1_ext"))) {
+ HANDLE_FAILURE("LLVMBuildFPExt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
+ LLVM_CONST(f64x2_vec_zero), elem_0,
+ LLVM_CONST(i32_zero), "new_vector_0"))
+ || !(result =
+ LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
+ LLVM_CONST(i32_one), "new_vector_1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h
new file mode 100644
index 000000000..213b4391f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_FLOATING_POINT_H_
+#define _SIMD_FLOATING_POINT_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_round(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_round(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_floor(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_floor(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_trunc(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_trunc(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f32x4_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_demote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_promote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_FLOATING_POINT_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c
new file mode 100644
index 000000000..1d0e6967b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_int_arith.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_integer_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result = NULL;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ switch (arith_op) {
+ case V128_ADD:
+ result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "sum");
+ break;
+ case V128_SUB:
+ result = LLVMBuildSub(comp_ctx->builder, lhs, rhs, "difference");
+ break;
+ case V128_MUL:
+ result = LLVMBuildMul(comp_ctx->builder, lhs, rhs, "product");
+ break;
+ default:
+ HANDLE_FAILURE("Unsupport arith_op");
+ break;
+ }
+
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildAdd/LLVMBuildSub/LLVMBuildMul");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i64x2_TYPE);
+}
+
+static bool
+simd_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, LLVMTypeRef type)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, type, "vector"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildNeg(comp_ctx->builder, vector, "neg"))) {
+ HANDLE_FAILURE("LLVMBuildNeg");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i64x2_TYPE);
+}
+
+bool
+aot_compile_simd_i8x16_popcnt(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vector"))) {
+ return false;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ "llvm.ctpop.v16i8", V128_i8x16_TYPE,
+ &V128_i8x16_TYPE, 1, vector))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+static bool
+simd_v128_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, V128Arithmetic arith_op, bool is_signed)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMIntPredicate op;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (V128_MIN == arith_op) {
+ op = is_signed ? LLVMIntSLT : LLVMIntULT;
+ }
+ else {
+ op = is_signed ? LLVMIntSGT : LLVMIntUGT;
+ }
+
+ if (!(result = LLVMBuildICmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i8x16_TYPE, arith_op,
+ is_signed);
+}
+
+bool
+aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i16x8_TYPE, arith_op,
+ is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i32x4_TYPE, arith_op,
+ is_signed);
+}
+
+/* llvm.abs.* */
+static bool
+simd_v128_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const char *intrinsic, LLVMTypeRef vector_type)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[] = { vector_type, INT1_TYPE };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ return false;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 2, vector,
+ /* is_int_min_poison */
+ LLVM_CONST(i1_zero)))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v16i8", V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v8i16", V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v4i32", V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v2i64", V128_i64x2_TYPE);
+}
+
+enum integer_avgr_u {
+ e_avgr_u_i8x16,
+ e_avgr_u_i16x8,
+ e_avgr_u_i32x4,
+};
+
+/* TODO: try int_x86_mmx_pavg_b and int_x86_mmx_pavg_w */
+/* (v1 + v2 + 1) / 2 */
+static bool
+simd_v128_avg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_avgr_u itype)
+{
+ LLVMValueRef lhs, rhs, ones, result;
+ LLVMTypeRef vector_ext_type;
+ LLVMTypeRef vector_type[] = {
+ V128_i8x16_TYPE,
+ V128_i16x8_TYPE,
+ V128_i32x4_TYPE,
+ };
+ unsigned lanes[] = { 16, 8, 4 };
+
+ if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "lhs"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I64_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(lhs = LLVMBuildZExt(comp_ctx->builder, lhs, vector_ext_type,
+ "zext_to_i64"))
+ || !(rhs = LLVMBuildZExt(comp_ctx->builder, rhs, vector_ext_type,
+ "zext_to_i64"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+
+ /* lanes were zero-extended to i64, so these adds cannot overflow */
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "l_add_r"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(ones = simd_build_splat_const_integer_vector(comp_ctx, I64_TYPE, 1,
+ lanes[itype]))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, result, ones, "plus_1"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(result = LLVMBuildLShr(comp_ctx->builder, result, ones, "avg"))) {
+ HANDLE_FAILURE("LLVMBuildLShr");
+ return false;
+ }
+
+ if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, vector_type[itype],
+ "to_orig_type"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i32x4);
+}
+
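+
+/* i32x4.dot_i16x8_s: sign-extend both operands to v8i32, multiply, then
+ * add each even-indexed product to its odd-indexed neighbour */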
+bool
+aot_compile_simd_i32x4_dot_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vec1, vec2, even_mask, odd_mask, zero, result;
+ LLVMTypeRef vector_ext_type;
+ LLVMValueRef even_element[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_two),
+ LLVM_CONST(i32_four),
+ LLVM_CONST(i32_six),
+ };
+ LLVMValueRef odd_element[] = {
+ LLVM_CONST(i32_one),
+ LLVM_CONST(i32_three),
+ LLVM_CONST(i32_five),
+ LLVM_CONST(i32_seven),
+ };
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ "vec1"))
+ || !(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i16x8_TYPE, "vec2"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ /* sext <v8i16> to <v8i32> */
+ if (!(vec1 = LLVMBuildSExt(comp_ctx->builder, vec1, vector_ext_type,
+ "vec1_v8i32"))
+ || !(vec2 = LLVMBuildSExt(comp_ctx->builder, vec2, vector_ext_type,
+ "vec2_v8i32"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildMul(comp_ctx->builder, vec1, vec2, "product"))) {
+ HANDLE_FAILURE("LLVMBuildMul");
+ return false;
+ }
+
+ /* pick elements with even indexes and odd indexes */
+ if (!(even_mask = LLVMConstVector(even_element, 4))
+ || !(odd_mask = LLVMConstVector(odd_element, 4))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(zero = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE, 0,
+ 8))) {
+ return false;
+ }
+
+ if (!(vec1 = LLVMBuildShuffleVector(comp_ctx->builder, result, zero,
+ even_mask, "even_result"))
+ || !(vec2 = LLVMBuildShuffleVector(comp_ctx->builder, result, zero,
+ odd_mask, "odd_result"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, vec1, vec2, "new_vec"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h
new file mode 100644
index 000000000..a7a21170a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_INT_ARITH_H_
+#define _SIMD_INT_ARITH_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_popcnt(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_dot_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_INT_ARITH_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c
new file mode 100644
index 000000000..d166e954c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+#include "simd_load_store.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_memory.h"
+#include "../../aot/aot_runtime.h"
+#include "../../interpreter/wasm_opcode.h"
+
+/* data_length in bytes */
+static LLVMValueRef
+simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 data_length, LLVMTypeRef ptr_type,
+ LLVMTypeRef data_type)
+{
+ LLVMValueRef maddr, data;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset,
+ data_length))) {
+ HANDLE_FAILURE("aot_check_memory_overflow");
+ return NULL;
+ }
+
+ if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type,
+ "data_ptr"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ return NULL;
+ }
+
+ if (!(data = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, "data"))) {
+ HANDLE_FAILURE("LLVMBuildLoad");
+ return NULL;
+ }
+
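+
+ /* wasm allows unaligned access, so mark the load as align-1 */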
+ LLVMSetAlignment(data, 1);
+
+ return data;
+}
+
+bool
+aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef result;
+
+ if (!(result = simd_load(comp_ctx, func_ctx, align, offset, 16,
+ V128_PTR_TYPE, V128_TYPE))) {
+ return false;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ LLVMValueRef sub_vector, result;
+ uint32 opcode_index = opcode - SIMD_v128_load8x8_s;
+ bool signeds[] = { true, false, true, false, true, false };
+ LLVMTypeRef vector_types[] = {
+ V128_i16x8_TYPE, V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE, V128_i64x2_TYPE,
+ };
+ LLVMTypeRef sub_vector_types[] = {
+ LLVMVectorType(INT8_TYPE, 8), LLVMVectorType(INT8_TYPE, 8),
+ LLVMVectorType(INT16_TYPE, 4), LLVMVectorType(INT16_TYPE, 4),
+ LLVMVectorType(I32_TYPE, 2), LLVMVectorType(I32_TYPE, 2),
+ };
+ LLVMTypeRef sub_vector_type, sub_vector_ptr_type;
+
+ bh_assert(opcode_index < 6);
+
+ sub_vector_type = sub_vector_types[opcode_index];
+
+ /* to vector ptr type */
+ if (!sub_vector_type
+ || !(sub_vector_ptr_type = LLVMPointerType(sub_vector_type, 0))) {
+ HANDLE_FAILURE("LLVMPointerType");
+ return false;
+ }
+
+ if (!(sub_vector = simd_load(comp_ctx, func_ctx, align, offset, 8,
+ sub_vector_ptr_type, sub_vector_type))) {
+ return false;
+ }
+
+ if (signeds[opcode_index]) {
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
+ vector_types[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+ }
+ else {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
+ vector_types[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
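+
+/* load one scalar, insert it into lane 0 of an undef vector, then
+ * broadcast it to all lanes with an all-zero shuffle mask */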
+bool
+aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ uint32 opcode_index = opcode - SIMD_v128_load8_splat;
+ LLVMValueRef element, result;
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { INT8_TYPE, INT16_TYPE, I32_TYPE,
+ I64_TYPE };
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMValueRef undefs[] = {
+ LLVM_CONST(i8x16_undef),
+ LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef),
+ LLVM_CONST(i64x2_undef),
+ };
+ LLVMValueRef masks[] = {
+ LLVM_CONST(i32x16_zero),
+ LLVM_CONST(i32x8_zero),
+ LLVM_CONST(i32x4_zero),
+ LLVM_CONST(i32x2_zero),
+ };
+
+ bh_assert(opcode_index < 4);
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildInsertElement(comp_ctx->builder, undefs[opcode_index],
+ element, I32_ZERO, "base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result,
+ undefs[opcode_index],
+ masks[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id)
+{
+ LLVMValueRef element, vector;
+ uint32 opcode_index = opcode - SIMD_v128_load8_lane;
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { INT8_TYPE, INT16_TYPE, I32_TYPE,
+ I64_TYPE };
+ LLVMTypeRef vector_types[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMValueRef lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id);
+
+ bh_assert(opcode_index < 4);
+
+ if (!(vector = simd_pop_v128_and_bitcast(
+ comp_ctx, func_ctx, vector_types[opcode_index], "src"))) {
+ return false;
+ }
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(vector = LLVMBuildInsertElement(comp_ctx->builder, vector, element,
+ lane, "dst"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, vector, "result");
+}
+
+bool
+aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ LLVMValueRef element, result, mask;
+ uint32 opcode_index = opcode - SIMD_v128_load32_zero;
+ uint32 data_lengths[] = { 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { I32_TYPE, I64_TYPE };
+ LLVMValueRef zero[] = {
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero),
+ };
+ LLVMValueRef undef[] = {
+ LLVM_CONST(i32x4_undef),
+ LLVM_CONST(i64x2_undef),
+ };
+ uint32 mask_length[] = { 4, 2 };
+ LLVMValueRef mask_element[][4] = {
+ { LLVM_CONST(i32_zero), LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six) },
+ { LLVM_CONST(i32_zero), LLVM_CONST(i32_two) },
+ };
+
+ bh_assert(opcode_index < 2);
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildInsertElement(comp_ctx->builder, undef[opcode_index],
+ element, I32_ZERO, "vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ /* fill in other lanes with zero */
+ if (!(mask = LLVMConstVector(mask_element[opcode_index],
+ mask_length[opcode_index]))) {
+ HANDLE_FAILURE("LLConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result,
+ zero[opcode_index], mask,
+ "fill_in_zero"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+/* data_length in bytes */
+static bool
+simd_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 data_length, LLVMValueRef value,
+ LLVMTypeRef value_ptr_type)
+{
+ LLVMValueRef maddr, result;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset,
+ data_length))) {
+ HANDLE_FAILURE("aot_check_memory_overflow");
+ return false;
+ }
+
+ if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, value_ptr_type,
+ "data_ptr"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ return false;
+ }
+
+ if (!(result = LLVMBuildStore(comp_ctx->builder, value, maddr))) {
+ HANDLE_FAILURE("LLVMBuildStore");
+ return false;
+ }
+
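+
+ /* likewise, stores into wasm memory must tolerate any alignment */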
+ LLVMSetAlignment(result, 1);
+
+ return true;
+}
+
+bool
+aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef value;
+
+ POP_V128(value);
+
+ return simd_store(comp_ctx, func_ctx, align, offset, 16, value,
+ V128_PTR_TYPE);
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id)
+{
+ LLVMValueRef element, vector;
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ uint32 opcode_index = opcode - SIMD_v128_store8_lane;
+ LLVMTypeRef vector_types[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMValueRef lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id);
+
+ bh_assert(opcode_index < 4);
+
+ if (!(vector = simd_pop_v128_and_bitcast(
+ comp_ctx, func_ctx, vector_types[opcode_index], "src"))) {
+ return false;
+ }
+
+ if (!(element = LLVMBuildExtractElement(comp_ctx->builder, vector, lane,
+ "element"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ return simd_store(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index], element,
+ element_ptr_types[opcode_index]);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h
new file mode 100644
index 000000000..fd118ec1b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_LOAD_STORE_H_
+#define _SIMD_LOAD_STORE_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_LOAD_STORE_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c
new file mode 100644
index 000000000..1de4520a7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_sat_int_arith.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
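+/* Lower a Wasm saturating integer add/sub to the corresponding LLVM
+ * saturating vector intrinsic, e.g.
+ *   declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+ * Operands and result share vector_type, so the clamped result can be
+ * bitcast back to v128 and pushed. */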
+static bool
+simd_sat_int_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsics)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMTypeRef param_types[2];
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ param_types[0] = vector_type;
+ param_types[1] = vector_type;
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsics,
+ vector_type, param_types, 2, lhs, rhs))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ const char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v16i8", "llvm.uadd.sat.v16i8" },
+ { "llvm.ssub.sat.v16i8", "llvm.usub.sat.v16i8" },
+ };
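+ /* arith_op is expected to be V128_ADD (0) or V128_SUB (1): it selects
+ * the table row, while is_signed selects the signed/unsigned column. */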
+
+ return simd_sat_int_arith(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
+
+bool
+aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ const char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v8i16", "llvm.uadd.sat.v8i16" },
+ { "llvm.ssub.sat.v8i16", "llvm.usub.sat.v8i16" },
+ };
+
+ return simd_sat_int_arith(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
+
+bool
+aot_compile_simd_i32x4_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ const char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v4i32", "llvm.uadd.sat.v4i32" },
+ { "llvm.ssub.sat.v4i32", "llvm.usub.sat.v4i32" },
+ };
+
+ return simd_sat_int_arith(comp_ctx, func_ctx, V128_i32x4_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h
new file mode 100644
index 000000000..e30acaaf4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_SAT_INT_ARITH_H_
+#define _SIMD_SAT_INT_ARITH_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_SAT_INT_ARITH_H_ */