Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation')
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.c | 594
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.h | 335
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.c | 2921
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.h | 383
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_aot_file.c | 2930
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.c | 232
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.h | 35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.c | 172
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.h | 35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.c | 1155
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.h | 62
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.c | 939
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.h | 90
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.c | 141
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.h | 24
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.c | 1729
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.h | 36
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.c | 1435
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.h | 110
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.c | 1248
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.h | 87
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.c | 107
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.h | 27
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.c | 503
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.h | 59
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.c | 267
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.h | 39
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.c | 2770
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.h | 526
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra.cpp | 360
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.cpp | 108
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.h | 17
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.cpp | 289
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.h | 75
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.cpp | 510
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.h | 56
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/iwasm_compl.cmake | 26
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c | 418
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h | 92
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c | 144
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h | 35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c | 133
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h | 35
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c | 144
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h | 23
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c | 138
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h | 39
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c | 157
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h | 45
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c | 229
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h | 43
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c | 135
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h | 27
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c | 743
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h | 90
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c | 388
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h | 107
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c | 406
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h | 91
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c | 331
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h | 49
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c | 81
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h | 33
63 files changed, 24588 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.c
new file mode 100644
index 000000000..e836df28f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot.h"
+
+static char aot_error[128];
+
+char *
+aot_get_last_error()
+{
+ return aot_error[0] == '\0' ? "" : aot_error;
+}
+
+void
+aot_set_last_error_v(const char *format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vsnprintf(aot_error, sizeof(aot_error), format, args);
+ va_end(args);
+}
+
+void
+aot_set_last_error(const char *error)
+{
+ if (error)
+ snprintf(aot_error, sizeof(aot_error), "Error: %s", error);
+ else
+ aot_error[0] = '\0';
+}
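
[Editor's example] The three helpers above are the compiler's whole error-reporting surface: failure paths write through aot_set_last_error()/aot_set_last_error_v(), and the embedder reads the message back with aot_get_last_error(). A minimal sketch of the intended pattern (the caller alloc_or_fail is hypothetical, not part of this file):

    static void *
    alloc_or_fail(uint64 size) /* hypothetical caller */
    {
        void *buf;
        if (size >= UINT32_MAX
            || !(buf = wasm_runtime_malloc((uint32)size))) {
            aot_set_last_error_v("allocate %u bytes failed", (uint32)size);
            return NULL;
        }
        return buf;
    }

    /* in the embedder, after a failed call: */
    printf("AOT compilation failed: %s\n", aot_get_last_error());

Note the message lives in a single static 128-byte buffer, so it must be consumed before the next error overwrites it, and it is not thread-safe.
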
+
+static void
+aot_destroy_mem_init_data_list(AOTMemInitData **data_list, uint32 count)
+{
+ uint32 i;
+ for (i = 0; i < count; i++)
+ if (data_list[i])
+ wasm_runtime_free(data_list[i]);
+ wasm_runtime_free(data_list);
+}
+
+static AOTMemInitData **
+aot_create_mem_init_data_list(const WASMModule *module)
+{
+ AOTMemInitData **data_list;
+ uint64 size;
+ uint32 i;
+
+ /* Allocate memory */
+ size = sizeof(AOTMemInitData *) * (uint64)module->data_seg_count;
+ if (size >= UINT32_MAX
+ || !(data_list = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(data_list, 0, size);
+
+ /* Create each memory data segment */
+ for (i = 0; i < module->data_seg_count; i++) {
+ size = offsetof(AOTMemInitData, bytes)
+ + (uint64)module->data_segments[i]->data_length;
+ if (size >= UINT32_MAX
+ || !(data_list[i] = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+ data_list[i]->is_passive = module->data_segments[i]->is_passive;
+ data_list[i]->memory_index = module->data_segments[i]->memory_index;
+#endif
+ data_list[i]->offset = module->data_segments[i]->base_offset;
+ data_list[i]->byte_count = module->data_segments[i]->data_length;
+ memcpy(data_list[i]->bytes, module->data_segments[i]->data,
+ module->data_segments[i]->data_length);
+ }
+
+ return data_list;
+
+fail:
+ aot_destroy_mem_init_data_list(data_list, module->data_seg_count);
+ return NULL;
+}
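
[Editor's example] aot_create_mem_init_data_list() uses the single-allocation idiom that recurs throughout this file: AOTMemInitData ends in a trailing byte array (declared as `uint8 bytes[1]` in aot.h), so the header and the segment payload are carved out of one wasm_runtime_malloc() call sized with offsetof(). A reduced sketch of the idiom, with the helper name and `src`/`len` parameters being illustrative:

    static AOTMemInitData *
    alloc_seg(const uint8 *src, uint32 len) /* illustrative helper */
    {
        uint64 size = offsetof(AOTMemInitData, bytes) + (uint64)len;
        AOTMemInitData *seg;
        if (size >= UINT32_MAX
            || !(seg = wasm_runtime_malloc((uint32)size)))
            return NULL;              /* header + payload in one block */
        seg->byte_count = len;
        memcpy(seg->bytes, src, len); /* payload lands right after the header */
        return seg;
    }
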
+
+static void
+aot_destroy_table_init_data_list(AOTTableInitData **data_list, uint32 count)
+{
+ uint32 i;
+ for (i = 0; i < count; i++)
+ if (data_list[i])
+ wasm_runtime_free(data_list[i]);
+ wasm_runtime_free(data_list);
+}
+
+static AOTTableInitData **
+aot_create_table_init_data_list(const WASMModule *module)
+{
+ AOTTableInitData **data_list;
+ uint64 size;
+ uint32 i;
+
+ /* Allocate memory */
+ size = sizeof(AOTTableInitData *) * (uint64)module->table_seg_count;
+ if (size >= UINT32_MAX
+ || !(data_list = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(data_list, 0, size);
+
+ /* Create each table data segment */
+ for (i = 0; i < module->table_seg_count; i++) {
+ size =
+ offsetof(AOTTableInitData, func_indexes)
+ + sizeof(uint32) * (uint64)module->table_segments[i].function_count;
+ if (size >= UINT32_MAX
+ || !(data_list[i] = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+        data_list[i]->mode = module->table_segments[i].mode;
+        data_list[i]->elem_type = module->table_segments[i].elem_type;
+        /* is_dropped is controlled by the runtime */
+        data_list[i]->is_dropped = false;
+        data_list[i]->table_index = module->table_segments[i].table_index;
+        bh_memcpy_s(&data_list[i]->offset, sizeof(AOTInitExpr),
+                    &module->table_segments[i].base_offset,
+                    sizeof(AOTInitExpr));
+        data_list[i]->func_index_count =
+            module->table_segments[i].function_count;
+ bh_memcpy_s(data_list[i]->func_indexes,
+ sizeof(uint32) * module->table_segments[i].function_count,
+ module->table_segments[i].func_indexes,
+ sizeof(uint32) * module->table_segments[i].function_count);
+ }
+
+ return data_list;
+
+fail:
+ aot_destroy_table_init_data_list(data_list, module->table_seg_count);
+ return NULL;
+}
+
+static AOTImportGlobal *
+aot_create_import_globals(const WASMModule *module,
+ uint32 *p_import_global_data_size)
+{
+ AOTImportGlobal *import_globals;
+ uint64 size;
+ uint32 i, data_offset = 0;
+
+ /* Allocate memory */
+ size = sizeof(AOTImportGlobal) * (uint64)module->import_global_count;
+ if (size >= UINT32_MAX
+ || !(import_globals = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(import_globals, 0, (uint32)size);
+
+ /* Create each import global */
+ for (i = 0; i < module->import_global_count; i++) {
+ WASMGlobalImport *import_global = &module->import_globals[i].u.global;
+ import_globals[i].module_name = import_global->module_name;
+ import_globals[i].global_name = import_global->field_name;
+ import_globals[i].type = import_global->type;
+ import_globals[i].is_mutable = import_global->is_mutable;
+ import_globals[i].global_data_linked =
+ import_global->global_data_linked;
+ import_globals[i].size = wasm_value_type_size(import_global->type);
+ /* Calculate data offset */
+ import_globals[i].data_offset = data_offset;
+ data_offset += wasm_value_type_size(import_global->type);
+ }
+
+ *p_import_global_data_size = data_offset;
+ return import_globals;
+}
+
+static AOTGlobal *
+aot_create_globals(const WASMModule *module, uint32 global_data_start_offset,
+ uint32 *p_global_data_size)
+{
+ AOTGlobal *globals;
+ uint64 size;
+ uint32 i, data_offset = global_data_start_offset;
+
+ /* Allocate memory */
+ size = sizeof(AOTGlobal) * (uint64)module->global_count;
+ if (size >= UINT32_MAX || !(globals = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(globals, 0, (uint32)size);
+
+ /* Create each global */
+ for (i = 0; i < module->global_count; i++) {
+ WASMGlobal *global = &module->globals[i];
+ globals[i].type = global->type;
+ globals[i].is_mutable = global->is_mutable;
+ globals[i].size = wasm_value_type_size(global->type);
+ memcpy(&globals[i].init_expr, &global->init_expr,
+ sizeof(global->init_expr));
+ /* Calculate data offset */
+ globals[i].data_offset = data_offset;
+ data_offset += wasm_value_type_size(global->type);
+ }
+
+ *p_global_data_size = data_offset - global_data_start_offset;
+ return globals;
+}
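
[Editor's example] Import globals and module-defined globals share one linear data area: imports are laid out first from offset 0, and aot_create_globals() continues from global_data_start_offset, i.e. the size of the import-global region. As a worked example, assuming two import globals (i32, i64) followed by one defined f64 global:

    import global 0 (i32): data_offset = 0,  size = 4
    import global 1 (i64): data_offset = 4,  size = 8
    defined global 0 (f64): data_offset = 12, size = 8
    => import_global_data_size = 12, global_data_size = 8

which matches comp_data->global_data_size = import_global_data_size + global_data_size computed later in aot_create_comp_data(). Note the layout packs by value-type size only; no extra alignment padding is inserted.
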
+
+static void
+aot_destroy_func_types(AOTFuncType **func_types, uint32 count)
+{
+ uint32 i;
+ for (i = 0; i < count; i++)
+ if (func_types[i])
+ wasm_runtime_free(func_types[i]);
+ wasm_runtime_free(func_types);
+}
+
+static AOTFuncType **
+aot_create_func_types(const WASMModule *module)
+{
+ AOTFuncType **func_types;
+ uint64 size;
+ uint32 i;
+
+ /* Allocate memory */
+ size = sizeof(AOTFuncType *) * (uint64)module->type_count;
+ if (size >= UINT32_MAX
+ || !(func_types = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(func_types, 0, size);
+
+ /* Create each function type */
+ for (i = 0; i < module->type_count; i++) {
+ size = offsetof(AOTFuncType, types)
+ + (uint64)module->types[i]->param_count
+ + (uint64)module->types[i]->result_count;
+ if (size >= UINT32_MAX
+ || !(func_types[i] = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ memcpy(func_types[i], module->types[i], size);
+ }
+
+ return func_types;
+
+fail:
+ aot_destroy_func_types(func_types, module->type_count);
+ return NULL;
+}
+
+static AOTImportFunc *
+aot_create_import_funcs(const WASMModule *module)
+{
+ AOTImportFunc *import_funcs;
+ uint64 size;
+ uint32 i, j;
+
+ /* Allocate memory */
+ size = sizeof(AOTImportFunc) * (uint64)module->import_function_count;
+ if (size >= UINT32_MAX
+ || !(import_funcs = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ /* Create each import function */
+ for (i = 0; i < module->import_function_count; i++) {
+ WASMFunctionImport *import_func =
+ &module->import_functions[i].u.function;
+ import_funcs[i].module_name = import_func->module_name;
+ import_funcs[i].func_name = import_func->field_name;
+ import_funcs[i].func_ptr_linked = import_func->func_ptr_linked;
+ import_funcs[i].func_type = import_func->func_type;
+ import_funcs[i].signature = import_func->signature;
+ import_funcs[i].attachment = import_func->attachment;
+ import_funcs[i].call_conv_raw = import_func->call_conv_raw;
+ import_funcs[i].call_conv_wasm_c_api = false;
+ /* Resolve function type index */
+ for (j = 0; j < module->type_count; j++)
+ if (import_func->func_type == module->types[j]) {
+ import_funcs[i].func_type_index = j;
+ break;
+ }
+ }
+
+ return import_funcs;
+}
+
+static void
+aot_destroy_funcs(AOTFunc **funcs, uint32 count)
+{
+ uint32 i;
+
+ for (i = 0; i < count; i++)
+ if (funcs[i])
+ wasm_runtime_free(funcs[i]);
+ wasm_runtime_free(funcs);
+}
+
+static AOTFunc **
+aot_create_funcs(const WASMModule *module)
+{
+ AOTFunc **funcs;
+ uint64 size;
+ uint32 i, j;
+
+ /* Allocate memory */
+ size = sizeof(AOTFunc *) * (uint64)module->function_count;
+ if (size >= UINT32_MAX || !(funcs = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(funcs, 0, size);
+
+ /* Create each function */
+ for (i = 0; i < module->function_count; i++) {
+ WASMFunction *func = module->functions[i];
+ size = sizeof(AOTFunc);
+ if (!(funcs[i] = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+ funcs[i]->func_type = func->func_type;
+
+ /* Resolve function type index */
+ for (j = 0; j < module->type_count; j++)
+ if (func->func_type == module->types[j]) {
+ funcs[i]->func_type_index = j;
+ break;
+ }
+
+ /* Resolve local variable info and code info */
+ funcs[i]->local_count = func->local_count;
+ funcs[i]->local_types = func->local_types;
+ funcs[i]->param_cell_num = func->param_cell_num;
+ funcs[i]->local_cell_num = func->local_cell_num;
+ funcs[i]->code = func->code;
+ funcs[i]->code_size = func->code_size;
+ }
+
+ return funcs;
+
+fail:
+ aot_destroy_funcs(funcs, module->function_count);
+ return NULL;
+}
+
+AOTCompData *
+aot_create_comp_data(WASMModule *module)
+{
+ AOTCompData *comp_data;
+ uint32 import_global_data_size = 0, global_data_size = 0, i, j;
+ uint64 size;
+
+ /* Allocate memory */
+ if (!(comp_data = wasm_runtime_malloc(sizeof(AOTCompData)))) {
+ aot_set_last_error("create compile data failed.\n");
+ return NULL;
+ }
+
+ memset(comp_data, 0, sizeof(AOTCompData));
+
+ comp_data->memory_count =
+ module->import_memory_count + module->memory_count;
+
+ /* TODO: create import memories */
+
+    /* Allocate the memory array; reserve at least one AOTMemory slot */
+ if (!comp_data->memory_count)
+ comp_data->memory_count = 1;
+
+ size = (uint64)comp_data->memory_count * sizeof(AOTMemory);
+ if (size >= UINT32_MAX
+ || !(comp_data->memories = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("create memories array failed.\n");
+ goto fail;
+ }
+ memset(comp_data->memories, 0, size);
+
+ if (!(module->import_memory_count + module->memory_count)) {
+ comp_data->memories[0].num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE;
+ }
+
+ /* Set memory page count */
+ for (i = 0; i < module->import_memory_count + module->memory_count; i++) {
+ if (i < module->import_memory_count) {
+ comp_data->memories[i].memory_flags =
+ module->import_memories[i].u.memory.flags;
+ comp_data->memories[i].num_bytes_per_page =
+ module->import_memories[i].u.memory.num_bytes_per_page;
+ comp_data->memories[i].mem_init_page_count =
+ module->import_memories[i].u.memory.init_page_count;
+ comp_data->memories[i].mem_max_page_count =
+ module->import_memories[i].u.memory.max_page_count;
+ }
+ else {
+ j = i - module->import_memory_count;
+ comp_data->memories[i].memory_flags = module->memories[j].flags;
+ comp_data->memories[i].num_bytes_per_page =
+ module->memories[j].num_bytes_per_page;
+ comp_data->memories[i].mem_init_page_count =
+ module->memories[j].init_page_count;
+ comp_data->memories[i].mem_max_page_count =
+ module->memories[j].max_page_count;
+ }
+ }
+
+ /* Create memory data segments */
+ comp_data->mem_init_data_count = module->data_seg_count;
+ if (comp_data->mem_init_data_count > 0
+ && !(comp_data->mem_init_data_list =
+ aot_create_mem_init_data_list(module)))
+ goto fail;
+
+ /* Create tables */
+ comp_data->table_count = module->import_table_count + module->table_count;
+
+ if (comp_data->table_count > 0) {
+ size = sizeof(AOTTable) * (uint64)comp_data->table_count;
+ if (size >= UINT32_MAX
+ || !(comp_data->tables = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("create memories array failed.\n");
+ goto fail;
+ }
+ memset(comp_data->tables, 0, size);
+ for (i = 0; i < comp_data->table_count; i++) {
+ if (i < module->import_table_count) {
+ comp_data->tables[i].elem_type =
+ module->import_tables[i].u.table.elem_type;
+ comp_data->tables[i].table_flags =
+ module->import_tables[i].u.table.flags;
+ comp_data->tables[i].table_init_size =
+ module->import_tables[i].u.table.init_size;
+ comp_data->tables[i].table_max_size =
+ module->import_tables[i].u.table.max_size;
+ comp_data->tables[i].possible_grow =
+ module->import_tables[i].u.table.possible_grow;
+ }
+ else {
+ j = i - module->import_table_count;
+ comp_data->tables[i].elem_type = module->tables[j].elem_type;
+ comp_data->tables[i].table_flags = module->tables[j].flags;
+ comp_data->tables[i].table_init_size =
+ module->tables[j].init_size;
+ comp_data->tables[i].table_max_size =
+ module->tables[j].max_size;
+ comp_data->tables[i].possible_grow =
+ module->tables[j].possible_grow;
+ }
+ }
+ }
+
+ /* Create table data segments */
+ comp_data->table_init_data_count = module->table_seg_count;
+ if (comp_data->table_init_data_count > 0
+ && !(comp_data->table_init_data_list =
+ aot_create_table_init_data_list(module)))
+ goto fail;
+
+ /* Create import globals */
+ comp_data->import_global_count = module->import_global_count;
+ if (comp_data->import_global_count > 0
+ && !(comp_data->import_globals =
+ aot_create_import_globals(module, &import_global_data_size)))
+ goto fail;
+
+ /* Create globals */
+ comp_data->global_count = module->global_count;
+ if (comp_data->global_count
+ && !(comp_data->globals = aot_create_globals(
+ module, import_global_data_size, &global_data_size)))
+ goto fail;
+
+ comp_data->global_data_size = import_global_data_size + global_data_size;
+
+ /* Create function types */
+ comp_data->func_type_count = module->type_count;
+ if (comp_data->func_type_count
+ && !(comp_data->func_types = aot_create_func_types(module)))
+ goto fail;
+
+ /* Create import functions */
+ comp_data->import_func_count = module->import_function_count;
+ if (comp_data->import_func_count
+ && !(comp_data->import_funcs = aot_create_import_funcs(module)))
+ goto fail;
+
+ /* Create functions */
+ comp_data->func_count = module->function_count;
+ if (comp_data->func_count && !(comp_data->funcs = aot_create_funcs(module)))
+ goto fail;
+
+#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
+ /* Create custom name section */
+ comp_data->name_section_buf = module->name_section_buf;
+ comp_data->name_section_buf_end = module->name_section_buf_end;
+#endif
+
+ /* Create aux data/heap/stack information */
+ comp_data->aux_data_end_global_index = module->aux_data_end_global_index;
+ comp_data->aux_data_end = module->aux_data_end;
+ comp_data->aux_heap_base_global_index = module->aux_heap_base_global_index;
+ comp_data->aux_heap_base = module->aux_heap_base;
+ comp_data->aux_stack_top_global_index = module->aux_stack_top_global_index;
+ comp_data->aux_stack_bottom = module->aux_stack_bottom;
+ comp_data->aux_stack_size = module->aux_stack_size;
+
+ comp_data->start_func_index = module->start_function;
+ comp_data->malloc_func_index = module->malloc_function;
+ comp_data->free_func_index = module->free_function;
+ comp_data->retain_func_index = module->retain_function;
+
+ comp_data->wasm_module = module;
+
+ return comp_data;
+
+fail:
+
+ aot_destroy_comp_data(comp_data);
+ return NULL;
+}
+
+void
+aot_destroy_comp_data(AOTCompData *comp_data)
+{
+ if (!comp_data)
+ return;
+
+ if (comp_data->import_memories)
+ wasm_runtime_free(comp_data->import_memories);
+
+ if (comp_data->memories)
+ wasm_runtime_free(comp_data->memories);
+
+ if (comp_data->mem_init_data_list)
+ aot_destroy_mem_init_data_list(comp_data->mem_init_data_list,
+ comp_data->mem_init_data_count);
+
+ if (comp_data->import_tables)
+ wasm_runtime_free(comp_data->import_tables);
+
+ if (comp_data->tables)
+ wasm_runtime_free(comp_data->tables);
+
+ if (comp_data->table_init_data_list)
+ aot_destroy_table_init_data_list(comp_data->table_init_data_list,
+ comp_data->table_init_data_count);
+
+ if (comp_data->import_globals)
+ wasm_runtime_free(comp_data->import_globals);
+
+ if (comp_data->globals)
+ wasm_runtime_free(comp_data->globals);
+
+ if (comp_data->func_types)
+ aot_destroy_func_types(comp_data->func_types,
+ comp_data->func_type_count);
+
+ if (comp_data->import_funcs)
+ wasm_runtime_free(comp_data->import_funcs);
+
+ if (comp_data->funcs)
+ aot_destroy_funcs(comp_data->funcs, comp_data->func_count);
+
+ if (comp_data->aot_name_section_buf)
+ wasm_runtime_free(comp_data->aot_name_section_buf);
+
+ wasm_runtime_free(comp_data);
+}
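
[Editor's example] Taken together, aot.c exposes a create/destroy pair around AOTCompData. Because aot_destroy_comp_data() NULL-checks every sub-array, it is safe to call on a partially built structure, which is exactly what the fail: label in aot_create_comp_data() relies on. A hedged sketch of the expected lifecycle (the loader step and the compile step are assumptions, not shown in this file):

    int
    compile_module(WASMModule *module) /* module produced by the wasm loader */
    {
        AOTCompData *comp_data = aot_create_comp_data(module);
        if (!comp_data) {
            printf("%s\n", aot_get_last_error());
            return -1;
        }
        /* ... create an AOTCompContext and run the compiler ... */
        aot_destroy_comp_data(comp_data);
        return 0;
    }
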
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.h
new file mode 100644
index 000000000..c67251a6f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot.h
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_H_
+#define _AOT_H_
+
+#include "bh_platform.h"
+#include "bh_assert.h"
+#include "../common/wasm_runtime_common.h"
+#include "../interpreter/wasm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef AOT_FUNC_PREFIX
+#define AOT_FUNC_PREFIX "aot_func#"
+#endif
+
+typedef InitializerExpression AOTInitExpr;
+typedef WASMType AOTFuncType;
+typedef WASMExport AOTExport;
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+typedef void *dwar_extractor_handle_t;
+#endif
+
+typedef enum AOTIntCond {
+ INT_EQZ = 0,
+ INT_EQ,
+ INT_NE,
+ INT_LT_S,
+ INT_LT_U,
+ INT_GT_S,
+ INT_GT_U,
+ INT_LE_S,
+ INT_LE_U,
+ INT_GE_S,
+ INT_GE_U
+} AOTIntCond;
+
+typedef enum AOTFloatCond {
+ FLOAT_EQ = 0,
+ FLOAT_NE,
+ FLOAT_LT,
+ FLOAT_GT,
+ FLOAT_LE,
+ FLOAT_GE,
+ FLOAT_UNO
+} AOTFloatCond;
+
+/**
+ * Import memory
+ */
+typedef struct AOTImportMemory {
+ char *module_name;
+ char *memory_name;
+ uint32 memory_flags;
+ uint32 num_bytes_per_page;
+ uint32 mem_init_page_count;
+ uint32 mem_max_page_count;
+} AOTImportMemory;
+
+/**
+ * Memory information
+ */
+typedef struct AOTMemory {
+ /* memory info */
+ uint32 memory_flags;
+ uint32 num_bytes_per_page;
+ uint32 mem_init_page_count;
+ uint32 mem_max_page_count;
+} AOTMemory;
+
+/**
+ * A segment of memory init data
+ */
+typedef struct AOTMemInitData {
+#if WASM_ENABLE_BULK_MEMORY != 0
+ /* Passive flag */
+ bool is_passive;
+ /* memory index */
+ uint32 memory_index;
+#endif
+ /* Start address of init data */
+ AOTInitExpr offset;
+ /* Byte count */
+ uint32 byte_count;
+ /* Byte array */
+ uint8 bytes[1];
+} AOTMemInitData;
+
+/**
+ * Import table
+ */
+typedef struct AOTImportTable {
+ char *module_name;
+ char *table_name;
+ uint32 elem_type;
+ uint32 table_flags;
+ uint32 table_init_size;
+ uint32 table_max_size;
+ bool possible_grow;
+} AOTImportTable;
+
+/**
+ * Table
+ */
+typedef struct AOTTable {
+ uint32 elem_type;
+ uint32 table_flags;
+ uint32 table_init_size;
+ uint32 table_max_size;
+ bool possible_grow;
+} AOTTable;
+
+/**
+ * A segment of table init data
+ */
+typedef struct AOTTableInitData {
+ /* 0 to 7 */
+ uint32 mode;
+    /* funcref or externref; an elemkind entry is treated as funcref */
+ uint32 elem_type;
+ bool is_dropped;
+ /* optional, only for active */
+ uint32 table_index;
+ /* Start address of init data */
+ AOTInitExpr offset;
+ /* Function index count */
+ uint32 func_index_count;
+ /* Function index array */
+ uint32 func_indexes[1];
+} AOTTableInitData;
+
+/**
+ * Import global variable
+ */
+typedef struct AOTImportGlobal {
+ char *module_name;
+ char *global_name;
+ /* VALUE_TYPE_I32/I64/F32/F64 */
+ uint8 type;
+ bool is_mutable;
+ uint32 size;
+    /* Offset of this global in the global data area */
+    uint32 data_offset;
+    /* global value after linking */
+    WASMValue global_data_linked;
+ bool is_linked;
+} AOTImportGlobal;
+
+/**
+ * Global variable
+ */
+typedef struct AOTGlobal {
+ /* VALUE_TYPE_I32/I64/F32/F64 */
+ uint8 type;
+ bool is_mutable;
+ uint32 size;
+    /* Offset of this global in the global data area */
+ uint32 data_offset;
+ AOTInitExpr init_expr;
+} AOTGlobal;
+
+/**
+ * Import function
+ */
+typedef struct AOTImportFunc {
+ char *module_name;
+ char *func_name;
+ AOTFuncType *func_type;
+ uint32 func_type_index;
+    /* function pointer after linking */
+ void *func_ptr_linked;
+ /* signature from registered native symbols */
+ const char *signature;
+ /* attachment */
+ void *attachment;
+ bool call_conv_raw;
+ bool call_conv_wasm_c_api;
+ bool wasm_c_api_with_env;
+} AOTImportFunc;
+
+/**
+ * Function
+ */
+typedef struct AOTFunc {
+ AOTFuncType *func_type;
+ uint32 func_type_index;
+ uint32 local_count;
+ uint8 *local_types;
+ uint16 param_cell_num;
+ uint16 local_cell_num;
+ uint32 code_size;
+ uint8 *code;
+} AOTFunc;
+
+typedef struct AOTCompData {
+ /* Import memories */
+ uint32 import_memory_count;
+ AOTImportMemory *import_memories;
+
+ /* Memories */
+ uint32 memory_count;
+ AOTMemory *memories;
+
+ /* Memory init data info */
+ uint32 mem_init_data_count;
+ AOTMemInitData **mem_init_data_list;
+
+ /* Import tables */
+ uint32 import_table_count;
+ AOTImportTable *import_tables;
+
+ /* Tables */
+ uint32 table_count;
+ AOTTable *tables;
+
+ /* Table init data info */
+ uint32 table_init_data_count;
+ AOTTableInitData **table_init_data_list;
+
+ /* Import globals */
+ uint32 import_global_count;
+ AOTImportGlobal *import_globals;
+
+ /* Globals */
+ uint32 global_count;
+ AOTGlobal *globals;
+
+ /* Function types */
+ uint32 func_type_count;
+ AOTFuncType **func_types;
+
+ /* Import functions */
+ uint32 import_func_count;
+ AOTImportFunc *import_funcs;
+
+ /* Functions */
+ uint32 func_count;
+ AOTFunc **funcs;
+
+ /* Custom name sections */
+ const uint8 *name_section_buf;
+ const uint8 *name_section_buf_end;
+ uint8 *aot_name_section_buf;
+ uint32 aot_name_section_size;
+
+ uint32 global_data_size;
+
+ uint32 start_func_index;
+ uint32 malloc_func_index;
+ uint32 free_func_index;
+ uint32 retain_func_index;
+
+ uint32 aux_data_end_global_index;
+ uint32 aux_data_end;
+ uint32 aux_heap_base_global_index;
+ uint32 aux_heap_base;
+ uint32 aux_stack_top_global_index;
+ uint32 aux_stack_bottom;
+ uint32 aux_stack_size;
+
+ WASMModule *wasm_module;
+#if WASM_ENABLE_DEBUG_AOT != 0
+ dwar_extractor_handle_t extractor;
+#endif
+} AOTCompData;
+
+typedef struct AOTNativeSymbol {
+ bh_list_link link;
+ char symbol[32];
+ int32 index;
+} AOTNativeSymbol;
+
+AOTCompData *
+aot_create_comp_data(WASMModule *module);
+
+void
+aot_destroy_comp_data(AOTCompData *comp_data);
+
+char *
+aot_get_last_error();
+
+void
+aot_set_last_error(const char *error);
+
+void
+aot_set_last_error_v(const char *format, ...);
+
+#if BH_DEBUG != 0
+#define HANDLE_FAILURE(callee) \
+ do { \
+ aot_set_last_error_v("call %s failed in %s:%d", (callee), \
+ __FUNCTION__, __LINE__); \
+ } while (0)
+#else
+#define HANDLE_FAILURE(callee) \
+ do { \
+ aot_set_last_error_v("call %s failed", (callee)); \
+ } while (0)
+#endif
+
+static inline uint32
+aot_get_imp_tbl_data_slots(const AOTImportTable *tbl, bool is_jit_mode)
+{
+#if WASM_ENABLE_MULTI_MODULE != 0
+ if (is_jit_mode)
+ return tbl->table_max_size;
+#else
+ (void)is_jit_mode;
+#endif
+ return tbl->possible_grow ? tbl->table_max_size : tbl->table_init_size;
+}
+
+static inline uint32
+aot_get_tbl_data_slots(const AOTTable *tbl, bool is_jit_mode)
+{
+#if WASM_ENABLE_MULTI_MODULE != 0
+ if (is_jit_mode)
+ return tbl->table_max_size;
+#else
+ (void)is_jit_mode;
+#endif
+ return tbl->possible_grow ? tbl->table_max_size : tbl->table_init_size;
+}
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_H_ */
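
[Editor's example] The two inline helpers at the bottom of aot.h decide how many table slots the generated code must reserve: the full table_max_size when the table may grow (and always under JIT when multi-module support is compiled in), otherwise just table_init_size. A small sketch with made-up values:

    AOTTable tbl = { 0 };
    uint32 fixed, growable;

    tbl.table_init_size = 10;
    tbl.table_max_size = 20;

    tbl.possible_grow = false;
    fixed = aot_get_tbl_data_slots(&tbl, false);    /* -> 10 */

    tbl.possible_grow = true;
    growable = aot_get_tbl_data_slots(&tbl, false); /* -> 20 */
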
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.c
new file mode 100644
index 000000000..06235fe31
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.c
@@ -0,0 +1,2921 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_compiler.h"
+#include "aot_emit_compare.h"
+#include "aot_emit_conversion.h"
+#include "aot_emit_memory.h"
+#include "aot_emit_variable.h"
+#include "aot_emit_const.h"
+#include "aot_emit_exception.h"
+#include "aot_emit_numberic.h"
+#include "aot_emit_control.h"
+#include "aot_emit_function.h"
+#include "aot_emit_parametric.h"
+#include "aot_emit_table.h"
+#include "simd/simd_access_lanes.h"
+#include "simd/simd_bitmask_extracts.h"
+#include "simd/simd_bit_shifts.h"
+#include "simd/simd_bitwise_ops.h"
+#include "simd/simd_bool_reductions.h"
+#include "simd/simd_comparisons.h"
+#include "simd/simd_conversions.h"
+#include "simd/simd_construct_values.h"
+#include "simd/simd_conversions.h"
+#include "simd/simd_floating_point.h"
+#include "simd/simd_int_arith.h"
+#include "simd/simd_load_store.h"
+#include "simd/simd_sat_int_arith.h"
+#include "../aot/aot_runtime.h"
+#include "../interpreter/wasm_opcode.h"
+#include <errno.h>
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+#include "debug/dwarf_extractor.h"
+#endif
+
+#define CHECK_BUF(buf, buf_end, length) \
+ do { \
+ if (buf + length > buf_end) { \
+ aot_set_last_error("read leb failed: unexpected end."); \
+ return false; \
+ } \
+ } while (0)
+
+static bool
+read_leb(const uint8 *buf, const uint8 *buf_end, uint32 *p_offset,
+ uint32 maxbits, bool sign, uint64 *p_result)
+{
+ uint64 result = 0;
+ uint32 shift = 0;
+ uint32 bcnt = 0;
+ uint64 byte;
+
+ while (true) {
+        CHECK_BUF(buf + *p_offset, buf_end, 1);
+ byte = buf[*p_offset];
+ *p_offset += 1;
+ result |= ((byte & 0x7f) << shift);
+ shift += 7;
+ if ((byte & 0x80) == 0) {
+ break;
+ }
+ bcnt += 1;
+ }
+ if (bcnt > (maxbits + 6) / 7) {
+ aot_set_last_error("read leb failed: "
+ "integer representation too long");
+ return false;
+ }
+ if (sign && (shift < maxbits) && (byte & 0x40)) {
+ /* Sign extend */
+ result |= (~((uint64)0)) << shift;
+ }
+ *p_result = result;
+ return true;
+}
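
[Editor's example] read_leb() is a standard LEB128 decoder: seven payload bits per byte, least-significant group first, the high bit as a continuation flag, plus optional sign extension. As a concrete check, the unsigned encoding 0xE5 0x8E 0x26 decodes to 0x65 | (0x0E << 7) | (0x26 << 14) = 624485, and a sketch of calling the function directly:

    uint8 buf[] = { 0xE5, 0x8E, 0x26 };
    uint32 offset = 0;
    uint64 result;
    if (read_leb(buf, buf + sizeof(buf), &offset, 32, false, &result)) {
        /* result == 624485, offset == 3 */
    }
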
+
+#define read_leb_uint32(p, p_end, res) \
+ do { \
+ uint32 off = 0; \
+ uint64 res64; \
+ if (!read_leb(p, p_end, &off, 32, false, &res64)) \
+ return false; \
+ p += off; \
+ res = (uint32)res64; \
+ } while (0)
+
+#define read_leb_int32(p, p_end, res) \
+ do { \
+ uint32 off = 0; \
+ uint64 res64; \
+ if (!read_leb(p, p_end, &off, 32, true, &res64)) \
+ return false; \
+ p += off; \
+ res = (int32)res64; \
+ } while (0)
+
+#define read_leb_int64(p, p_end, res) \
+ do { \
+ uint32 off = 0; \
+ uint64 res64; \
+ if (!read_leb(p, p_end, &off, 64, true, &res64)) \
+ return false; \
+ p += off; \
+ res = (int64)res64; \
+ } while (0)
+
+/**
+ * Since wamrc uses a full-featured Wasm loader,
+ * add a post-validator here to run checks according
+ * to options, like enable_tail_call, enable_ref_types,
+ * and so on.
+ */
+static bool
+aot_validate_wasm(AOTCompContext *comp_ctx)
+{
+ if (!comp_ctx->enable_ref_types) {
+        /* Multiple tables are not supported unless reference types are
+           enabled */
+ if (comp_ctx->comp_data->import_table_count
+ + comp_ctx->comp_data->table_count
+ > 1) {
+ aot_set_last_error("multiple tables");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#define COMPILE_ATOMIC_RMW(OP, NAME) \
+ case WASM_OP_ATOMIC_RMW_I32_##NAME: \
+ bytes = 4; \
+ op_type = VALUE_TYPE_I32; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I64_##NAME: \
+ bytes = 8; \
+ op_type = VALUE_TYPE_I64; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I32_##NAME##8_U: \
+ bytes = 1; \
+ op_type = VALUE_TYPE_I32; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I32_##NAME##16_U: \
+ bytes = 2; \
+ op_type = VALUE_TYPE_I32; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I64_##NAME##8_U: \
+ bytes = 1; \
+ op_type = VALUE_TYPE_I64; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I64_##NAME##16_U: \
+ bytes = 2; \
+ op_type = VALUE_TYPE_I64; \
+ goto OP_ATOMIC_##OP; \
+ case WASM_OP_ATOMIC_RMW_I64_##NAME##32_U: \
+ bytes = 4; \
+ op_type = VALUE_TYPE_I64; \
+ OP_ATOMIC_##OP : bin_op = LLVMAtomicRMWBinOp##OP; \
+ goto build_atomic_rmw;
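
[Editor's example] COMPILE_ATOMIC_RMW() collapses the seven width/type variants of each atomic read-modify-write opcode into one code path: every case records the access width and value type, then jumps to a shared label that selects the LLVM bin op and falls through to build_atomic_rmw. For instance, COMPILE_ATOMIC_RMW(Add, ADD) expands (abridged) to:

    case WASM_OP_ATOMIC_RMW_I32_ADD:
        bytes = 4;
        op_type = VALUE_TYPE_I32;
        goto OP_ATOMIC_Add;
    /* ... five more sized/typed cases ... */
    case WASM_OP_ATOMIC_RMW_I64_ADD32_U:
        bytes = 4;
        op_type = VALUE_TYPE_I64;
    OP_ATOMIC_Add:
        bin_op = LLVMAtomicRMWBinOpAdd;
        goto build_atomic_rmw;
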
+
+static bool
+aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
+{
+ AOTFuncContext *func_ctx = comp_ctx->func_ctxes[func_index];
+ uint8 *frame_ip = func_ctx->aot_func->code, opcode, *p_f32, *p_f64;
+ uint8 *frame_ip_end = frame_ip + func_ctx->aot_func->code_size;
+ uint8 *param_types = NULL;
+ uint8 *result_types = NULL;
+ uint8 value_type;
+ uint16 param_count;
+ uint16 result_count;
+ uint32 br_depth, *br_depths, br_count;
+ uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i;
+ uint32 bytes = 4, align, offset;
+ uint32 type_index;
+ bool sign = true;
+ int32 i32_const;
+ int64 i64_const;
+ float32 f32_const;
+ float64 f64_const;
+ AOTFuncType *func_type = NULL;
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMMetadataRef location;
+#endif
+
+ /* Start to translate the opcodes */
+ LLVMPositionBuilderAtEnd(
+ comp_ctx->builder,
+ func_ctx->block_stack.block_list_head->llvm_entry_block);
+ while (frame_ip < frame_ip_end) {
+ opcode = *frame_ip++;
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ location = dwarf_gen_location(
+ comp_ctx, func_ctx,
+ (frame_ip - 1) - comp_ctx->comp_data->wasm_module->buf_code);
+ LLVMSetCurrentDebugLocation2(comp_ctx->builder, location);
+#endif
+
+ switch (opcode) {
+ case WASM_OP_UNREACHABLE:
+ if (!aot_compile_op_unreachable(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_NOP:
+ break;
+
+ case WASM_OP_BLOCK:
+ case WASM_OP_LOOP:
+ case WASM_OP_IF:
+ {
+ value_type = *frame_ip++;
+ if (value_type == VALUE_TYPE_I32 || value_type == VALUE_TYPE_I64
+ || value_type == VALUE_TYPE_F32
+ || value_type == VALUE_TYPE_F64
+ || value_type == VALUE_TYPE_V128
+ || value_type == VALUE_TYPE_VOID
+ || value_type == VALUE_TYPE_FUNCREF
+ || value_type == VALUE_TYPE_EXTERNREF) {
+ param_count = 0;
+ param_types = NULL;
+ if (value_type == VALUE_TYPE_VOID) {
+ result_count = 0;
+ result_types = NULL;
+ }
+ else {
+ result_count = 1;
+ result_types = &value_type;
+ }
+ }
+ else {
+ frame_ip--;
+ read_leb_uint32(frame_ip, frame_ip_end, type_index);
+ func_type = comp_ctx->comp_data->func_types[type_index];
+ param_count = func_type->param_count;
+ param_types = func_type->types;
+ result_count = func_type->result_count;
+ result_types = func_type->types + param_count;
+ }
+ if (!aot_compile_op_block(
+ comp_ctx, func_ctx, &frame_ip, frame_ip_end,
+ (uint32)(LABEL_TYPE_BLOCK + opcode - WASM_OP_BLOCK),
+ param_count, param_types, result_count, result_types))
+ return false;
+ break;
+ }
+
+ case EXT_OP_BLOCK:
+ case EXT_OP_LOOP:
+ case EXT_OP_IF:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, type_index);
+ func_type = comp_ctx->comp_data->func_types[type_index];
+ param_count = func_type->param_count;
+ param_types = func_type->types;
+ result_count = func_type->result_count;
+ result_types = func_type->types + param_count;
+ if (!aot_compile_op_block(
+ comp_ctx, func_ctx, &frame_ip, frame_ip_end,
+ (uint32)(LABEL_TYPE_BLOCK + opcode - EXT_OP_BLOCK),
+ param_count, param_types, result_count, result_types))
+ return false;
+ break;
+ }
+
+ case WASM_OP_ELSE:
+ if (!aot_compile_op_else(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_END:
+ if (!aot_compile_op_end(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_BR:
+ read_leb_uint32(frame_ip, frame_ip_end, br_depth);
+ if (!aot_compile_op_br(comp_ctx, func_ctx, br_depth, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_BR_IF:
+ read_leb_uint32(frame_ip, frame_ip_end, br_depth);
+ if (!aot_compile_op_br_if(comp_ctx, func_ctx, br_depth,
+ &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_BR_TABLE:
+ read_leb_uint32(frame_ip, frame_ip_end, br_count);
+ if (!(br_depths = wasm_runtime_malloc((uint32)sizeof(uint32)
+ * (br_count + 1)))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+#if WASM_ENABLE_FAST_INTERP != 0
+ for (i = 0; i <= br_count; i++)
+ read_leb_uint32(frame_ip, frame_ip_end, br_depths[i]);
+#else
+ for (i = 0; i <= br_count; i++)
+ br_depths[i] = *frame_ip++;
+#endif
+
+ if (!aot_compile_op_br_table(comp_ctx, func_ctx, br_depths,
+ br_count, &frame_ip)) {
+ wasm_runtime_free(br_depths);
+ return false;
+ }
+
+ wasm_runtime_free(br_depths);
+ break;
+
+#if WASM_ENABLE_FAST_INTERP == 0
+ case EXT_OP_BR_TABLE_CACHE:
+ {
+ BrTableCache *node = bh_list_first_elem(
+ comp_ctx->comp_data->wasm_module->br_table_cache_list);
+ BrTableCache *node_next;
+ uint8 *p_opcode = frame_ip - 1;
+
+ read_leb_uint32(frame_ip, frame_ip_end, br_count);
+
+ while (node) {
+ node_next = bh_list_elem_next(node);
+ if (node->br_table_op_addr == p_opcode) {
+ br_depths = node->br_depths;
+ if (!aot_compile_op_br_table(comp_ctx, func_ctx,
+ br_depths, br_count,
+ &frame_ip)) {
+ return false;
+ }
+ break;
+ }
+ node = node_next;
+ }
+ bh_assert(node);
+
+ break;
+ }
+#endif
+
+ case WASM_OP_RETURN:
+ if (!aot_compile_op_return(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_CALL:
+ read_leb_uint32(frame_ip, frame_ip_end, func_idx);
+ if (!aot_compile_op_call(comp_ctx, func_ctx, func_idx, false))
+ return false;
+ break;
+
+ case WASM_OP_CALL_INDIRECT:
+ {
+ uint32 tbl_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, type_idx);
+
+#if WASM_ENABLE_REF_TYPES != 0
+ if (comp_ctx->enable_ref_types) {
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ }
+ else
+#endif
+ {
+ frame_ip++;
+ tbl_idx = 0;
+ }
+
+ if (!aot_compile_op_call_indirect(comp_ctx, func_ctx, type_idx,
+ tbl_idx))
+ return false;
+ break;
+ }
+
+#if WASM_ENABLE_TAIL_CALL != 0
+ case WASM_OP_RETURN_CALL:
+ if (!comp_ctx->enable_tail_call) {
+ aot_set_last_error("unsupported opcode");
+ return false;
+ }
+ read_leb_uint32(frame_ip, frame_ip_end, func_idx);
+ if (!aot_compile_op_call(comp_ctx, func_ctx, func_idx, true))
+ return false;
+ if (!aot_compile_op_return(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_RETURN_CALL_INDIRECT:
+ {
+ uint32 tbl_idx;
+
+ if (!comp_ctx->enable_tail_call) {
+ aot_set_last_error("unsupported opcode");
+ return false;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, type_idx);
+#if WASM_ENABLE_REF_TYPES != 0
+ if (comp_ctx->enable_ref_types) {
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ }
+ else
+#endif
+ {
+ frame_ip++;
+ tbl_idx = 0;
+ }
+
+ if (!aot_compile_op_call_indirect(comp_ctx, func_ctx, type_idx,
+ tbl_idx))
+ return false;
+ if (!aot_compile_op_return(comp_ctx, func_ctx, &frame_ip))
+ return false;
+ break;
+ }
+#endif /* end of WASM_ENABLE_TAIL_CALL */
+
+ case WASM_OP_DROP:
+ if (!aot_compile_op_drop(comp_ctx, func_ctx, true))
+ return false;
+ break;
+
+ case WASM_OP_DROP_64:
+ if (!aot_compile_op_drop(comp_ctx, func_ctx, false))
+ return false;
+ break;
+
+ case WASM_OP_SELECT:
+ if (!aot_compile_op_select(comp_ctx, func_ctx, true))
+ return false;
+ break;
+
+ case WASM_OP_SELECT_64:
+ if (!aot_compile_op_select(comp_ctx, func_ctx, false))
+ return false;
+ break;
+
+#if WASM_ENABLE_REF_TYPES != 0
+ case WASM_OP_SELECT_T:
+ {
+ uint32 vec_len;
+
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, vec_len);
+ bh_assert(vec_len == 1);
+ (void)vec_len;
+
+ type_idx = *frame_ip++;
+ if (!aot_compile_op_select(comp_ctx, func_ctx,
+ (type_idx != VALUE_TYPE_I64)
+ && (type_idx != VALUE_TYPE_F64)))
+ return false;
+ break;
+ }
+ case WASM_OP_TABLE_GET:
+ {
+ uint32 tbl_idx;
+
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_get(comp_ctx, func_ctx, tbl_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_TABLE_SET:
+ {
+ uint32 tbl_idx;
+
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_set(comp_ctx, func_ctx, tbl_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_REF_NULL:
+ {
+ uint32 type;
+
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, type);
+
+ if (!aot_compile_op_ref_null(comp_ctx, func_ctx))
+ return false;
+
+ (void)type;
+ break;
+ }
+ case WASM_OP_REF_IS_NULL:
+ {
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ if (!aot_compile_op_ref_is_null(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+ case WASM_OP_REF_FUNC:
+ {
+ if (!comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+
+ read_leb_uint32(frame_ip, frame_ip_end, func_idx);
+ if (!aot_compile_op_ref_func(comp_ctx, func_ctx, func_idx))
+ return false;
+ break;
+ }
+#endif
+
+ case WASM_OP_GET_LOCAL:
+ read_leb_uint32(frame_ip, frame_ip_end, local_idx);
+ if (!aot_compile_op_get_local(comp_ctx, func_ctx, local_idx))
+ return false;
+ break;
+
+ case WASM_OP_SET_LOCAL:
+ read_leb_uint32(frame_ip, frame_ip_end, local_idx);
+ if (!aot_compile_op_set_local(comp_ctx, func_ctx, local_idx))
+ return false;
+ break;
+
+ case WASM_OP_TEE_LOCAL:
+ read_leb_uint32(frame_ip, frame_ip_end, local_idx);
+ if (!aot_compile_op_tee_local(comp_ctx, func_ctx, local_idx))
+ return false;
+ break;
+
+ case WASM_OP_GET_GLOBAL:
+ case WASM_OP_GET_GLOBAL_64:
+ read_leb_uint32(frame_ip, frame_ip_end, global_idx);
+ if (!aot_compile_op_get_global(comp_ctx, func_ctx, global_idx))
+ return false;
+ break;
+
+ case WASM_OP_SET_GLOBAL:
+ case WASM_OP_SET_GLOBAL_64:
+ case WASM_OP_SET_GLOBAL_AUX_STACK:
+ read_leb_uint32(frame_ip, frame_ip_end, global_idx);
+ if (!aot_compile_op_set_global(
+ comp_ctx, func_ctx, global_idx,
+ opcode == WASM_OP_SET_GLOBAL_AUX_STACK ? true : false))
+ return false;
+ break;
+
+ case WASM_OP_I32_LOAD:
+ bytes = 4;
+ sign = true;
+ goto op_i32_load;
+ case WASM_OP_I32_LOAD8_S:
+ case WASM_OP_I32_LOAD8_U:
+ bytes = 1;
+ sign = (opcode == WASM_OP_I32_LOAD8_S) ? true : false;
+ goto op_i32_load;
+ case WASM_OP_I32_LOAD16_S:
+ case WASM_OP_I32_LOAD16_U:
+ bytes = 2;
+ sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false;
+ op_i32_load:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset,
+ bytes, sign, false))
+ return false;
+ break;
+
+ case WASM_OP_I64_LOAD:
+ bytes = 8;
+ sign = true;
+ goto op_i64_load;
+ case WASM_OP_I64_LOAD8_S:
+ case WASM_OP_I64_LOAD8_U:
+ bytes = 1;
+ sign = (opcode == WASM_OP_I64_LOAD8_S) ? true : false;
+ goto op_i64_load;
+ case WASM_OP_I64_LOAD16_S:
+ case WASM_OP_I64_LOAD16_U:
+ bytes = 2;
+ sign = (opcode == WASM_OP_I64_LOAD16_S) ? true : false;
+ goto op_i64_load;
+ case WASM_OP_I64_LOAD32_S:
+ case WASM_OP_I64_LOAD32_U:
+ bytes = 4;
+ sign = (opcode == WASM_OP_I64_LOAD32_S) ? true : false;
+ op_i64_load:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset,
+ bytes, sign, false))
+ return false;
+ break;
+
+ case WASM_OP_F32_LOAD:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_f32_load(comp_ctx, func_ctx, align, offset))
+ return false;
+ break;
+
+ case WASM_OP_F64_LOAD:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_f64_load(comp_ctx, func_ctx, align, offset))
+ return false;
+ break;
+
+ case WASM_OP_I32_STORE:
+ bytes = 4;
+ goto op_i32_store;
+ case WASM_OP_I32_STORE8:
+ bytes = 1;
+ goto op_i32_store;
+ case WASM_OP_I32_STORE16:
+ bytes = 2;
+ op_i32_store:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset,
+ bytes, false))
+ return false;
+ break;
+
+ case WASM_OP_I64_STORE:
+ bytes = 8;
+ goto op_i64_store;
+ case WASM_OP_I64_STORE8:
+ bytes = 1;
+ goto op_i64_store;
+ case WASM_OP_I64_STORE16:
+ bytes = 2;
+ goto op_i64_store;
+ case WASM_OP_I64_STORE32:
+ bytes = 4;
+ op_i64_store:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset,
+ bytes, false))
+ return false;
+ break;
+
+ case WASM_OP_F32_STORE:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_f32_store(comp_ctx, func_ctx, align,
+ offset))
+ return false;
+ break;
+
+ case WASM_OP_F64_STORE:
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_op_f64_store(comp_ctx, func_ctx, align,
+ offset))
+ return false;
+ break;
+
+ case WASM_OP_MEMORY_SIZE:
+ read_leb_uint32(frame_ip, frame_ip_end, mem_idx);
+ if (!aot_compile_op_memory_size(comp_ctx, func_ctx))
+ return false;
+ (void)mem_idx;
+ break;
+
+ case WASM_OP_MEMORY_GROW:
+ read_leb_uint32(frame_ip, frame_ip_end, mem_idx);
+ if (!aot_compile_op_memory_grow(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_CONST:
+ read_leb_int32(frame_ip, frame_ip_end, i32_const);
+ if (!aot_compile_op_i32_const(comp_ctx, func_ctx, i32_const))
+ return false;
+ break;
+
+ case WASM_OP_I64_CONST:
+ read_leb_int64(frame_ip, frame_ip_end, i64_const);
+ if (!aot_compile_op_i64_const(comp_ctx, func_ctx, i64_const))
+ return false;
+ break;
+
+ case WASM_OP_F32_CONST:
+ p_f32 = (uint8 *)&f32_const;
+ for (i = 0; i < sizeof(float32); i++)
+ *p_f32++ = *frame_ip++;
+ if (!aot_compile_op_f32_const(comp_ctx, func_ctx, f32_const))
+ return false;
+ break;
+
+ case WASM_OP_F64_CONST:
+ p_f64 = (uint8 *)&f64_const;
+ for (i = 0; i < sizeof(float64); i++)
+ *p_f64++ = *frame_ip++;
+ if (!aot_compile_op_f64_const(comp_ctx, func_ctx, f64_const))
+ return false;
+ break;
+
+ case WASM_OP_I32_EQZ:
+ case WASM_OP_I32_EQ:
+ case WASM_OP_I32_NE:
+ case WASM_OP_I32_LT_S:
+ case WASM_OP_I32_LT_U:
+ case WASM_OP_I32_GT_S:
+ case WASM_OP_I32_GT_U:
+ case WASM_OP_I32_LE_S:
+ case WASM_OP_I32_LE_U:
+ case WASM_OP_I32_GE_S:
+ case WASM_OP_I32_GE_U:
+ if (!aot_compile_op_i32_compare(
+ comp_ctx, func_ctx, INT_EQZ + opcode - WASM_OP_I32_EQZ))
+ return false;
+ break;
+
+ case WASM_OP_I64_EQZ:
+ case WASM_OP_I64_EQ:
+ case WASM_OP_I64_NE:
+ case WASM_OP_I64_LT_S:
+ case WASM_OP_I64_LT_U:
+ case WASM_OP_I64_GT_S:
+ case WASM_OP_I64_GT_U:
+ case WASM_OP_I64_LE_S:
+ case WASM_OP_I64_LE_U:
+ case WASM_OP_I64_GE_S:
+ case WASM_OP_I64_GE_U:
+ if (!aot_compile_op_i64_compare(
+ comp_ctx, func_ctx, INT_EQZ + opcode - WASM_OP_I64_EQZ))
+ return false;
+ break;
+
+ case WASM_OP_F32_EQ:
+ case WASM_OP_F32_NE:
+ case WASM_OP_F32_LT:
+ case WASM_OP_F32_GT:
+ case WASM_OP_F32_LE:
+ case WASM_OP_F32_GE:
+ if (!aot_compile_op_f32_compare(
+ comp_ctx, func_ctx, FLOAT_EQ + opcode - WASM_OP_F32_EQ))
+ return false;
+ break;
+
+ case WASM_OP_F64_EQ:
+ case WASM_OP_F64_NE:
+ case WASM_OP_F64_LT:
+ case WASM_OP_F64_GT:
+ case WASM_OP_F64_LE:
+ case WASM_OP_F64_GE:
+ if (!aot_compile_op_f64_compare(
+ comp_ctx, func_ctx, FLOAT_EQ + opcode - WASM_OP_F64_EQ))
+ return false;
+ break;
+
+ case WASM_OP_I32_CLZ:
+ if (!aot_compile_op_i32_clz(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_CTZ:
+ if (!aot_compile_op_i32_ctz(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_POPCNT:
+ if (!aot_compile_op_i32_popcnt(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_ADD:
+ case WASM_OP_I32_SUB:
+ case WASM_OP_I32_MUL:
+ case WASM_OP_I32_DIV_S:
+ case WASM_OP_I32_DIV_U:
+ case WASM_OP_I32_REM_S:
+ case WASM_OP_I32_REM_U:
+ if (!aot_compile_op_i32_arithmetic(
+ comp_ctx, func_ctx, INT_ADD + opcode - WASM_OP_I32_ADD,
+ &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_I32_AND:
+ case WASM_OP_I32_OR:
+ case WASM_OP_I32_XOR:
+ if (!aot_compile_op_i32_bitwise(
+ comp_ctx, func_ctx, INT_SHL + opcode - WASM_OP_I32_AND))
+ return false;
+ break;
+
+ case WASM_OP_I32_SHL:
+ case WASM_OP_I32_SHR_S:
+ case WASM_OP_I32_SHR_U:
+ case WASM_OP_I32_ROTL:
+ case WASM_OP_I32_ROTR:
+ if (!aot_compile_op_i32_shift(
+ comp_ctx, func_ctx, INT_SHL + opcode - WASM_OP_I32_SHL))
+ return false;
+ break;
+
+ case WASM_OP_I64_CLZ:
+ if (!aot_compile_op_i64_clz(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I64_CTZ:
+ if (!aot_compile_op_i64_ctz(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I64_POPCNT:
+ if (!aot_compile_op_i64_popcnt(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I64_ADD:
+ case WASM_OP_I64_SUB:
+ case WASM_OP_I64_MUL:
+ case WASM_OP_I64_DIV_S:
+ case WASM_OP_I64_DIV_U:
+ case WASM_OP_I64_REM_S:
+ case WASM_OP_I64_REM_U:
+ if (!aot_compile_op_i64_arithmetic(
+ comp_ctx, func_ctx, INT_ADD + opcode - WASM_OP_I64_ADD,
+ &frame_ip))
+ return false;
+ break;
+
+ case WASM_OP_I64_AND:
+ case WASM_OP_I64_OR:
+ case WASM_OP_I64_XOR:
+ if (!aot_compile_op_i64_bitwise(
+ comp_ctx, func_ctx, INT_SHL + opcode - WASM_OP_I64_AND))
+ return false;
+ break;
+
+ case WASM_OP_I64_SHL:
+ case WASM_OP_I64_SHR_S:
+ case WASM_OP_I64_SHR_U:
+ case WASM_OP_I64_ROTL:
+ case WASM_OP_I64_ROTR:
+ if (!aot_compile_op_i64_shift(
+ comp_ctx, func_ctx, INT_SHL + opcode - WASM_OP_I64_SHL))
+ return false;
+ break;
+
+ case WASM_OP_F32_ABS:
+ case WASM_OP_F32_NEG:
+ case WASM_OP_F32_CEIL:
+ case WASM_OP_F32_FLOOR:
+ case WASM_OP_F32_TRUNC:
+ case WASM_OP_F32_NEAREST:
+ case WASM_OP_F32_SQRT:
+ if (!aot_compile_op_f32_math(comp_ctx, func_ctx,
+ FLOAT_ABS + opcode
+ - WASM_OP_F32_ABS))
+ return false;
+ break;
+
+ case WASM_OP_F32_ADD:
+ case WASM_OP_F32_SUB:
+ case WASM_OP_F32_MUL:
+ case WASM_OP_F32_DIV:
+ case WASM_OP_F32_MIN:
+ case WASM_OP_F32_MAX:
+ if (!aot_compile_op_f32_arithmetic(comp_ctx, func_ctx,
+ FLOAT_ADD + opcode
+ - WASM_OP_F32_ADD))
+ return false;
+ break;
+
+ case WASM_OP_F32_COPYSIGN:
+ if (!aot_compile_op_f32_copysign(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_F64_ABS:
+ case WASM_OP_F64_NEG:
+ case WASM_OP_F64_CEIL:
+ case WASM_OP_F64_FLOOR:
+ case WASM_OP_F64_TRUNC:
+ case WASM_OP_F64_NEAREST:
+ case WASM_OP_F64_SQRT:
+ if (!aot_compile_op_f64_math(comp_ctx, func_ctx,
+ FLOAT_ABS + opcode
+ - WASM_OP_F64_ABS))
+ return false;
+ break;
+
+ case WASM_OP_F64_ADD:
+ case WASM_OP_F64_SUB:
+ case WASM_OP_F64_MUL:
+ case WASM_OP_F64_DIV:
+ case WASM_OP_F64_MIN:
+ case WASM_OP_F64_MAX:
+ if (!aot_compile_op_f64_arithmetic(comp_ctx, func_ctx,
+ FLOAT_ADD + opcode
+ - WASM_OP_F64_ADD))
+ return false;
+ break;
+
+ case WASM_OP_F64_COPYSIGN:
+ if (!aot_compile_op_f64_copysign(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_WRAP_I64:
+ if (!aot_compile_op_i32_wrap_i64(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_TRUNC_S_F32:
+ case WASM_OP_I32_TRUNC_U_F32:
+ sign = (opcode == WASM_OP_I32_TRUNC_S_F32) ? true : false;
+ if (!aot_compile_op_i32_trunc_f32(comp_ctx, func_ctx, sign,
+ false))
+ return false;
+ break;
+
+ case WASM_OP_I32_TRUNC_S_F64:
+ case WASM_OP_I32_TRUNC_U_F64:
+ sign = (opcode == WASM_OP_I32_TRUNC_S_F64) ? true : false;
+ if (!aot_compile_op_i32_trunc_f64(comp_ctx, func_ctx, sign,
+ false))
+ return false;
+ break;
+
+ case WASM_OP_I64_EXTEND_S_I32:
+ case WASM_OP_I64_EXTEND_U_I32:
+ sign = (opcode == WASM_OP_I64_EXTEND_S_I32) ? true : false;
+ if (!aot_compile_op_i64_extend_i32(comp_ctx, func_ctx, sign))
+ return false;
+ break;
+
+ case WASM_OP_I64_TRUNC_S_F32:
+ case WASM_OP_I64_TRUNC_U_F32:
+ sign = (opcode == WASM_OP_I64_TRUNC_S_F32) ? true : false;
+ if (!aot_compile_op_i64_trunc_f32(comp_ctx, func_ctx, sign,
+ false))
+ return false;
+ break;
+
+ case WASM_OP_I64_TRUNC_S_F64:
+ case WASM_OP_I64_TRUNC_U_F64:
+ sign = (opcode == WASM_OP_I64_TRUNC_S_F64) ? true : false;
+ if (!aot_compile_op_i64_trunc_f64(comp_ctx, func_ctx, sign,
+ false))
+ return false;
+ break;
+
+ case WASM_OP_F32_CONVERT_S_I32:
+ case WASM_OP_F32_CONVERT_U_I32:
+ sign = (opcode == WASM_OP_F32_CONVERT_S_I32) ? true : false;
+ if (!aot_compile_op_f32_convert_i32(comp_ctx, func_ctx, sign))
+ return false;
+ break;
+
+ case WASM_OP_F32_CONVERT_S_I64:
+ case WASM_OP_F32_CONVERT_U_I64:
+ sign = (opcode == WASM_OP_F32_CONVERT_S_I64) ? true : false;
+ if (!aot_compile_op_f32_convert_i64(comp_ctx, func_ctx, sign))
+ return false;
+ break;
+
+ case WASM_OP_F32_DEMOTE_F64:
+ if (!aot_compile_op_f32_demote_f64(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_F64_CONVERT_S_I32:
+ case WASM_OP_F64_CONVERT_U_I32:
+ sign = (opcode == WASM_OP_F64_CONVERT_S_I32) ? true : false;
+ if (!aot_compile_op_f64_convert_i32(comp_ctx, func_ctx, sign))
+ return false;
+ break;
+
+ case WASM_OP_F64_CONVERT_S_I64:
+ case WASM_OP_F64_CONVERT_U_I64:
+ sign = (opcode == WASM_OP_F64_CONVERT_S_I64) ? true : false;
+ if (!aot_compile_op_f64_convert_i64(comp_ctx, func_ctx, sign))
+ return false;
+ break;
+
+ case WASM_OP_F64_PROMOTE_F32:
+ if (!aot_compile_op_f64_promote_f32(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_REINTERPRET_F32:
+ if (!aot_compile_op_i32_reinterpret_f32(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I64_REINTERPRET_F64:
+ if (!aot_compile_op_i64_reinterpret_f64(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_F32_REINTERPRET_I32:
+ if (!aot_compile_op_f32_reinterpret_i32(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_F64_REINTERPRET_I64:
+ if (!aot_compile_op_f64_reinterpret_i64(comp_ctx, func_ctx))
+ return false;
+ break;
+
+ case WASM_OP_I32_EXTEND8_S:
+ if (!aot_compile_op_i32_extend_i32(comp_ctx, func_ctx, 8))
+ return false;
+ break;
+
+ case WASM_OP_I32_EXTEND16_S:
+ if (!aot_compile_op_i32_extend_i32(comp_ctx, func_ctx, 16))
+ return false;
+ break;
+
+ case WASM_OP_I64_EXTEND8_S:
+ if (!aot_compile_op_i64_extend_i64(comp_ctx, func_ctx, 8))
+ return false;
+ break;
+
+ case WASM_OP_I64_EXTEND16_S:
+ if (!aot_compile_op_i64_extend_i64(comp_ctx, func_ctx, 16))
+ return false;
+ break;
+
+ case WASM_OP_I64_EXTEND32_S:
+ if (!aot_compile_op_i64_extend_i64(comp_ctx, func_ctx, 32))
+ return false;
+ break;
+
+ case WASM_OP_MISC_PREFIX:
+ {
+ uint32 opcode1;
+
+ read_leb_uint32(frame_ip, frame_ip_end, opcode1);
+ opcode = (uint32)opcode1;
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+ if (WASM_OP_MEMORY_INIT <= opcode
+ && opcode <= WASM_OP_MEMORY_FILL
+ && !comp_ctx->enable_bulk_memory) {
+ goto unsupport_bulk_memory;
+ }
+#endif
+
+#if WASM_ENABLE_REF_TYPES != 0
+ if (WASM_OP_TABLE_INIT <= opcode && opcode <= WASM_OP_TABLE_FILL
+ && !comp_ctx->enable_ref_types) {
+ goto unsupport_ref_types;
+ }
+#endif
+
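+            /* The 0xFC (misc) prefix covers the non-trapping
+               float-to-int conversions as well as the bulk memory and
+               table instructions; the range checks above reject the
+               latter two groups when the feature is compiled in but
+               disabled for this compilation. */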
+ switch (opcode) {
+ case WASM_OP_I32_TRUNC_SAT_S_F32:
+ case WASM_OP_I32_TRUNC_SAT_U_F32:
+ sign = (opcode == WASM_OP_I32_TRUNC_SAT_S_F32) ? true
+ : false;
+ if (!aot_compile_op_i32_trunc_f32(comp_ctx, func_ctx,
+ sign, true))
+ return false;
+ break;
+ case WASM_OP_I32_TRUNC_SAT_S_F64:
+ case WASM_OP_I32_TRUNC_SAT_U_F64:
+ sign = (opcode == WASM_OP_I32_TRUNC_SAT_S_F64) ? true
+ : false;
+ if (!aot_compile_op_i32_trunc_f64(comp_ctx, func_ctx,
+ sign, true))
+ return false;
+ break;
+ case WASM_OP_I64_TRUNC_SAT_S_F32:
+ case WASM_OP_I64_TRUNC_SAT_U_F32:
+ sign = (opcode == WASM_OP_I64_TRUNC_SAT_S_F32) ? true
+ : false;
+ if (!aot_compile_op_i64_trunc_f32(comp_ctx, func_ctx,
+ sign, true))
+ return false;
+ break;
+ case WASM_OP_I64_TRUNC_SAT_S_F64:
+ case WASM_OP_I64_TRUNC_SAT_U_F64:
+ sign = (opcode == WASM_OP_I64_TRUNC_SAT_S_F64) ? true
+ : false;
+ if (!aot_compile_op_i64_trunc_f64(comp_ctx, func_ctx,
+ sign, true))
+ return false;
+ break;
+#if WASM_ENABLE_BULK_MEMORY != 0
+ case WASM_OP_MEMORY_INIT:
+ {
+ uint32 seg_index;
+ read_leb_uint32(frame_ip, frame_ip_end, seg_index);
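+                    /* skip the one-byte memory index (always 0x00 while
+                       multi-memory is not supported) */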
+ frame_ip++;
+ if (!aot_compile_op_memory_init(comp_ctx, func_ctx,
+ seg_index))
+ return false;
+ break;
+ }
+ case WASM_OP_DATA_DROP:
+ {
+ uint32 seg_index;
+ read_leb_uint32(frame_ip, frame_ip_end, seg_index);
+ if (!aot_compile_op_data_drop(comp_ctx, func_ctx,
+ seg_index))
+ return false;
+ break;
+ }
+ case WASM_OP_MEMORY_COPY:
+ {
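+                    /* skip the two memory index bytes (dst and src,
+                       both 0x00 while multi-memory is not supported) */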
+ frame_ip += 2;
+ if (!aot_compile_op_memory_copy(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+ case WASM_OP_MEMORY_FILL:
+ {
+ frame_ip++;
+ if (!aot_compile_op_memory_fill(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+#endif /* WASM_ENABLE_BULK_MEMORY */
+#if WASM_ENABLE_REF_TYPES != 0
+ case WASM_OP_TABLE_INIT:
+ {
+ uint32 tbl_idx, tbl_seg_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_seg_idx);
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_init(comp_ctx, func_ctx,
+ tbl_idx, tbl_seg_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_ELEM_DROP:
+ {
+ uint32 tbl_seg_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_seg_idx);
+ if (!aot_compile_op_elem_drop(comp_ctx, func_ctx,
+ tbl_seg_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_TABLE_COPY:
+ {
+ uint32 src_tbl_idx, dst_tbl_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, dst_tbl_idx);
+ read_leb_uint32(frame_ip, frame_ip_end, src_tbl_idx);
+ if (!aot_compile_op_table_copy(
+ comp_ctx, func_ctx, src_tbl_idx, dst_tbl_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_TABLE_GROW:
+ {
+ uint32 tbl_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_grow(comp_ctx, func_ctx,
+ tbl_idx))
+ return false;
+ break;
+ }
+
+ case WASM_OP_TABLE_SIZE:
+ {
+ uint32 tbl_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_size(comp_ctx, func_ctx,
+ tbl_idx))
+ return false;
+ break;
+ }
+ case WASM_OP_TABLE_FILL:
+ {
+ uint32 tbl_idx;
+
+ read_leb_uint32(frame_ip, frame_ip_end, tbl_idx);
+ if (!aot_compile_op_table_fill(comp_ctx, func_ctx,
+ tbl_idx))
+ return false;
+ break;
+ }
+#endif /* WASM_ENABLE_REF_TYPES */
+ default:
+ aot_set_last_error("unsupported opcode");
+ return false;
+ }
+ break;
+ }
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ case WASM_OP_ATOMIC_PREFIX:
+ {
+ uint8 bin_op, op_type;
+
+ if (frame_ip < frame_ip_end) {
+ opcode = *frame_ip++;
+ }
+ if (opcode != WASM_OP_ATOMIC_FENCE) {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ }
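+            /* every atomic memory access carries a memarg (align,
+               offset) immediate; atomic.fence instead has a single
+               trailing zero byte, skipped in its case below */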
+ switch (opcode) {
+ case WASM_OP_ATOMIC_WAIT32:
+ if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx,
+ VALUE_TYPE_I32, align,
+ offset, 4))
+ return false;
+ break;
+ case WASM_OP_ATOMIC_WAIT64:
+ if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx,
+ VALUE_TYPE_I64, align,
+ offset, 8))
+ return false;
+ break;
+ case WASM_OP_ATOMIC_NOTIFY:
+ if (!aot_compiler_op_atomic_notify(
+ comp_ctx, func_ctx, align, offset, bytes))
+ return false;
+ break;
+ case WASM_OP_ATOMIC_FENCE:
+ /* Skip memory index */
+ frame_ip++;
+ if (!aot_compiler_op_atomic_fence(comp_ctx, func_ctx))
+ return false;
+ break;
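+
+                /* the atomic load/store cases below set `bytes` to the
+                   access width, then jump to a shared tail that emits
+                   the access as atomic */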
+ case WASM_OP_ATOMIC_I32_LOAD:
+ bytes = 4;
+ goto op_atomic_i32_load;
+ case WASM_OP_ATOMIC_I32_LOAD8_U:
+ bytes = 1;
+ goto op_atomic_i32_load;
+ case WASM_OP_ATOMIC_I32_LOAD16_U:
+ bytes = 2;
+ op_atomic_i32_load:
+ if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align,
+ offset, bytes, sign, true))
+ return false;
+ break;
+
+ case WASM_OP_ATOMIC_I64_LOAD:
+ bytes = 8;
+ goto op_atomic_i64_load;
+ case WASM_OP_ATOMIC_I64_LOAD8_U:
+ bytes = 1;
+ goto op_atomic_i64_load;
+ case WASM_OP_ATOMIC_I64_LOAD16_U:
+ bytes = 2;
+ goto op_atomic_i64_load;
+ case WASM_OP_ATOMIC_I64_LOAD32_U:
+ bytes = 4;
+ op_atomic_i64_load:
+ if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align,
+ offset, bytes, sign, true))
+ return false;
+ break;
+
+ case WASM_OP_ATOMIC_I32_STORE:
+ bytes = 4;
+ goto op_atomic_i32_store;
+ case WASM_OP_ATOMIC_I32_STORE8:
+ bytes = 1;
+ goto op_atomic_i32_store;
+ case WASM_OP_ATOMIC_I32_STORE16:
+ bytes = 2;
+ op_atomic_i32_store:
+ if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align,
+ offset, bytes, true))
+ return false;
+ break;
+
+ case WASM_OP_ATOMIC_I64_STORE:
+ bytes = 8;
+ goto op_atomic_i64_store;
+ case WASM_OP_ATOMIC_I64_STORE8:
+ bytes = 1;
+ goto op_atomic_i64_store;
+ case WASM_OP_ATOMIC_I64_STORE16:
+ bytes = 2;
+ goto op_atomic_i64_store;
+ case WASM_OP_ATOMIC_I64_STORE32:
+ bytes = 4;
+ op_atomic_i64_store:
+ if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align,
+ offset, bytes, true))
+ return false;
+ break;
+
+ case WASM_OP_ATOMIC_RMW_I32_CMPXCHG:
+ bytes = 4;
+ op_type = VALUE_TYPE_I32;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I64_CMPXCHG:
+ bytes = 8;
+ op_type = VALUE_TYPE_I64;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U:
+ bytes = 1;
+ op_type = VALUE_TYPE_I32;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U:
+ bytes = 2;
+ op_type = VALUE_TYPE_I32;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U:
+ bytes = 1;
+ op_type = VALUE_TYPE_I64;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U:
+ bytes = 2;
+ op_type = VALUE_TYPE_I64;
+ goto op_atomic_cmpxchg;
+ case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U:
+ bytes = 4;
+ op_type = VALUE_TYPE_I64;
+ op_atomic_cmpxchg:
+ if (!aot_compile_op_atomic_cmpxchg(comp_ctx, func_ctx,
+ op_type, align,
+ offset, bytes))
+ return false;
+ break;
+
+ COMPILE_ATOMIC_RMW(Add, ADD);
+ COMPILE_ATOMIC_RMW(Sub, SUB);
+ COMPILE_ATOMIC_RMW(And, AND);
+ COMPILE_ATOMIC_RMW(Or, OR);
+ COMPILE_ATOMIC_RMW(Xor, XOR);
+ COMPILE_ATOMIC_RMW(Xchg, XCHG);
+
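+                /* Each COMPILE_ATOMIC_RMW(NAME, OP) use above expands
+                   to the case labels of the i32/i64 RMW variants, sets
+                   `bin_op`, `op_type` and `bytes`, and jumps to the
+                   shared tail below. */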
+ build_atomic_rmw:
+ if (!aot_compile_op_atomic_rmw(comp_ctx, func_ctx,
+ bin_op, op_type, align,
+ offset, bytes))
+ return false;
+ break;
+
+ default:
+ aot_set_last_error("unsupported opcode");
+ return false;
+ }
+ break;
+ }
+#endif /* end of WASM_ENABLE_SHARED_MEMORY */
+
+#if WASM_ENABLE_SIMD != 0
+ case WASM_OP_SIMD_PREFIX:
+ {
+ if (!comp_ctx->enable_simd) {
+ goto unsupport_simd;
+ }
+
+ opcode = *frame_ip++;
+ /* follow the order of enum WASMSimdEXTOpcode in
+ wasm_opcode.h */
+ switch (opcode) {
+ /* Memory instruction */
+ case SIMD_v128_load:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_v128_load(comp_ctx, func_ctx,
+ align, offset))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_load8x8_s:
+ case SIMD_v128_load8x8_u:
+ case SIMD_v128_load16x4_s:
+ case SIMD_v128_load16x4_u:
+ case SIMD_v128_load32x2_s:
+ case SIMD_v128_load32x2_u:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_load_extend(
+ comp_ctx, func_ctx, opcode, align, offset))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_load8_splat:
+ case SIMD_v128_load16_splat:
+ case SIMD_v128_load32_splat:
+ case SIMD_v128_load64_splat:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_load_splat(comp_ctx, func_ctx,
+ opcode, align, offset))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_store:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_v128_store(comp_ctx, func_ctx,
+ align, offset))
+ return false;
+ break;
+ }
+
+ /* Basic operation */
+ case SIMD_v128_const:
+ {
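+                    /* v128.const carries a 16-byte immediate: the
+                       helper reads it from frame_ip, then we advance
+                       past it */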
+ if (!aot_compile_simd_v128_const(comp_ctx, func_ctx,
+ frame_ip))
+ return false;
+ frame_ip += 16;
+ break;
+ }
+
+ case SIMD_v8x16_shuffle:
+ {
+ if (!aot_compile_simd_shuffle(comp_ctx, func_ctx,
+ frame_ip))
+ return false;
+ frame_ip += 16;
+ break;
+ }
+
+ case SIMD_v8x16_swizzle:
+ {
+ if (!aot_compile_simd_swizzle(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ /* Splat operation */
+ case SIMD_i8x16_splat:
+ case SIMD_i16x8_splat:
+ case SIMD_i32x4_splat:
+ case SIMD_i64x2_splat:
+ case SIMD_f32x4_splat:
+ case SIMD_f64x2_splat:
+ {
+ if (!aot_compile_simd_splat(comp_ctx, func_ctx, opcode))
+ return false;
+ break;
+ }
+
+ /* Lane operation */
+ case SIMD_i8x16_extract_lane_s:
+ case SIMD_i8x16_extract_lane_u:
+ {
+ if (!aot_compile_simd_extract_i8x16(
+ comp_ctx, func_ctx, *frame_ip++,
+ SIMD_i8x16_extract_lane_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_replace_lane:
+ {
+ if (!aot_compile_simd_replace_i8x16(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extract_lane_s:
+ case SIMD_i16x8_extract_lane_u:
+ {
+ if (!aot_compile_simd_extract_i16x8(
+ comp_ctx, func_ctx, *frame_ip++,
+ SIMD_i16x8_extract_lane_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_replace_lane:
+ {
+ if (!aot_compile_simd_replace_i16x8(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extract_lane:
+ {
+ if (!aot_compile_simd_extract_i32x4(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_replace_lane:
+ {
+ if (!aot_compile_simd_replace_i32x4(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_extract_lane:
+ {
+ if (!aot_compile_simd_extract_i64x2(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_replace_lane:
+ {
+ if (!aot_compile_simd_replace_i64x2(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_extract_lane:
+ {
+ if (!aot_compile_simd_extract_f32x4(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_replace_lane:
+ {
+ if (!aot_compile_simd_replace_f32x4(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_extract_lane:
+ {
+ if (!aot_compile_simd_extract_f64x2(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_replace_lane:
+ {
+ if (!aot_compile_simd_replace_f64x2(comp_ctx, func_ctx,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ /* i8x16 Cmp */
+ case SIMD_i8x16_eq:
+ case SIMD_i8x16_ne:
+ case SIMD_i8x16_lt_s:
+ case SIMD_i8x16_lt_u:
+ case SIMD_i8x16_gt_s:
+ case SIMD_i8x16_gt_u:
+ case SIMD_i8x16_le_s:
+ case SIMD_i8x16_le_u:
+ case SIMD_i8x16_ge_s:
+ case SIMD_i8x16_ge_u:
+ {
+ if (!aot_compile_simd_i8x16_compare(
+ comp_ctx, func_ctx,
+ INT_EQ + opcode - SIMD_i8x16_eq))
+ return false;
+ break;
+ }
+
+ /* i16x8 Cmp */
+ case SIMD_i16x8_eq:
+ case SIMD_i16x8_ne:
+ case SIMD_i16x8_lt_s:
+ case SIMD_i16x8_lt_u:
+ case SIMD_i16x8_gt_s:
+ case SIMD_i16x8_gt_u:
+ case SIMD_i16x8_le_s:
+ case SIMD_i16x8_le_u:
+ case SIMD_i16x8_ge_s:
+ case SIMD_i16x8_ge_u:
+ {
+ if (!aot_compile_simd_i16x8_compare(
+ comp_ctx, func_ctx,
+ INT_EQ + opcode - SIMD_i16x8_eq))
+ return false;
+ break;
+ }
+
+ /* i32x4 Cmp */
+ case SIMD_i32x4_eq:
+ case SIMD_i32x4_ne:
+ case SIMD_i32x4_lt_s:
+ case SIMD_i32x4_lt_u:
+ case SIMD_i32x4_gt_s:
+ case SIMD_i32x4_gt_u:
+ case SIMD_i32x4_le_s:
+ case SIMD_i32x4_le_u:
+ case SIMD_i32x4_ge_s:
+ case SIMD_i32x4_ge_u:
+ {
+ if (!aot_compile_simd_i32x4_compare(
+ comp_ctx, func_ctx,
+ INT_EQ + opcode - SIMD_i32x4_eq))
+ return false;
+ break;
+ }
+
+ /* f32x4 Cmp */
+ case SIMD_f32x4_eq:
+ case SIMD_f32x4_ne:
+ case SIMD_f32x4_lt:
+ case SIMD_f32x4_gt:
+ case SIMD_f32x4_le:
+ case SIMD_f32x4_ge:
+ {
+ if (!aot_compile_simd_f32x4_compare(
+ comp_ctx, func_ctx,
+ FLOAT_EQ + opcode - SIMD_f32x4_eq))
+ return false;
+ break;
+ }
+
+ /* f64x2 Cmp */
+ case SIMD_f64x2_eq:
+ case SIMD_f64x2_ne:
+ case SIMD_f64x2_lt:
+ case SIMD_f64x2_gt:
+ case SIMD_f64x2_le:
+ case SIMD_f64x2_ge:
+ {
+ if (!aot_compile_simd_f64x2_compare(
+ comp_ctx, func_ctx,
+ FLOAT_EQ + opcode - SIMD_f64x2_eq))
+ return false;
+ break;
+ }
+
+ /* v128 Op */
+ case SIMD_v128_not:
+ case SIMD_v128_and:
+ case SIMD_v128_andnot:
+ case SIMD_v128_or:
+ case SIMD_v128_xor:
+ case SIMD_v128_bitselect:
+ {
+ if (!aot_compile_simd_v128_bitwise(comp_ctx, func_ctx,
+ V128_NOT + opcode
+ - SIMD_v128_not))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_any_true:
+ {
+ if (!aot_compile_simd_v128_any_true(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ /* Load Lane Op */
+ case SIMD_v128_load8_lane:
+ case SIMD_v128_load16_lane:
+ case SIMD_v128_load32_lane:
+ case SIMD_v128_load64_lane:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_load_lane(comp_ctx, func_ctx,
+ opcode, align, offset,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_store8_lane:
+ case SIMD_v128_store16_lane:
+ case SIMD_v128_store32_lane:
+ case SIMD_v128_store64_lane:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_store_lane(comp_ctx, func_ctx,
+ opcode, align, offset,
+ *frame_ip++))
+ return false;
+ break;
+ }
+
+ case SIMD_v128_load32_zero:
+ case SIMD_v128_load64_zero:
+ {
+ read_leb_uint32(frame_ip, frame_ip_end, align);
+ read_leb_uint32(frame_ip, frame_ip_end, offset);
+ if (!aot_compile_simd_load_zero(comp_ctx, func_ctx,
+ opcode, align, offset))
+ return false;
+ break;
+ }
+
+ /* Float conversion */
+ case SIMD_f32x4_demote_f64x2_zero:
+ {
+ if (!aot_compile_simd_f64x2_demote(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_promote_low_f32x4_zero:
+ {
+ if (!aot_compile_simd_f32x4_promote(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ /* i8x16 Op */
+ case SIMD_i8x16_abs:
+ {
+ if (!aot_compile_simd_i8x16_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_neg:
+ {
+ if (!aot_compile_simd_i8x16_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_popcnt:
+ {
+ if (!aot_compile_simd_i8x16_popcnt(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_all_true:
+ {
+ if (!aot_compile_simd_i8x16_all_true(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_bitmask:
+ {
+ if (!aot_compile_simd_i8x16_bitmask(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_narrow_i16x8_s:
+ case SIMD_i8x16_narrow_i16x8_u:
+ {
+ if (!aot_compile_simd_i8x16_narrow_i16x8(
+ comp_ctx, func_ctx,
+ (opcode == SIMD_i8x16_narrow_i16x8_s)))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_ceil:
+ {
+ if (!aot_compile_simd_f32x4_ceil(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_floor:
+ {
+ if (!aot_compile_simd_f32x4_floor(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_trunc:
+ {
+ if (!aot_compile_simd_f32x4_trunc(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_nearest:
+ {
+ if (!aot_compile_simd_f32x4_nearest(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_shl:
+ case SIMD_i8x16_shr_s:
+ case SIMD_i8x16_shr_u:
+ {
+ if (!aot_compile_simd_i8x16_shift(comp_ctx, func_ctx,
+ INT_SHL + opcode
+ - SIMD_i8x16_shl))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_add:
+ {
+ if (!aot_compile_simd_i8x16_arith(comp_ctx, func_ctx,
+ V128_ADD))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_add_sat_s:
+ case SIMD_i8x16_add_sat_u:
+ {
+ if (!aot_compile_simd_i8x16_saturate(
+ comp_ctx, func_ctx, V128_ADD,
+ opcode == SIMD_i8x16_add_sat_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_sub:
+ {
+ if (!aot_compile_simd_i8x16_arith(comp_ctx, func_ctx,
+ V128_SUB))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_sub_sat_s:
+ case SIMD_i8x16_sub_sat_u:
+ {
+ if (!aot_compile_simd_i8x16_saturate(
+ comp_ctx, func_ctx, V128_SUB,
+ opcode == SIMD_i8x16_sub_sat_s))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_ceil:
+ {
+ if (!aot_compile_simd_f64x2_ceil(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_floor:
+ {
+ if (!aot_compile_simd_f64x2_floor(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_min_s:
+ case SIMD_i8x16_min_u:
+ {
+ if (!aot_compile_simd_i8x16_cmp(
+ comp_ctx, func_ctx, V128_MIN,
+ opcode == SIMD_i8x16_min_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_max_s:
+ case SIMD_i8x16_max_u:
+ {
+ if (!aot_compile_simd_i8x16_cmp(
+ comp_ctx, func_ctx, V128_MAX,
+ opcode == SIMD_i8x16_max_s))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_trunc:
+ {
+ if (!aot_compile_simd_f64x2_trunc(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i8x16_avgr_u:
+ {
+ if (!aot_compile_simd_i8x16_avgr_u(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extadd_pairwise_i8x16_s:
+ case SIMD_i16x8_extadd_pairwise_i8x16_u:
+ {
+ if (!aot_compile_simd_i16x8_extadd_pairwise_i8x16(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_extadd_pairwise_i8x16_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extadd_pairwise_i16x8_s:
+ case SIMD_i32x4_extadd_pairwise_i16x8_u:
+ {
+ if (!aot_compile_simd_i32x4_extadd_pairwise_i16x8(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_extadd_pairwise_i16x8_s == opcode))
+ return false;
+ break;
+ }
+
+ /* i16x8 Op */
+ case SIMD_i16x8_abs:
+ {
+ if (!aot_compile_simd_i16x8_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_neg:
+ {
+ if (!aot_compile_simd_i16x8_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_q15mulr_sat_s:
+ {
+ if (!aot_compile_simd_i16x8_q15mulr_sat(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_all_true:
+ {
+ if (!aot_compile_simd_i16x8_all_true(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_bitmask:
+ {
+ if (!aot_compile_simd_i16x8_bitmask(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_narrow_i32x4_s:
+ case SIMD_i16x8_narrow_i32x4_u:
+ {
+ if (!aot_compile_simd_i16x8_narrow_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_narrow_i32x4_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extend_low_i8x16_s:
+ case SIMD_i16x8_extend_high_i8x16_s:
+ {
+ if (!aot_compile_simd_i16x8_extend_i8x16(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_extend_low_i8x16_s == opcode, true))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extend_low_i8x16_u:
+ case SIMD_i16x8_extend_high_i8x16_u:
+ {
+ if (!aot_compile_simd_i16x8_extend_i8x16(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_extend_low_i8x16_u == opcode, false))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_shl:
+ case SIMD_i16x8_shr_s:
+ case SIMD_i16x8_shr_u:
+ {
+ if (!aot_compile_simd_i16x8_shift(comp_ctx, func_ctx,
+ INT_SHL + opcode
+ - SIMD_i16x8_shl))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_add:
+ {
+ if (!aot_compile_simd_i16x8_arith(comp_ctx, func_ctx,
+ V128_ADD))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_add_sat_s:
+ case SIMD_i16x8_add_sat_u:
+ {
+ if (!aot_compile_simd_i16x8_saturate(
+ comp_ctx, func_ctx, V128_ADD,
+ opcode == SIMD_i16x8_add_sat_s ? true : false))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_sub:
+ {
+ if (!aot_compile_simd_i16x8_arith(comp_ctx, func_ctx,
+ V128_SUB))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_sub_sat_s:
+ case SIMD_i16x8_sub_sat_u:
+ {
+ if (!aot_compile_simd_i16x8_saturate(
+ comp_ctx, func_ctx, V128_SUB,
+ opcode == SIMD_i16x8_sub_sat_s ? true : false))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_nearest:
+ {
+ if (!aot_compile_simd_f64x2_nearest(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_mul:
+ {
+ if (!aot_compile_simd_i16x8_arith(comp_ctx, func_ctx,
+ V128_MUL))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_min_s:
+ case SIMD_i16x8_min_u:
+ {
+ if (!aot_compile_simd_i16x8_cmp(
+ comp_ctx, func_ctx, V128_MIN,
+ opcode == SIMD_i16x8_min_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_max_s:
+ case SIMD_i16x8_max_u:
+ {
+ if (!aot_compile_simd_i16x8_cmp(
+ comp_ctx, func_ctx, V128_MAX,
+ opcode == SIMD_i16x8_max_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_avgr_u:
+ {
+ if (!aot_compile_simd_i16x8_avgr_u(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extmul_low_i8x16_s:
+ case SIMD_i16x8_extmul_high_i8x16_s:
+ {
+ if (!(aot_compile_simd_i16x8_extmul_i8x16(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_extmul_low_i8x16_s == opcode, true)))
+ return false;
+ break;
+ }
+
+ case SIMD_i16x8_extmul_low_i8x16_u:
+ case SIMD_i16x8_extmul_high_i8x16_u:
+ {
+ if (!(aot_compile_simd_i16x8_extmul_i8x16(
+ comp_ctx, func_ctx,
+ SIMD_i16x8_extmul_low_i8x16_u == opcode,
+ false)))
+ return false;
+ break;
+ }
+
+ /* i32x4 Op */
+ case SIMD_i32x4_abs:
+ {
+ if (!aot_compile_simd_i32x4_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_neg:
+ {
+ if (!aot_compile_simd_i32x4_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_all_true:
+ {
+ if (!aot_compile_simd_i32x4_all_true(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_bitmask:
+ {
+ if (!aot_compile_simd_i32x4_bitmask(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_narrow_i64x2_s:
+ case SIMD_i32x4_narrow_i64x2_u:
+ {
+ if (!aot_compile_simd_i32x4_narrow_i64x2(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_narrow_i64x2_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extend_low_i16x8_s:
+ case SIMD_i32x4_extend_high_i16x8_s:
+ {
+ if (!aot_compile_simd_i32x4_extend_i16x8(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_extend_low_i16x8_s == opcode, true))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extend_low_i16x8_u:
+ case SIMD_i32x4_extend_high_i16x8_u:
+ {
+ if (!aot_compile_simd_i32x4_extend_i16x8(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_extend_low_i16x8_u == opcode, false))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_shl:
+ case SIMD_i32x4_shr_s:
+ case SIMD_i32x4_shr_u:
+ {
+ if (!aot_compile_simd_i32x4_shift(comp_ctx, func_ctx,
+ INT_SHL + opcode
+ - SIMD_i32x4_shl))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_add:
+ {
+ if (!aot_compile_simd_i32x4_arith(comp_ctx, func_ctx,
+ V128_ADD))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_add_sat_s:
+ case SIMD_i32x4_add_sat_u:
+ {
+ if (!aot_compile_simd_i32x4_saturate(
+ comp_ctx, func_ctx, V128_ADD,
+ opcode == SIMD_i32x4_add_sat_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_sub:
+ {
+ if (!aot_compile_simd_i32x4_arith(comp_ctx, func_ctx,
+ V128_SUB))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_sub_sat_s:
+ case SIMD_i32x4_sub_sat_u:
+ {
+ if (!aot_compile_simd_i32x4_saturate(
+ comp_ctx, func_ctx, V128_SUB,
+                            opcode == SIMD_i32x4_sub_sat_s))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_mul:
+ {
+ if (!aot_compile_simd_i32x4_arith(comp_ctx, func_ctx,
+ V128_MUL))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_min_s:
+ case SIMD_i32x4_min_u:
+ {
+ if (!aot_compile_simd_i32x4_cmp(
+ comp_ctx, func_ctx, V128_MIN,
+ SIMD_i32x4_min_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_max_s:
+ case SIMD_i32x4_max_u:
+ {
+ if (!aot_compile_simd_i32x4_cmp(
+ comp_ctx, func_ctx, V128_MAX,
+ SIMD_i32x4_max_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_dot_i16x8_s:
+ {
+ if (!aot_compile_simd_i32x4_dot_i16x8(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_avgr_u:
+ {
+ if (!aot_compile_simd_i32x4_avgr_u(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extmul_low_i16x8_s:
+ case SIMD_i32x4_extmul_high_i16x8_s:
+ {
+ if (!aot_compile_simd_i32x4_extmul_i16x8(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_extmul_low_i16x8_s == opcode, true))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_extmul_low_i16x8_u:
+ case SIMD_i32x4_extmul_high_i16x8_u:
+ {
+ if (!aot_compile_simd_i32x4_extmul_i16x8(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_extmul_low_i16x8_u == opcode, false))
+ return false;
+ break;
+ }
+
+ /* i64x2 Op */
+ case SIMD_i64x2_abs:
+ {
+ if (!aot_compile_simd_i64x2_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_neg:
+ {
+ if (!aot_compile_simd_i64x2_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_all_true:
+ {
+ if (!aot_compile_simd_i64x2_all_true(comp_ctx,
+ func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_bitmask:
+ {
+ if (!aot_compile_simd_i64x2_bitmask(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_extend_low_i32x4_s:
+ case SIMD_i64x2_extend_high_i32x4_s:
+ {
+ if (!aot_compile_simd_i64x2_extend_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_i64x2_extend_low_i32x4_s == opcode, true))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_extend_low_i32x4_u:
+ case SIMD_i64x2_extend_high_i32x4_u:
+ {
+ if (!aot_compile_simd_i64x2_extend_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_i64x2_extend_low_i32x4_u == opcode, false))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_shl:
+ case SIMD_i64x2_shr_s:
+ case SIMD_i64x2_shr_u:
+ {
+ if (!aot_compile_simd_i64x2_shift(comp_ctx, func_ctx,
+ INT_SHL + opcode
+ - SIMD_i64x2_shl))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_add:
+ {
+ if (!aot_compile_simd_i64x2_arith(comp_ctx, func_ctx,
+ V128_ADD))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_sub:
+ {
+ if (!aot_compile_simd_i64x2_arith(comp_ctx, func_ctx,
+ V128_SUB))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_mul:
+ {
+ if (!aot_compile_simd_i64x2_arith(comp_ctx, func_ctx,
+ V128_MUL))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_eq:
+ case SIMD_i64x2_ne:
+ case SIMD_i64x2_lt_s:
+ case SIMD_i64x2_gt_s:
+ case SIMD_i64x2_le_s:
+ case SIMD_i64x2_ge_s:
+ {
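+                    /* i64x2 defines only signed comparisons, so the
+                       opcode range does not map onto a contiguous run
+                       of IntCond values; use a lookup table instead of
+                       offset arithmetic */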
+ IntCond icond[] = { INT_EQ, INT_NE, INT_LT_S,
+ INT_GT_S, INT_LE_S, INT_GE_S };
+ if (!aot_compile_simd_i64x2_compare(
+ comp_ctx, func_ctx,
+ icond[opcode - SIMD_i64x2_eq]))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_extmul_low_i32x4_s:
+ case SIMD_i64x2_extmul_high_i32x4_s:
+ {
+ if (!aot_compile_simd_i64x2_extmul_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_i64x2_extmul_low_i32x4_s == opcode, true))
+ return false;
+ break;
+ }
+
+ case SIMD_i64x2_extmul_low_i32x4_u:
+ case SIMD_i64x2_extmul_high_i32x4_u:
+ {
+ if (!aot_compile_simd_i64x2_extmul_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_i64x2_extmul_low_i32x4_u == opcode, false))
+ return false;
+ break;
+ }
+
+ /* f32x4 Op */
+ case SIMD_f32x4_abs:
+ {
+ if (!aot_compile_simd_f32x4_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_neg:
+ {
+ if (!aot_compile_simd_f32x4_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_round:
+ {
+ if (!aot_compile_simd_f32x4_round(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_sqrt:
+ {
+ if (!aot_compile_simd_f32x4_sqrt(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_add:
+ case SIMD_f32x4_sub:
+ case SIMD_f32x4_mul:
+ case SIMD_f32x4_div:
+ {
+ if (!aot_compile_simd_f32x4_arith(comp_ctx, func_ctx,
+ FLOAT_ADD + opcode
+ - SIMD_f32x4_add))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_min:
+ case SIMD_f32x4_max:
+ {
+ if (!aot_compile_simd_f32x4_min_max(
+ comp_ctx, func_ctx, SIMD_f32x4_min == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_pmin:
+ case SIMD_f32x4_pmax:
+ {
+ if (!aot_compile_simd_f32x4_pmin_pmax(
+ comp_ctx, func_ctx, SIMD_f32x4_pmin == opcode))
+ return false;
+ break;
+ }
+
+ /* f64x2 Op */
+
+ case SIMD_f64x2_abs:
+ {
+ if (!aot_compile_simd_f64x2_abs(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_neg:
+ {
+ if (!aot_compile_simd_f64x2_neg(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_round:
+ {
+ if (!aot_compile_simd_f64x2_round(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_sqrt:
+ {
+ if (!aot_compile_simd_f64x2_sqrt(comp_ctx, func_ctx))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_add:
+ case SIMD_f64x2_sub:
+ case SIMD_f64x2_mul:
+ case SIMD_f64x2_div:
+ {
+ if (!aot_compile_simd_f64x2_arith(comp_ctx, func_ctx,
+ FLOAT_ADD + opcode
+ - SIMD_f64x2_add))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_min:
+ case SIMD_f64x2_max:
+ {
+ if (!aot_compile_simd_f64x2_min_max(
+ comp_ctx, func_ctx, SIMD_f64x2_min == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_pmin:
+ case SIMD_f64x2_pmax:
+ {
+ if (!aot_compile_simd_f64x2_pmin_pmax(
+ comp_ctx, func_ctx, SIMD_f64x2_pmin == opcode))
+ return false;
+ break;
+ }
+
+ /* Conversion Op */
+ case SIMD_i32x4_trunc_sat_f32x4_s:
+ case SIMD_i32x4_trunc_sat_f32x4_u:
+ {
+ if (!aot_compile_simd_i32x4_trunc_sat_f32x4(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_trunc_sat_f32x4_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_f32x4_convert_i32x4_s:
+ case SIMD_f32x4_convert_i32x4_u:
+ {
+ if (!aot_compile_simd_f32x4_convert_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_f32x4_convert_i32x4_s == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_i32x4_trunc_sat_f64x2_s_zero:
+ case SIMD_i32x4_trunc_sat_f64x2_u_zero:
+ {
+ if (!aot_compile_simd_i32x4_trunc_sat_f64x2(
+ comp_ctx, func_ctx,
+ SIMD_i32x4_trunc_sat_f64x2_s_zero == opcode))
+ return false;
+ break;
+ }
+
+ case SIMD_f64x2_convert_low_i32x4_s:
+ case SIMD_f64x2_convert_low_i32x4_u:
+ {
+ if (!aot_compile_simd_f64x2_convert_i32x4(
+ comp_ctx, func_ctx,
+ SIMD_f64x2_convert_low_i32x4_s == opcode))
+ return false;
+ break;
+ }
+
+ default:
+ aot_set_last_error("unsupported SIMD opcode");
+ return false;
+ }
+ break;
+ }
+#endif /* end of WASM_ENABLE_SIMD */
+
+ default:
+ aot_set_last_error("unsupported opcode");
+ return false;
+ }
+ }
+
+ /* Move func_return block to the bottom */
+ if (func_ctx->func_return_block) {
+ LLVMBasicBlockRef last_block = LLVMGetLastBasicBlock(func_ctx->func);
+ if (last_block != func_ctx->func_return_block)
+ LLVMMoveBasicBlockAfter(func_ctx->func_return_block, last_block);
+ }
+
+ /* Move got_exception block to the bottom */
+ if (func_ctx->got_exception_block) {
+ LLVMBasicBlockRef last_block = LLVMGetLastBasicBlock(func_ctx->func);
+ if (last_block != func_ctx->got_exception_block)
+ LLVMMoveBasicBlockAfter(func_ctx->got_exception_block, last_block);
+ }
+ return true;
+
+#if WASM_ENABLE_SIMD != 0
+unsupport_simd:
+    aot_set_last_error("a SIMD instruction was found, "
+                       "try removing the --disable-simd option");
+ return false;
+#endif
+
+#if WASM_ENABLE_REF_TYPES != 0
+unsupport_ref_types:
+    aot_set_last_error("a reference type instruction was found, "
+                       "try removing the --disable-ref-types option");
+ return false;
+#endif
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+unsupport_bulk_memory:
+    aot_set_last_error("a bulk memory instruction was found, "
+                       "try removing the --disable-bulk-memory option");
+ return false;
+#endif
+
+fail:
+ return false;
+}
+
+static bool
+verify_module(AOTCompContext *comp_ctx)
+{
+ char *msg = NULL;
+ bool ret;
+
+    /* note: LLVMVerifyModule returns true when the module is broken */
+    ret = LLVMVerifyModule(comp_ctx->module, LLVMPrintMessageAction, &msg);
+    if (ret && msg) {
+ if (msg[0] != '\0') {
+ aot_set_last_error(msg);
+ LLVMDisposeMessage(msg);
+ return false;
+ }
+ LLVMDisposeMessage(msg);
+ }
+
+ return true;
+}
+
+/* Check whether the target lacks hardware atomic support, in which case
+   the LowerAtomic pass is required */
+static bool
+aot_require_lower_atomic_pass(AOTCompContext *comp_ctx)
+{
+ bool ret = false;
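+
+    /* On RISC-V the "+a" feature string denotes the Atomic (A)
+       extension; without it, LLVM's LowerAtomic pass must rewrite
+       atomic operations into plain ones (only safe for targets that
+       run single-threaded) */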
+ if (!strncmp(comp_ctx->target_arch, "riscv", 5)) {
+ char *feature =
+ LLVMGetTargetMachineFeatureString(comp_ctx->target_machine);
+
+ if (feature) {
+ if (!strstr(feature, "+a")) {
+ ret = true;
+ }
+ LLVMDisposeMessage(feature);
+ }
+ }
+ return ret;
+}
+
+/* Check whether the target needs to expand switch to if/else */
+static bool
+aot_require_lower_switch_pass(AOTCompContext *comp_ctx)
+{
+ bool ret = false;
+
+ /* IR switch/case will cause .rodata relocation on riscv/xtensa */
+ if (!strncmp(comp_ctx->target_arch, "riscv", 5)
+ || !strncmp(comp_ctx->target_arch, "xtensa", 6)) {
+ ret = true;
+ }
+
+ return ret;
+}
+
+static bool
+apply_passes_for_indirect_mode(AOTCompContext *comp_ctx)
+{
+ LLVMPassManagerRef common_pass_mgr;
+
+ if (!(common_pass_mgr = LLVMCreatePassManager())) {
+ aot_set_last_error("create pass manager failed");
+ return false;
+ }
+
+ aot_add_expand_memory_op_pass(common_pass_mgr);
+
+ if (aot_require_lower_atomic_pass(comp_ctx))
+ LLVMAddLowerAtomicPass(common_pass_mgr);
+
+ if (aot_require_lower_switch_pass(comp_ctx))
+ LLVMAddLowerSwitchPass(common_pass_mgr);
+
+ LLVMRunPassManager(common_pass_mgr, comp_ctx->module);
+
+ LLVMDisposePassManager(common_pass_mgr);
+ return true;
+}
+
+bool
+aot_compile_wasm(AOTCompContext *comp_ctx)
+{
+ uint32 i;
+
+ if (!aot_validate_wasm(comp_ctx)) {
+ return false;
+ }
+
+ bh_print_time("Begin to compile WASM bytecode to LLVM IR");
+ for (i = 0; i < comp_ctx->func_ctx_count; i++) {
+ if (!aot_compile_func(comp_ctx, i)) {
+ return false;
+ }
+ }
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMDIBuilderFinalize(comp_ctx->debug_builder);
+#endif
+
+    /* Disable LLVM module verification in JIT mode to speed up
+       the compilation process */
+ if (!comp_ctx->is_jit_mode) {
+ bh_print_time("Begin to verify LLVM module");
+ if (!verify_module(comp_ctx)) {
+ return false;
+ }
+ }
+
+ /* Run IR optimization before feeding in ORCJIT and AOT codegen */
+ if (comp_ctx->optimize) {
+        /* Run passes for AOT/JIT mode.
+           TODO: Apply these passes in the do_ir_transform callback of
+           TransformLayer when compiling each JIT function, to speed up
+           the launch process. Currently there are two issues in the
+           JIT: a memory leak in do_ir_transform and a possible core
+           dump. */
+ bh_print_time("Begin to run llvm optimization passes");
+ aot_apply_llvm_new_pass_manager(comp_ctx, comp_ctx->module);
+
+        /* Run the passes specific to AOT indirect mode last, since the
+           general optimization above may introduce intrinsic calls such
+           as llvm.memset; those calls are removed here. */
+ if (!comp_ctx->is_jit_mode && comp_ctx->is_indirect_mode) {
+ bh_print_time("Begin to run optimization passes "
+ "for indirect mode");
+ if (!apply_passes_for_indirect_mode(comp_ctx)) {
+ return false;
+ }
+ }
+ bh_print_time("Finish llvm optimization passes");
+ }
+
+#ifdef DUMP_MODULE
+ LLVMDumpModule(comp_ctx->module);
+ os_printf("\n");
+#endif
+
+ if (comp_ctx->is_jit_mode) {
+ LLVMErrorRef err;
+ LLVMOrcJITDylibRef orc_main_dylib;
+ LLVMOrcThreadSafeModuleRef orc_thread_safe_module;
+
+ orc_main_dylib = LLVMOrcLLLazyJITGetMainJITDylib(comp_ctx->orc_jit);
+ if (!orc_main_dylib) {
+            aot_set_last_error(
+                "failed to get orc_jit main dynamic library");
+ return false;
+ }
+
+ orc_thread_safe_module = LLVMOrcCreateNewThreadSafeModule(
+ comp_ctx->module, comp_ctx->orc_thread_safe_context);
+ if (!orc_thread_safe_module) {
+ aot_set_last_error("failed to create thread safe module");
+ return false;
+ }
+
+ if ((err = LLVMOrcLLLazyJITAddLLVMIRModule(
+ comp_ctx->orc_jit, orc_main_dylib, orc_thread_safe_module))) {
+            /* If adding the ThreadSafeModule fails, we have to dispose
+               of it ourselves; otherwise the ORC JIT takes ownership of
+               the memory. */
+ LLVMOrcDisposeThreadSafeModule(orc_thread_safe_module);
+ aot_handle_llvm_errmsg("failed to addIRModule", err);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#if !(defined(_WIN32) || defined(_WIN32_))
+char *
+aot_generate_tempfile_name(const char *prefix, const char *extension,
+ char *buffer, uint32 len)
+{
+ int fd, name_len;
+
+ name_len = snprintf(buffer, len, "%s-XXXXXX", prefix);
+
+ if ((fd = mkstemp(buffer)) <= 0) {
+ aot_set_last_error("make temp file failed.");
+ return NULL;
+ }
+
+ /* close and remove temp file */
+ close(fd);
+ unlink(buffer);
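+
+    /* Note: only the unique name is kept; the file itself is removed
+       and the caller re-creates it with the extension appended, which
+       leaves a small window in which another process could claim the
+       same name */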
+
+ /* Check if buffer length is enough */
+ /* name_len + '.' + extension + '\0' */
+ if (name_len + 1 + strlen(extension) + 1 > len) {
+ aot_set_last_error("temp file name too long.");
+ return NULL;
+ }
+
+ snprintf(buffer + name_len, len - name_len, ".%s", extension);
+ return buffer;
+}
+#endif /* end of !(defined(_WIN32) || defined(_WIN32_)) */
+
+bool
+aot_emit_llvm_file(AOTCompContext *comp_ctx, const char *file_name)
+{
+ char *err = NULL;
+
+ bh_print_time("Begin to emit LLVM IR file");
+
+ if (LLVMPrintModuleToFile(comp_ctx->module, file_name, &err) != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ aot_set_last_error("emit llvm ir to file failed.");
+ return false;
+ }
+
+ return true;
+}
+
+bool
+aot_emit_object_file(AOTCompContext *comp_ctx, char *file_name)
+{
+ char *err = NULL;
+ LLVMCodeGenFileType file_type = LLVMObjectFile;
+ LLVMTargetRef target = LLVMGetTargetMachineTarget(comp_ctx->target_machine);
+
+ bh_print_time("Begin to emit object file");
+
+#if !(defined(_WIN32) || defined(_WIN32_))
+ if (comp_ctx->external_llc_compiler || comp_ctx->external_asm_compiler) {
+ char cmd[1024];
+ int ret;
+
+ if (comp_ctx->external_llc_compiler) {
+ char bc_file_name[64];
+
+ if (!aot_generate_tempfile_name("wamrc-bc", "bc", bc_file_name,
+ sizeof(bc_file_name))) {
+ return false;
+ }
+
+ if (LLVMWriteBitcodeToFile(comp_ctx->module, bc_file_name) != 0) {
+ aot_set_last_error("emit llvm bitcode file failed.");
+ return false;
+ }
+
+ snprintf(cmd, sizeof(cmd), "%s %s -o %s %s",
+ comp_ctx->external_llc_compiler,
+ comp_ctx->llc_compiler_flags ? comp_ctx->llc_compiler_flags
+ : "-O3 -c",
+ file_name, bc_file_name);
+ LOG_VERBOSE("invoking external LLC compiler:\n\t%s", cmd);
+
+ ret = system(cmd);
+ /* remove temp bitcode file */
+ unlink(bc_file_name);
+
+ if (ret != 0) {
+ aot_set_last_error("failed to compile LLVM bitcode to obj file "
+ "with external LLC compiler.");
+ return false;
+ }
+ }
+ else if (comp_ctx->external_asm_compiler) {
+ char asm_file_name[64];
+
+ if (!aot_generate_tempfile_name("wamrc-asm", "s", asm_file_name,
+ sizeof(asm_file_name))) {
+ return false;
+ }
+
+ if (LLVMTargetMachineEmitToFile(comp_ctx->target_machine,
+ comp_ctx->module, asm_file_name,
+ LLVMAssemblyFile, &err)
+ != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+                aot_set_last_error("emit assembly file failed.");
+ return false;
+ }
+
+ snprintf(cmd, sizeof(cmd), "%s %s -o %s %s",
+ comp_ctx->external_asm_compiler,
+ comp_ctx->asm_compiler_flags ? comp_ctx->asm_compiler_flags
+ : "-O3 -c",
+ file_name, asm_file_name);
+ LOG_VERBOSE("invoking external ASM compiler:\n\t%s", cmd);
+
+ ret = system(cmd);
+ /* remove temp assembly file */
+ unlink(asm_file_name);
+
+ if (ret != 0) {
+ aot_set_last_error("failed to compile Assembly file to obj "
+ "file with external ASM compiler.");
+ return false;
+ }
+ }
+
+ return true;
+ }
+#endif /* end of !(defined(_WIN32) || defined(_WIN32_)) */
+
+ if (!strncmp(LLVMGetTargetName(target), "arc", 3))
+        /* Emit an assembly file instead for the arc target,
+           as it cannot emit an object file directly */
+ file_type = LLVMAssemblyFile;
+
+ if (LLVMTargetMachineEmitToFile(comp_ctx->target_machine, comp_ctx->module,
+ file_name, file_type, &err)
+ != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+        aot_set_last_error("emit object file failed.");
+ return false;
+ }
+
+ return true;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.h
new file mode 100644
index 000000000..e6031ab89
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_compiler.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_COMPILER_H_
+#define _AOT_COMPILER_H_
+
+#include "aot.h"
+#include "aot_llvm.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef AOTIntCond IntCond;
+typedef AOTFloatCond FloatCond;
+
+typedef enum IntArithmetic {
+ INT_ADD = 0,
+ INT_SUB,
+ INT_MUL,
+ INT_DIV_S,
+ INT_DIV_U,
+ INT_REM_S,
+ INT_REM_U
+} IntArithmetic;
+
+typedef enum V128Arithmetic {
+ V128_ADD = 0,
+ V128_SUB,
+ V128_MUL,
+ V128_DIV,
+ V128_NEG,
+ V128_MIN,
+ V128_MAX,
+} V128Arithmetic;
+
+typedef enum IntBitwise {
+ INT_AND = 0,
+ INT_OR,
+ INT_XOR,
+} IntBitwise;
+
+typedef enum V128Bitwise {
+ V128_NOT,
+ V128_AND,
+ V128_ANDNOT,
+ V128_OR,
+ V128_XOR,
+ V128_BITSELECT,
+} V128Bitwise;
+
+typedef enum IntShift {
+ INT_SHL = 0,
+ INT_SHR_S,
+ INT_SHR_U,
+ INT_ROTL,
+ INT_ROTR
+} IntShift;
+
+typedef enum FloatMath {
+ FLOAT_ABS = 0,
+ FLOAT_NEG,
+ FLOAT_CEIL,
+ FLOAT_FLOOR,
+ FLOAT_TRUNC,
+ FLOAT_NEAREST,
+ FLOAT_SQRT
+} FloatMath;
+
+typedef enum FloatArithmetic {
+ FLOAT_ADD = 0,
+ FLOAT_SUB,
+ FLOAT_MUL,
+ FLOAT_DIV,
+ FLOAT_MIN,
+ FLOAT_MAX,
+} FloatArithmetic;
+
+static inline bool
+check_type_compatible(uint8 src_type, uint8 dst_type)
+{
+ if (src_type == dst_type) {
+ return true;
+ }
+
+ /* ext i1 to i32 */
+ if (src_type == VALUE_TYPE_I1 && dst_type == VALUE_TYPE_I32) {
+ return true;
+ }
+
+ /* i32 <==> func.ref, i32 <==> extern.ref */
+ if (src_type == VALUE_TYPE_I32
+ && (dst_type == VALUE_TYPE_EXTERNREF
+ || dst_type == VALUE_TYPE_FUNCREF)) {
+ return true;
+ }
+
+ if (dst_type == VALUE_TYPE_I32
+ && (src_type == VALUE_TYPE_FUNCREF
+ || src_type == VALUE_TYPE_EXTERNREF)) {
+ return true;
+ }
+
+ return false;
+}
+
+#define CHECK_STACK() \
+ do { \
+ if (!func_ctx->block_stack.block_list_end) { \
+ aot_set_last_error("WASM block stack underflow."); \
+ goto fail; \
+ } \
+ if (!func_ctx->block_stack.block_list_end->value_stack \
+ .value_list_end) { \
+ aot_set_last_error("WASM data stack underflow."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define POP(llvm_value, value_type) \
+ do { \
+ AOTValue *aot_value; \
+ CHECK_STACK(); \
+ aot_value = aot_value_stack_pop( \
+ &func_ctx->block_stack.block_list_end->value_stack); \
+ if (!check_type_compatible(aot_value->type, value_type)) { \
+ aot_set_last_error("invalid WASM stack data type."); \
+ wasm_runtime_free(aot_value); \
+ goto fail; \
+ } \
+ if (aot_value->type == value_type) \
+ llvm_value = aot_value->value; \
+ else { \
+ if (aot_value->type == VALUE_TYPE_I1) { \
+ if (!(llvm_value = \
+ LLVMBuildZExt(comp_ctx->builder, aot_value->value, \
+ I32_TYPE, "i1toi32"))) { \
+ aot_set_last_error("invalid WASM stack " \
+ "data type."); \
+ wasm_runtime_free(aot_value); \
+ goto fail; \
+ } \
+ } \
+ else { \
+ bh_assert(aot_value->type == VALUE_TYPE_I32 \
+ || aot_value->type == VALUE_TYPE_FUNCREF \
+ || aot_value->type == VALUE_TYPE_EXTERNREF); \
+ bh_assert(value_type == VALUE_TYPE_I32 \
+ || value_type == VALUE_TYPE_FUNCREF \
+ || value_type == VALUE_TYPE_EXTERNREF); \
+ llvm_value = aot_value->value; \
+ } \
+ } \
+ wasm_runtime_free(aot_value); \
+ } while (0)
+
+#define POP_I32(v) POP(v, VALUE_TYPE_I32)
+#define POP_I64(v) POP(v, VALUE_TYPE_I64)
+#define POP_F32(v) POP(v, VALUE_TYPE_F32)
+#define POP_F64(v) POP(v, VALUE_TYPE_F64)
+#define POP_V128(v) POP(v, VALUE_TYPE_V128)
+#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
+#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
+
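+/* Comparison results live on the AOT value stack as VALUE_TYPE_I1: POP
+   zero-extends them to i32 on demand, while POP_COND below accepts
+   either an i1 directly or an i32 that it converts with an icmp against
+   zero. */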
+#define POP_COND(llvm_value) \
+ do { \
+ AOTValue *aot_value; \
+ CHECK_STACK(); \
+ aot_value = aot_value_stack_pop( \
+ &func_ctx->block_stack.block_list_end->value_stack); \
+ if (aot_value->type != VALUE_TYPE_I1 \
+ && aot_value->type != VALUE_TYPE_I32) { \
+ aot_set_last_error("invalid WASM stack data type."); \
+ wasm_runtime_free(aot_value); \
+ goto fail; \
+ } \
+ if (aot_value->type == VALUE_TYPE_I1) \
+ llvm_value = aot_value->value; \
+ else { \
+ if (!(llvm_value = \
+ LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, \
+ aot_value->value, I32_ZERO, "i1_cond"))) { \
+ aot_set_last_error("llvm build trunc failed."); \
+ wasm_runtime_free(aot_value); \
+ goto fail; \
+ } \
+ } \
+ wasm_runtime_free(aot_value); \
+ } while (0)
+
+#define PUSH(llvm_value, value_type) \
+ do { \
+ AOTValue *aot_value; \
+ if (!func_ctx->block_stack.block_list_end) { \
+ aot_set_last_error("WASM block stack underflow."); \
+ goto fail; \
+ } \
+ aot_value = wasm_runtime_malloc(sizeof(AOTValue)); \
+ if (!aot_value) { \
+ aot_set_last_error("allocate memory failed."); \
+ goto fail; \
+ } \
+ memset(aot_value, 0, sizeof(AOTValue)); \
+ aot_value->type = value_type; \
+ aot_value->value = llvm_value; \
+ aot_value_stack_push( \
+ &func_ctx->block_stack.block_list_end->value_stack, aot_value); \
+ } while (0)
+
+#define PUSH_I32(v) PUSH(v, VALUE_TYPE_I32)
+#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
+#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
+#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
+#define PUSH_V128(v) PUSH(v, VALUE_TYPE_V128)
+#define PUSH_COND(v) PUSH(v, VALUE_TYPE_I1)
+#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
+#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
+
+#define TO_LLVM_TYPE(wasm_type) \
+ wasm_type_to_llvm_type(&comp_ctx->basic_types, wasm_type)
+
+#define I32_TYPE comp_ctx->basic_types.int32_type
+#define I64_TYPE comp_ctx->basic_types.int64_type
+#define F32_TYPE comp_ctx->basic_types.float32_type
+#define F64_TYPE comp_ctx->basic_types.float64_type
+#define VOID_TYPE comp_ctx->basic_types.void_type
+#define INT1_TYPE comp_ctx->basic_types.int1_type
+#define INT8_TYPE comp_ctx->basic_types.int8_type
+#define INT16_TYPE comp_ctx->basic_types.int16_type
+#define MD_TYPE comp_ctx->basic_types.meta_data_type
+#define INT8_PTR_TYPE comp_ctx->basic_types.int8_ptr_type
+#define INT16_PTR_TYPE comp_ctx->basic_types.int16_ptr_type
+#define INT32_PTR_TYPE comp_ctx->basic_types.int32_ptr_type
+#define INT64_PTR_TYPE comp_ctx->basic_types.int64_ptr_type
+#define F32_PTR_TYPE comp_ctx->basic_types.float32_ptr_type
+#define F64_PTR_TYPE comp_ctx->basic_types.float64_ptr_type
+#define FUNC_REF_TYPE comp_ctx->basic_types.funcref_type
+#define EXTERN_REF_TYPE comp_ctx->basic_types.externref_type
+
+#define I32_CONST(v) LLVMConstInt(I32_TYPE, v, true)
+#define I64_CONST(v) LLVMConstInt(I64_TYPE, v, true)
+#define F32_CONST(v) LLVMConstReal(F32_TYPE, v)
+#define F64_CONST(v) LLVMConstReal(F64_TYPE, v)
+#define I8_CONST(v) LLVMConstInt(INT8_TYPE, v, true)
+
+#define LLVM_CONST(name) (comp_ctx->llvm_consts.name)
+#define I8_ZERO LLVM_CONST(i8_zero)
+#define I32_ZERO LLVM_CONST(i32_zero)
+#define I64_ZERO LLVM_CONST(i64_zero)
+#define F32_ZERO LLVM_CONST(f32_zero)
+#define F64_ZERO LLVM_CONST(f64_zero)
+#define I32_ONE LLVM_CONST(i32_one)
+#define I32_TWO LLVM_CONST(i32_two)
+#define I32_THREE LLVM_CONST(i32_three)
+#define I32_FOUR LLVM_CONST(i32_four)
+#define I32_FIVE LLVM_CONST(i32_five)
+#define I32_SIX LLVM_CONST(i32_six)
+#define I32_SEVEN LLVM_CONST(i32_seven)
+#define I32_EIGHT LLVM_CONST(i32_eight)
+#define I32_NINE LLVM_CONST(i32_nine)
+#define I32_NEG_ONE LLVM_CONST(i32_neg_one)
+#define I64_NEG_ONE LLVM_CONST(i64_neg_one)
+#define I32_MIN LLVM_CONST(i32_min)
+#define I64_MIN LLVM_CONST(i64_min)
+#define I32_31 LLVM_CONST(i32_31)
+#define I32_32 LLVM_CONST(i32_32)
+#define I64_63 LLVM_CONST(i64_63)
+#define I64_64 LLVM_CONST(i64_64)
+#define REF_NULL I32_NEG_ONE
+
+#define V128_TYPE comp_ctx->basic_types.v128_type
+#define V128_PTR_TYPE comp_ctx->basic_types.v128_ptr_type
+#define V128_i8x16_TYPE comp_ctx->basic_types.i8x16_vec_type
+#define V128_i16x8_TYPE comp_ctx->basic_types.i16x8_vec_type
+#define V128_i32x4_TYPE comp_ctx->basic_types.i32x4_vec_type
+#define V128_i64x2_TYPE comp_ctx->basic_types.i64x2_vec_type
+#define V128_f32x4_TYPE comp_ctx->basic_types.f32x4_vec_type
+#define V128_f64x2_TYPE comp_ctx->basic_types.f64x2_vec_type
+
+#define V128_i8x16_ZERO LLVM_CONST(i8x16_vec_zero)
+#define V128_i16x8_ZERO LLVM_CONST(i16x8_vec_zero)
+#define V128_i32x4_ZERO LLVM_CONST(i32x4_vec_zero)
+#define V128_i64x2_ZERO LLVM_CONST(i64x2_vec_zero)
+#define V128_f32x4_ZERO LLVM_CONST(f32x4_vec_zero)
+#define V128_f64x2_ZERO LLVM_CONST(f64x2_vec_zero)
+
+#define TO_V128_i8x16(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_i8x16_TYPE, "i8x16_val")
+#define TO_V128_i16x8(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_i16x8_TYPE, "i16x8_val")
+#define TO_V128_i32x4(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_i32x4_TYPE, "i32x4_val")
+#define TO_V128_i64x2(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_i64x2_TYPE, "i64x2_val")
+#define TO_V128_f32x4(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_f32x4_TYPE, "f32x4_val")
+#define TO_V128_f64x2(v) \
+ LLVMBuildBitCast(comp_ctx->builder, v, V128_f64x2_TYPE, "f64x2_val")
+
+#define CHECK_LLVM_CONST(v) \
+ do { \
+ if (!v) { \
+ aot_set_last_error("create llvm const failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define GET_AOT_FUNCTION(name, argc) \
+ do { \
+ if (!(func_type = \
+ LLVMFunctionType(ret_type, param_types, argc, false))) { \
+ aot_set_last_error("llvm add function type failed."); \
+ goto fail; \
+ } \
+ if (comp_ctx->is_jit_mode) { \
+ /* JIT mode, call the function directly */ \
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
+ aot_set_last_error("llvm add pointer type failed."); \
+ goto fail; \
+ } \
+ if (!(value = I64_CONST((uint64)(uintptr_t)name)) \
+ || !(func = LLVMConstIntToPtr(value, func_ptr_type))) { \
+ aot_set_last_error("create LLVM value failed."); \
+ goto fail; \
+ } \
+ } \
+ else if (comp_ctx->is_indirect_mode) { \
+ int32 func_index; \
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
+ aot_set_last_error("create LLVM function type failed."); \
+ goto fail; \
+ } \
+ \
+ func_index = aot_get_native_symbol_index(comp_ctx, #name); \
+ if (func_index < 0) { \
+ goto fail; \
+ } \
+ if (!(func = aot_get_func_from_table( \
+ comp_ctx, func_ctx->native_symbol, func_ptr_type, \
+ func_index))) { \
+ goto fail; \
+ } \
+ } \
+ else { \
+ char *func_name = #name; \
+            /* AOT mode, declare the function */                        \
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name)) \
+ && !(func = LLVMAddFunction(func_ctx->module, func_name, \
+ func_type))) { \
+ aot_set_last_error("llvm add function failed."); \
+ goto fail; \
+ } \
+ } \
+ } while (0)
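+
+/* Usage sketch (a hypothetical caller, not taken from this file): the
+   macro assumes `func_type`, `func_ptr_type`, `func`, `value`,
+   `param_types[]` and `ret_type` are declared by the caller, which must
+   also provide a `fail` label:
+
+       param_types[0] = INT8_PTR_TYPE;
+       param_types[1] = I32_TYPE;
+       ret_type = I32_TYPE;
+       GET_AOT_FUNCTION(aot_some_runtime_helper, 2);
+       // then emit the call, e.g.:
+       // LLVMBuildCall2(comp_ctx->builder, func_type, func, args, 2, "ret");
+*/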
+
+bool
+aot_compile_wasm(AOTCompContext *comp_ctx);
+
+bool
+aot_emit_llvm_file(AOTCompContext *comp_ctx, const char *file_name);
+
+bool
+aot_emit_aot_file(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ const char *file_name);
+
+uint8 *
+aot_emit_aot_file_buf(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ uint32 *p_aot_file_size);
+
+bool
+aot_emit_object_file(AOTCompContext *comp_ctx, char *file_name);
+
+char *
+aot_generate_tempfile_name(const char *prefix, const char *extension,
+ char *buffer, uint32 len);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_COMPILER_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_aot_file.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_aot_file.c
new file mode 100644
index 000000000..62bb809da
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_aot_file.c
@@ -0,0 +1,2930 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_compiler.h"
+#include "../aot/aot_runtime.h"
+
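+/* Store a 64-bit value as two 32-bit halves so the store also works on
+   addresses that are only 4-byte aligned (sections in the AOT file are
+   aligned to 4 bytes, not necessarily to 8) */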
+#define PUT_U64_TO_ADDR(addr, value) \
+ do { \
+ union { \
+ uint64 val; \
+ uint32 parts[2]; \
+ } u; \
+ u.val = (value); \
+ ((uint32 *)(addr))[0] = u.parts[0]; \
+ ((uint32 *)(addr))[1] = u.parts[1]; \
+ } while (0)
+
+#define CHECK_SIZE(size) \
+ do { \
+ if (size == (uint32)-1) { \
+ aot_set_last_error("get symbol size failed."); \
+ return (uint32)-1; \
+ } \
+ } while (0)
+
+static bool
+check_utf8_str(const uint8 *str, uint32 len)
+{
+    /* The valid ranges are taken from page 125 of the Unicode 9.0
+       specification: https://www.unicode.org/versions/Unicode9.0.0/ch03.pdf */
+ const uint8 *p = str, *p_end = str + len;
+ uint8 chr;
+
+ while (p < p_end) {
+ chr = *p;
+ if (chr < 0x80) {
+ p++;
+ }
+ else if (chr >= 0xC2 && chr <= 0xDF && p + 1 < p_end) {
+ if (p[1] < 0x80 || p[1] > 0xBF) {
+ return false;
+ }
+ p += 2;
+ }
+ else if (chr >= 0xE0 && chr <= 0xEF && p + 2 < p_end) {
+ if (chr == 0xE0) {
+ if (p[1] < 0xA0 || p[1] > 0xBF || p[2] < 0x80 || p[2] > 0xBF) {
+ return false;
+ }
+ }
+ else if (chr == 0xED) {
+ if (p[1] < 0x80 || p[1] > 0x9F || p[2] < 0x80 || p[2] > 0xBF) {
+ return false;
+ }
+ }
+ else if (chr >= 0xE1 && chr <= 0xEF) {
+ if (p[1] < 0x80 || p[1] > 0xBF || p[2] < 0x80 || p[2] > 0xBF) {
+ return false;
+ }
+ }
+ p += 3;
+ }
+ else if (chr >= 0xF0 && chr <= 0xF4 && p + 3 < p_end) {
+ if (chr == 0xF0) {
+ if (p[1] < 0x90 || p[1] > 0xBF || p[2] < 0x80 || p[2] > 0xBF
+ || p[3] < 0x80 || p[3] > 0xBF) {
+ return false;
+ }
+ }
+ else if (chr >= 0xF1 && chr <= 0xF3) {
+ if (p[1] < 0x80 || p[1] > 0xBF || p[2] < 0x80 || p[2] > 0xBF
+ || p[3] < 0x80 || p[3] > 0xBF) {
+ return false;
+ }
+ }
+ else if (chr == 0xF4) {
+ if (p[1] < 0x80 || p[1] > 0x8F || p[2] < 0x80 || p[2] > 0xBF
+ || p[3] < 0x80 || p[3] > 0xBF) {
+ return false;
+ }
+ }
+ p += 4;
+ }
+ else {
+ return false;
+ }
+ }
+ return (p == p_end);
+}
+
+/* Internal function in object file */
+typedef struct AOTObjectFunc {
+ char *func_name;
+ uint64 text_offset;
+} AOTObjectFunc;
+
+/* Symbol table list node */
+typedef struct AOTSymbolNode {
+ struct AOTSymbolNode *next;
+ uint32 str_len;
+ char *symbol;
+} AOTSymbolNode;
+
+typedef struct AOTSymbolList {
+ AOTSymbolNode *head;
+ AOTSymbolNode *end;
+ uint32 len;
+} AOTSymbolList;
+
+/* AOT object data */
+typedef struct AOTObjectData {
+ LLVMMemoryBufferRef mem_buf;
+ LLVMBinaryRef binary;
+
+ AOTTargetInfo target_info;
+
+ void *text;
+ uint32 text_size;
+
+ /* literal data and size */
+ void *literal;
+ uint32 literal_size;
+
+ AOTObjectDataSection *data_sections;
+ uint32 data_sections_count;
+
+ AOTObjectFunc *funcs;
+ uint32 func_count;
+
+ AOTSymbolList symbol_list;
+ AOTRelocationGroup *relocation_groups;
+ uint32 relocation_group_count;
+} AOTObjectData;
+
+#if 0
+static void dump_buf(uint8 *buf, uint32 size, char *title)
+{
+ int i;
+ printf("------ %s -------", title);
+ for (i = 0; i < size; i++) {
+ if ((i % 16) == 0)
+ printf("\n");
+ printf("%02x ", (unsigned char)buf[i]);
+ }
+ printf("\n\n");
+}
+#endif
+
+static bool
+is_32bit_binary(const AOTObjectData *obj_data)
+{
+ /* bit 1: 0 is 32-bit, 1 is 64-bit */
+ return obj_data->target_info.bin_type & 2 ? false : true;
+}
+
+static bool
+is_little_endian_binary(const AOTObjectData *obj_data)
+{
+ /* bit 0: 0 is little-endian, 1 is big-endian */
+ return obj_data->target_info.bin_type & 1 ? false : true;
+}
+
+static bool
+str_starts_with(const char *str, const char *prefix)
+{
+ size_t len_pre = strlen(prefix), len_str = strlen(str);
+ return (len_str >= len_pre) && !memcmp(str, prefix, len_pre);
+}
+
+static uint32
+get_file_header_size()
+{
+ /* magic number (4 bytes) + version (4 bytes) */
+ return sizeof(uint32) + sizeof(uint32);
+}
+
+static uint32
+get_string_size(AOTCompContext *comp_ctx, const char *s)
+{
+ /* string size (2 bytes) + string content */
+ return (uint32)sizeof(uint16) + (uint32)strlen(s) +
+ /* emit string with '\0' only in XIP mode */
+ (comp_ctx->is_indirect_mode ? 1 : 0);
+}
+
+static uint32
+get_target_info_section_size()
+{
+ return sizeof(AOTTargetInfo);
+}
+
+static uint32
+get_mem_init_data_size(AOTMemInitData *mem_init_data)
+{
+ /* init expr type (4 bytes) + init expr value (8 bytes)
+ + byte count (4 bytes) + bytes */
+ uint32 total_size = (uint32)(sizeof(uint32) + sizeof(uint64)
+ + sizeof(uint32) + mem_init_data->byte_count);
+
+ /* bulk_memory enabled:
+ is_passive (4 bytes) + memory_index (4 bytes)
+ bulk memory disabled:
+ placeholder (4 bytes) + placeholder (4 bytes)
+ */
+ total_size += (sizeof(uint32) + sizeof(uint32));
+
+ return total_size;
+}
+
+static uint32
+get_mem_init_data_list_size(AOTMemInitData **mem_init_data_list,
+ uint32 mem_init_data_count)
+{
+ AOTMemInitData **mem_init_data = mem_init_data_list;
+ uint32 size = 0, i;
+
+ for (i = 0; i < mem_init_data_count; i++, mem_init_data++) {
+ size = align_uint(size, 4);
+ size += get_mem_init_data_size(*mem_init_data);
+ }
+ return size;
+}
+
+static uint32
+get_import_memory_size(AOTCompData *comp_data)
+{
+ /* currently we only emit import_memory_count = 0 */
+ return sizeof(uint32);
+}
+
+static uint32
+get_memory_size(AOTCompData *comp_data)
+{
+ /* memory_count + count * (memory_flags + num_bytes_per_page +
+ init_page_count + max_page_count) */
+ return (uint32)(sizeof(uint32)
+ + comp_data->memory_count * sizeof(uint32) * 4);
+}
+
+static uint32
+get_mem_info_size(AOTCompData *comp_data)
+{
+ /* import_memory_size + memory_size
+ + init_data_count + init_data_list */
+ return get_import_memory_size(comp_data) + get_memory_size(comp_data)
+ + (uint32)sizeof(uint32)
+ + get_mem_init_data_list_size(comp_data->mem_init_data_list,
+ comp_data->mem_init_data_count);
+}
+
+static uint32
+get_table_init_data_size(AOTTableInitData *table_init_data)
+{
+ /*
+     * mode (4 bytes) + elem_type (4 bytes); the is_dropped field is not
+     * needed
+     *
+     * table_index (4 bytes) + init expr type (4 bytes)
+     * + init expr value (8 bytes)
+     * + func index count (4 bytes) + func indexes
+ */
+ return (uint32)(sizeof(uint32) * 2 + sizeof(uint32) + sizeof(uint32)
+ + sizeof(uint64) + sizeof(uint32)
+ + sizeof(uint32) * table_init_data->func_index_count);
+}
+
+static uint32
+get_table_init_data_list_size(AOTTableInitData **table_init_data_list,
+ uint32 table_init_data_count)
+{
+ /*
+ * ------------------------------
+ * | table_init_data_count
+ * ------------------------------
+ * | | U32 mode
+ * | AOTTableInitData[N] | U32 elem_type
+ * | | U32 table_index
+ * | | U32 offset.init_expr_type
+ * | | U64 offset.u.i64
+ * | | U32 func_index_count
+ * | | U32[func_index_count]
+ * ------------------------------
+ */
+ AOTTableInitData **table_init_data = table_init_data_list;
+ uint32 size = 0, i;
+
+ size = (uint32)sizeof(uint32);
+
+ for (i = 0; i < table_init_data_count; i++, table_init_data++) {
+ size = align_uint(size, 4);
+ size += get_table_init_data_size(*table_init_data);
+ }
+ return size;
+}
+
+static uint32
+get_import_table_size(AOTCompData *comp_data)
+{
+    /*
+     * ------------------------------
+     * | import_table_count
+     * ------------------------------
+     * |                    | U32 table_init_size
+     * | AOTImportTable[N]  | U32 table_max_size
+     * |                    | U32 possible_grow (more convenient than U8)
+     * ------------------------------
+     */
+ return (uint32)(sizeof(uint32)
+ + comp_data->import_table_count * (sizeof(uint32) * 3));
+}
+
+static uint32
+get_table_size(AOTCompData *comp_data)
+{
+ /*
+ * ------------------------------
+ * | table_count
+ * ------------------------------
+ * | | U32 elem_type
+ * | AOTTable[N] | U32 table_flags
+ * | | U32 table_init_size
+ * | | U32 table_max_size
+     * |                  | U32 possible_grow (more convenient than U8)
+ * ------------------------------
+ */
+ return (uint32)(sizeof(uint32)
+ + comp_data->table_count * (sizeof(uint32) * 5));
+}
+
+static uint32
+get_table_info_size(AOTCompData *comp_data)
+{
+ /*
+ * ------------------------------
+ * | import_table_count
+ * ------------------------------
+ * |
+ * | AOTImportTable[import_table_count]
+ * |
+ * ------------------------------
+ * | table_count
+ * ------------------------------
+ * |
+ * | AOTTable[table_count]
+ * |
+ * ------------------------------
+ * | table_init_data_count
+ * ------------------------------
+ * |
+ * | AOTTableInitData*[table_init_data_count]
+ * |
+ * ------------------------------
+ */
+ return get_import_table_size(comp_data) + get_table_size(comp_data)
+ + get_table_init_data_list_size(comp_data->table_init_data_list,
+ comp_data->table_init_data_count);
+}
+
+static uint32
+get_func_type_size(AOTFuncType *func_type)
+{
+ /* param count + result count + types */
+ return (uint32)sizeof(uint32) * 2 + func_type->param_count
+ + func_type->result_count;
+}
+
+static uint32
+get_func_types_size(AOTFuncType **func_types, uint32 func_type_count)
+{
+ AOTFuncType **func_type = func_types;
+ uint32 size = 0, i;
+
+ for (i = 0; i < func_type_count; i++, func_type++) {
+ size = align_uint(size, 4);
+ size += get_func_type_size(*func_type);
+ }
+ return size;
+}
+
+static uint32
+get_func_type_info_size(AOTCompData *comp_data)
+{
+ /* func type count + func type list */
+ return (uint32)sizeof(uint32)
+ + get_func_types_size(comp_data->func_types,
+ comp_data->func_type_count);
+}
+
+static uint32
+get_import_global_size(AOTCompContext *comp_ctx, AOTImportGlobal *import_global)
+{
+ /* type (1 byte) + is_mutable (1 byte) + module_name + global_name */
+ uint32 size = (uint32)sizeof(uint8) * 2
+ + get_string_size(comp_ctx, import_global->module_name);
+ size = align_uint(size, 2);
+ size += get_string_size(comp_ctx, import_global->global_name);
+ return size;
+}
+
+static uint32
+get_import_globals_size(AOTCompContext *comp_ctx,
+ AOTImportGlobal *import_globals,
+ uint32 import_global_count)
+{
+ AOTImportGlobal *import_global = import_globals;
+ uint32 size = 0, i;
+
+ for (i = 0; i < import_global_count; i++, import_global++) {
+ size = align_uint(size, 2);
+ size += get_import_global_size(comp_ctx, import_global);
+ }
+ return size;
+}
+
+static uint32
+get_import_global_info_size(AOTCompContext *comp_ctx, AOTCompData *comp_data)
+{
+ /* import global count + import globals */
+ return (uint32)sizeof(uint32)
+ + get_import_globals_size(comp_ctx, comp_data->import_globals,
+ comp_data->import_global_count);
+}
+
+static uint32
+get_global_size(AOTGlobal *global)
+{
+ if (global->init_expr.init_expr_type != INIT_EXPR_TYPE_V128_CONST)
+        /* type (1 byte) + is_mutable (1 byte)
+           + init expr type (2 bytes) + init expr value (8 bytes) */
+        return sizeof(uint8) * 2 + sizeof(uint16) + sizeof(uint64);
+    else
+        /* type (1 byte) + is_mutable (1 byte)
+           + init expr type (2 bytes) + v128 value (16 bytes) */
+        return sizeof(uint8) * 2 + sizeof(uint16) + sizeof(uint64) * 2;
+}
+
+static uint32
+get_globals_size(AOTGlobal *globals, uint32 global_count)
+{
+ AOTGlobal *global = globals;
+ uint32 size = 0, i;
+
+ for (i = 0; i < global_count; i++, global++) {
+ size = align_uint(size, 4);
+ size += get_global_size(global);
+ }
+ return size;
+}
+
+static uint32
+get_global_info_size(AOTCompData *comp_data)
+{
+ /* global count + globals */
+ return (uint32)sizeof(uint32)
+ + get_globals_size(comp_data->globals, comp_data->global_count);
+}
+
+static uint32
+get_import_func_size(AOTCompContext *comp_ctx, AOTImportFunc *import_func)
+{
+ /* type index (2 bytes) + module_name + func_name */
+ uint32 size = (uint32)sizeof(uint16)
+ + get_string_size(comp_ctx, import_func->module_name);
+ size = align_uint(size, 2);
+ size += get_string_size(comp_ctx, import_func->func_name);
+ return size;
+}
+
+static uint32
+get_import_funcs_size(AOTCompContext *comp_ctx, AOTImportFunc *import_funcs,
+ uint32 import_func_count)
+{
+ AOTImportFunc *import_func = import_funcs;
+ uint32 size = 0, i;
+
+ for (i = 0; i < import_func_count; i++, import_func++) {
+ size = align_uint(size, 2);
+ size += get_import_func_size(comp_ctx, import_func);
+ }
+ return size;
+}
+
+static uint32
+get_import_func_info_size(AOTCompContext *comp_ctx, AOTCompData *comp_data)
+{
+ /* import func count + import funcs */
+ return (uint32)sizeof(uint32)
+ + get_import_funcs_size(comp_ctx, comp_data->import_funcs,
+ comp_data->import_func_count);
+}
+
+static uint32
+get_object_data_sections_size(AOTCompContext *comp_ctx,
+ AOTObjectDataSection *data_sections,
+ uint32 data_sections_count)
+{
+ AOTObjectDataSection *data_section = data_sections;
+ uint32 size = 0, i;
+
+ for (i = 0; i < data_sections_count; i++, data_section++) {
+ /* name + size + data */
+ size = align_uint(size, 2);
+ size += get_string_size(comp_ctx, data_section->name);
+ size = align_uint(size, 4);
+ size += (uint32)sizeof(uint32);
+ size += data_section->size;
+ }
+ return size;
+}
+
+static uint32
+get_object_data_section_info_size(AOTCompContext *comp_ctx,
+ AOTObjectData *obj_data)
+{
+ /* data sections count + data sections */
+ return (uint32)sizeof(uint32)
+ + get_object_data_sections_size(comp_ctx, obj_data->data_sections,
+ obj_data->data_sections_count);
+}
+
+static uint32
+get_init_data_section_size(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 size = 0;
+
+ size += get_mem_info_size(comp_data);
+
+ size = align_uint(size, 4);
+ size += get_table_info_size(comp_data);
+
+ size = align_uint(size, 4);
+ size += get_func_type_info_size(comp_data);
+
+ size = align_uint(size, 4);
+ size += get_import_global_info_size(comp_ctx, comp_data);
+
+ size = align_uint(size, 4);
+ size += get_global_info_size(comp_data);
+
+ size = align_uint(size, 4);
+ size += get_import_func_info_size(comp_ctx, comp_data);
+
+ /* func count + start func index */
+ size = align_uint(size, 4);
+ size += (uint32)sizeof(uint32) * 2;
+
+ /* aux data/heap/stack data */
+ size += sizeof(uint32) * 7;
+
+ size += get_object_data_section_info_size(comp_ctx, obj_data);
+ return size;
+}
+
+static uint32
+get_text_section_size(AOTObjectData *obj_data)
+{
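+    /* literal size field (4 bytes) + literal data + text,
+       rounded up to a 4-byte boundary */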
+ return (sizeof(uint32) + obj_data->literal_size + obj_data->text_size + 3)
+ & ~3;
+}
+
+static uint32
+get_func_section_size(AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+    /* text offsets + function type indexes */
+ uint32 size = 0;
+
+ if (is_32bit_binary(obj_data))
+ size = (uint32)sizeof(uint32) * comp_data->func_count;
+ else
+ size = (uint32)sizeof(uint64) * comp_data->func_count;
+
+ size += (uint32)sizeof(uint32) * comp_data->func_count;
+ return size;
+}
+
+static uint32
+get_export_size(AOTCompContext *comp_ctx, AOTExport *export)
+{
+ /* export index + export kind + 1 byte padding + export name */
+ return (uint32)sizeof(uint32) + sizeof(uint8) + 1
+ + get_string_size(comp_ctx, export->name);
+}
+
+static uint32
+get_exports_size(AOTCompContext *comp_ctx, AOTExport *exports,
+ uint32 export_count)
+{
+ AOTExport *export = exports;
+ uint32 size = 0, i;
+
+    for (i = 0; i < export_count; i++, export++) {
+ size = align_uint(size, 4);
+ size += get_export_size(comp_ctx, export);
+ }
+ return size;
+}
+
+static uint32
+get_export_section_size(AOTCompContext *comp_ctx, AOTCompData *comp_data)
+{
+ /* export count + exports */
+ return (uint32)sizeof(uint32)
+ + get_exports_size(comp_ctx, comp_data->wasm_module->exports,
+ comp_data->wasm_module->export_count);
+}
+
+static uint32
+get_relocation_size(AOTRelocation *relocation, bool is_32bin)
+{
+ /* offset + addend + relocation type + symbol name */
+ uint32 size = 0;
+ if (is_32bin)
+ size = sizeof(uint32) * 2; /* offset and addend */
+ else
+ size = sizeof(uint64) * 2; /* offset and addend */
+ size += (uint32)sizeof(uint32); /* relocation type */
+ size += (uint32)sizeof(uint32); /* symbol name index */
+ return size;
+}
+
+static uint32
+get_relocations_size(AOTRelocation *relocations, uint32 relocation_count,
+ bool is_32bin)
+{
+ AOTRelocation *relocation = relocations;
+ uint32 size = 0, i;
+
+ for (i = 0; i < relocation_count; i++, relocation++) {
+ size = align_uint(size, 4);
+ size += get_relocation_size(relocation, is_32bin);
+ }
+ return size;
+}
+
+static uint32
+get_relocation_group_size(AOTRelocationGroup *relocation_group, bool is_32bin)
+{
+ uint32 size = 0;
+ /* section name index + relocation count + relocations */
+ size += (uint32)sizeof(uint32);
+ size += (uint32)sizeof(uint32);
+ size += get_relocations_size(relocation_group->relocations,
+ relocation_group->relocation_count, is_32bin);
+ return size;
+}
+
+static uint32
+get_relocation_groups_size(AOTRelocationGroup *relocation_groups,
+ uint32 relocation_group_count, bool is_32bin)
+{
+ AOTRelocationGroup *relocation_group = relocation_groups;
+ uint32 size = 0, i;
+
+ for (i = 0; i < relocation_group_count; i++, relocation_group++) {
+ size = align_uint(size, 4);
+ size += get_relocation_group_size(relocation_group, is_32bin);
+ }
+ return size;
+}
+
+/* Return the index (in insertion order) of the symbol, creating it if it
+   does not exist yet; returns (uint32)-1 on failure */
+static uint32
+get_relocation_symbol_index(const char *symbol_name, bool *is_new,
+ AOTSymbolList *symbol_list)
+{
+ AOTSymbolNode *sym;
+ uint32 index = 0;
+
+ sym = symbol_list->head;
+ while (sym) {
+ if (!strcmp(sym->symbol, symbol_name)) {
+ if (is_new)
+ *is_new = false;
+ return index;
+ }
+
+ sym = sym->next;
+ index++;
+ }
+
+ /* Not found in symbol_list, add it */
+ sym = wasm_runtime_malloc(sizeof(AOTSymbolNode));
+ if (!sym) {
+ return (uint32)-1;
+ }
+
+ memset(sym, 0, sizeof(AOTSymbolNode));
+ sym->symbol = (char *)symbol_name;
+ sym->str_len = (uint32)strlen(symbol_name);
+
+ if (!symbol_list->head) {
+ symbol_list->head = symbol_list->end = sym;
+ }
+ else {
+ symbol_list->end->next = sym;
+ symbol_list->end = sym;
+ }
+ symbol_list->len++;
+
+ if (is_new)
+ *is_new = true;
+ return index;
+}
+
+static uint32
+get_relocation_symbol_size(AOTCompContext *comp_ctx, AOTRelocation *relocation,
+ AOTSymbolList *symbol_list)
+{
+ uint32 size = 0, index = 0;
+ bool is_new = false;
+
+ index = get_relocation_symbol_index(relocation->symbol_name, &is_new,
+ symbol_list);
+ CHECK_SIZE(index);
+
+ if (is_new) {
+ size += get_string_size(comp_ctx, relocation->symbol_name);
+ size = align_uint(size, 2);
+ }
+
+ relocation->symbol_index = index;
+ return size;
+}
+
+static uint32
+get_relocations_symbol_size(AOTCompContext *comp_ctx,
+ AOTRelocation *relocations, uint32 relocation_count,
+ AOTSymbolList *symbol_list)
+{
+ AOTRelocation *relocation = relocations;
+ uint32 size = 0, curr_size, i;
+
+ for (i = 0; i < relocation_count; i++, relocation++) {
+ curr_size =
+ get_relocation_symbol_size(comp_ctx, relocation, symbol_list);
+ CHECK_SIZE(curr_size);
+
+ size += curr_size;
+ }
+ return size;
+}
+
+static uint32
+get_relocation_group_symbol_size(AOTCompContext *comp_ctx,
+ AOTRelocationGroup *relocation_group,
+ AOTSymbolList *symbol_list)
+{
+ uint32 size = 0, index = 0, curr_size;
+ bool is_new = false;
+
+ index = get_relocation_symbol_index(relocation_group->section_name, &is_new,
+ symbol_list);
+ CHECK_SIZE(index);
+
+ if (is_new) {
+ size += get_string_size(comp_ctx, relocation_group->section_name);
+ size = align_uint(size, 2);
+ }
+
+ relocation_group->name_index = index;
+
+ curr_size = get_relocations_symbol_size(
+ comp_ctx, relocation_group->relocations,
+ relocation_group->relocation_count, symbol_list);
+ CHECK_SIZE(curr_size);
+ size += curr_size;
+
+ return size;
+}
+
+static uint32
+get_relocation_groups_symbol_size(AOTCompContext *comp_ctx,
+ AOTRelocationGroup *relocation_groups,
+ uint32 relocation_group_count,
+ AOTSymbolList *symbol_list)
+{
+ AOTRelocationGroup *relocation_group = relocation_groups;
+ uint32 size = 0, curr_size, i;
+
+ for (i = 0; i < relocation_group_count; i++, relocation_group++) {
+ curr_size = get_relocation_group_symbol_size(comp_ctx, relocation_group,
+ symbol_list);
+ CHECK_SIZE(curr_size);
+ size += curr_size;
+ }
+ return size;
+}
+
+static uint32
+get_symbol_size_from_symbol_list(AOTCompContext *comp_ctx,
+ AOTSymbolList *symbol_list)
+{
+ AOTSymbolNode *sym;
+ uint32 size = 0;
+
+ sym = symbol_list->head;
+ while (sym) {
+ /* (uint16)str_len + str */
+ size += get_string_size(comp_ctx, sym->symbol);
+ size = align_uint(size, 2);
+ sym = sym->next;
+ }
+
+ return size;
+}
+
+static uint32
+get_relocation_section_symbol_size(AOTCompContext *comp_ctx,
+ AOTObjectData *obj_data)
+{
+ AOTRelocationGroup *relocation_groups = obj_data->relocation_groups;
+ uint32 relocation_group_count = obj_data->relocation_group_count;
+ uint32 string_count = 0, symbol_table_size = 0;
+
+    /* The section size is calculated twice; the symbol list is already
+       populated on the second pass, so take the size from it directly */
+ if (obj_data->symbol_list.len > 0) {
+ symbol_table_size =
+ get_symbol_size_from_symbol_list(comp_ctx, &obj_data->symbol_list);
+ }
+ else {
+ symbol_table_size = get_relocation_groups_symbol_size(
+ comp_ctx, relocation_groups, relocation_group_count,
+ &obj_data->symbol_list);
+ }
+ CHECK_SIZE(symbol_table_size);
+ string_count = obj_data->symbol_list.len;
+
+ /* string_count + string_offsets + total_string_len
+ + [str (string_len + str)] */
+ return (uint32)(sizeof(uint32) + sizeof(uint32) * string_count
+ + sizeof(uint32) + symbol_table_size);
+}
+
+static uint32
+get_relocation_section_size(AOTCompContext *comp_ctx, AOTObjectData *obj_data)
+{
+ AOTRelocationGroup *relocation_groups = obj_data->relocation_groups;
+ uint32 relocation_group_count = obj_data->relocation_group_count;
+ uint32 symbol_table_size = 0;
+
+ symbol_table_size = get_relocation_section_symbol_size(comp_ctx, obj_data);
+ CHECK_SIZE(symbol_table_size);
+ symbol_table_size = align_uint(symbol_table_size, 4);
+
+ /* relocation group count + symbol_table + relocation groups */
+ return (uint32)sizeof(uint32) + symbol_table_size
+ + get_relocation_groups_size(relocation_groups,
+ relocation_group_count,
+ is_32bit_binary(obj_data));
+}
+
+static uint32
+get_native_symbol_list_size(AOTCompContext *comp_ctx)
+{
+ uint32 len = 0;
+ AOTNativeSymbol *sym = NULL;
+
+ sym = bh_list_first_elem(&comp_ctx->native_symbols);
+
+ while (sym) {
+ len = align_uint(len, 2);
+ len += get_string_size(comp_ctx, sym->symbol);
+ sym = bh_list_elem_next(sym);
+ }
+
+ return len;
+}
+
+static uint32
+get_name_section_size(AOTCompData *comp_data);
+
+static uint32
+get_custom_sections_size(AOTCompContext *comp_ctx, AOTCompData *comp_data);
+
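+/*
+ * Overall layout of the emitted AOT file:
+ *   file header (magic + version)
+ *   target info section
+ *   init data section
+ *   text section
+ *   function section
+ *   export section
+ *   relocation section
+ *   optional custom sections (native symbols, name section, raw sections)
+ * Each section is 4-byte aligned and is preceded by its section id and
+ * section size.
+ */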
+static uint32
+get_aot_file_size(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 size = 0;
+ uint32 size_custom_section = 0;
+
+ /* aot file header */
+ size += get_file_header_size();
+
+ /* target info section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_target_info_section_size();
+
+ /* init data section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_init_data_section_size(comp_ctx, comp_data, obj_data);
+
+ /* text section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_text_section_size(obj_data);
+
+ /* function section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_func_section_size(comp_data, obj_data);
+
+ /* export section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_export_section_size(comp_ctx, comp_data);
+
+ /* relocation section */
+ size = align_uint(size, 4);
+ /* section id + section size */
+ size += (uint32)sizeof(uint32) * 2;
+ size += get_relocation_section_size(comp_ctx, obj_data);
+
+ if (get_native_symbol_list_size(comp_ctx) > 0) {
+ /* emit only when there are native symbols */
+ size = align_uint(size, 4);
+ /* section id + section size + sub section id + symbol count */
+ size += (uint32)sizeof(uint32) * 4;
+ size += get_native_symbol_list_size(comp_ctx);
+ }
+
+ if (comp_ctx->enable_aux_stack_frame) {
+ /* custom name section */
+ size = align_uint(size, 4);
+ /* section id + section size + sub section id */
+ size += (uint32)sizeof(uint32) * 3;
+ size += (comp_data->aot_name_section_size =
+ get_name_section_size(comp_data));
+ }
+
+ size_custom_section = get_custom_sections_size(comp_ctx, comp_data);
+ if (size_custom_section > 0) {
+ size = align_uint(size, 4);
+ size += size_custom_section;
+ }
+
+ return size;
+}
+
+#define exchange_uint8(p_data) (void)0
+
+static void
+exchange_uint16(uint8 *p_data)
+{
+ uint8 value = *p_data;
+ *p_data = *(p_data + 1);
+ *(p_data + 1) = value;
+}
+
+static void
+exchange_uint32(uint8 *p_data)
+{
+ uint8 value = *p_data;
+ *p_data = *(p_data + 3);
+ *(p_data + 3) = value;
+
+ value = *(p_data + 1);
+ *(p_data + 1) = *(p_data + 2);
+ *(p_data + 2) = value;
+}
+
+static void
+exchange_uint64(uint8 *pData)
+{
+ uint32 value;
+
+ value = *(uint32 *)pData;
+ *(uint32 *)pData = *(uint32 *)(pData + 4);
+ *(uint32 *)(pData + 4) = value;
+ exchange_uint32(pData);
+ exchange_uint32(pData + 4);
+}
+
+static void
+exchange_uint128(uint8 *pData)
+{
+ /* swap high 64bit and low 64bit */
+ uint64 value = *(uint64 *)pData;
+ *(uint64 *)pData = *(uint64 *)(pData + 8);
+ *(uint64 *)(pData + 8) = value;
+ /* exchange high 64bit */
+ exchange_uint64(pData);
+ /* exchange low 64bit */
+ exchange_uint64(pData + 8);
+}
+
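+/* Detect host endianness at runtime: the union overlays an int with its
+   first byte, which reads as 1 on a little-endian host */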
+static union {
+ int a;
+ char b;
+} __ue = { .a = 1 };
+
+#define is_little_endian() (__ue.b == 1)
+
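+/* CHECK_BUF and the EMIT_* macros below assume local variables `buf`,
+   `buf_end` and `offset` in the enclosing function, write multi-byte
+   values in little-endian order (byte-swapping on big-endian hosts),
+   and `return false` on buffer overflow */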
+#define CHECK_BUF(length) \
+ do { \
+ if (buf + offset + length > buf_end) { \
+ aot_set_last_error("buf overflow"); \
+ return false; \
+ } \
+ } while (0)
+
+#define EMIT_U8(v) \
+ do { \
+ CHECK_BUF(1); \
+ *(uint8 *)(buf + offset) = (uint8)v; \
+ offset++; \
+ } while (0)
+
+#define EMIT_U16(v) \
+ do { \
+ uint16 t = (uint16)v; \
+ CHECK_BUF(2); \
+ if (!is_little_endian()) \
+ exchange_uint16((uint8 *)&t); \
+ *(uint16 *)(buf + offset) = t; \
+ offset += (uint32)sizeof(uint16); \
+ } while (0)
+
+#define EMIT_U32(v) \
+ do { \
+ uint32 t = (uint32)v; \
+ CHECK_BUF(4); \
+ if (!is_little_endian()) \
+ exchange_uint32((uint8 *)&t); \
+ *(uint32 *)(buf + offset) = t; \
+ offset += (uint32)sizeof(uint32); \
+ } while (0)
+
+#define EMIT_U64(v) \
+ do { \
+ uint64 t = (uint64)v; \
+ CHECK_BUF(8); \
+ if (!is_little_endian()) \
+ exchange_uint64((uint8 *)&t); \
+ PUT_U64_TO_ADDR(buf + offset, t); \
+ offset += (uint32)sizeof(uint64); \
+ } while (0)
+
+#define EMIT_V128(v) \
+ do { \
+ uint64 *t = (uint64 *)v.i64x2; \
+ CHECK_BUF(16); \
+ if (!is_little_endian()) \
+ exchange_uint128((uint8 *)t); \
+ PUT_U64_TO_ADDR(buf + offset, t[0]); \
+ offset += (uint32)sizeof(uint64); \
+ PUT_U64_TO_ADDR(buf + offset, t[1]); \
+ offset += (uint32)sizeof(uint64); \
+ } while (0)
+
+#define EMIT_BUF(v, len) \
+ do { \
+ CHECK_BUF(len); \
+ memcpy(buf + offset, v, len); \
+ offset += len; \
+ } while (0)
+
+#define EMIT_STR(s) \
+ do { \
+ uint32 str_len = (uint32)strlen(s); \
+ if (str_len > INT16_MAX) { \
+ aot_set_last_error("emit string failed: " \
+ "string too long"); \
+ return false; \
+ } \
+ if (comp_ctx->is_indirect_mode) \
+ /* emit '\0' only in XIP mode */ \
+ str_len++; \
+ EMIT_U16(str_len); \
+ EMIT_BUF(s, str_len); \
+ } while (0)
+
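+/* Decode an unsigned or signed LEB128 value of at most `maxbits` bits.
+   Each byte carries 7 payload bits; the MSB is the continuation flag.
+   For example, the unsigned value 624485 is encoded as the byte
+   sequence 0xE5 0x8E 0x26. */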
+static bool
+read_leb(uint8 **p_buf, const uint8 *buf_end, uint32 maxbits, bool sign,
+ uint64 *p_result)
+{
+ const uint8 *buf = *p_buf;
+ uint64 result = 0;
+ uint32 shift = 0;
+ uint32 offset = 0, bcnt = 0;
+ uint64 byte;
+
+ while (true) {
+        /* a uN or sN LEB128 value must not exceed ceil(N/7) bytes */
+ if (bcnt + 1 > (maxbits + 6) / 7) {
+ aot_set_last_error("integer representation too long");
+ return false;
+ }
+
+ if (buf + offset + 1 > buf_end) {
+ aot_set_last_error("unexpected end of section or function");
+ return false;
+ }
+ byte = buf[offset];
+ offset += 1;
+ result |= ((byte & 0x7f) << shift);
+ shift += 7;
+ bcnt += 1;
+ if ((byte & 0x80) == 0) {
+ break;
+ }
+ }
+
+ if (!sign && maxbits == 32 && shift >= maxbits) {
+ /* The top bits set represent values > 32 bits */
+ if (((uint8)byte) & 0xf0)
+ goto fail_integer_too_large;
+ }
+ else if (sign && maxbits == 32) {
+ if (shift < maxbits) {
+ /* Sign extend, second highest bit is the sign bit */
+ if ((uint8)byte & 0x40)
+ result |= (~((uint64)0)) << shift;
+ }
+ else {
+ /* The top bits should be a sign-extension of the sign bit */
+ bool sign_bit_set = ((uint8)byte) & 0x8;
+ int top_bits = ((uint8)byte) & 0xf0;
+ if ((sign_bit_set && top_bits != 0x70)
+ || (!sign_bit_set && top_bits != 0))
+ goto fail_integer_too_large;
+ }
+ }
+ else if (sign && maxbits == 64) {
+ if (shift < maxbits) {
+ /* Sign extend, second highest bit is the sign bit */
+ if ((uint8)byte & 0x40)
+ result |= (~((uint64)0)) << shift;
+ }
+ else {
+ /* The top bits should be a sign-extension of the sign bit */
+ bool sign_bit_set = ((uint8)byte) & 0x1;
+ int top_bits = ((uint8)byte) & 0xfe;
+
+ if ((sign_bit_set && top_bits != 0x7e)
+ || (!sign_bit_set && top_bits != 0))
+ goto fail_integer_too_large;
+ }
+ }
+
+ *p_buf += offset;
+ *p_result = result;
+ return true;
+
+fail_integer_too_large:
+ aot_set_last_error("integer too large");
+ return false;
+}
+
+#define read_leb_uint32(p, p_end, res) \
+ do { \
+ uint64 res64; \
+ if (!read_leb((uint8 **)&p, p_end, 32, false, &res64)) \
+ goto fail; \
+ res = (uint32)res64; \
+ } while (0)
+
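+/* Calculate the size of the AOT name section and, as a side effect,
+   transcode the wasm custom "name" section into
+   comp_data->aot_name_section_buf (LEB128-encoded fields are rewritten
+   as fixed-width, aligned values); returns 0 on failure */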
+static uint32
+get_name_section_size(AOTCompData *comp_data)
+{
+ const uint8 *p = comp_data->name_section_buf,
+ *p_end = comp_data->name_section_buf_end;
+ uint8 *buf, *buf_end;
+ uint32 name_type, subsection_size;
+ uint32 previous_name_type = 0;
+ uint32 num_func_name;
+ uint32 func_index;
+ uint32 previous_func_index = ~0U;
+ uint32 func_name_len;
+ uint32 name_index;
+ int i = 0;
+ uint32 name_len;
+ uint32 offset = 0;
+ uint32 max_aot_buf_size = 0;
+
+ if (p >= p_end) {
+ aot_set_last_error("unexpected end");
+ return 0;
+ }
+
+ max_aot_buf_size = 4 * (uint32)(p_end - p);
+ if (!(buf = comp_data->aot_name_section_buf =
+ wasm_runtime_malloc(max_aot_buf_size))) {
+ aot_set_last_error("allocate memory for custom name section failed.");
+ return 0;
+ }
+ buf_end = buf + max_aot_buf_size;
+
+ read_leb_uint32(p, p_end, name_len);
+ offset = align_uint(offset, 4);
+ EMIT_U32(name_len);
+
+    if (name_len < 4 || p + name_len > p_end) {
+ aot_set_last_error("unexpected end");
+ return 0;
+ }
+
+ if (!check_utf8_str(p, name_len)) {
+ aot_set_last_error("invalid UTF-8 encoding");
+ return 0;
+ }
+
+ if (memcmp(p, "name", 4) != 0) {
+ aot_set_last_error("invalid custom name section");
+ return 0;
+ }
+ EMIT_BUF(p, name_len);
+ p += name_len;
+
+ while (p < p_end) {
+ read_leb_uint32(p, p_end, name_type);
+ if (i != 0) {
+ if (name_type == previous_name_type) {
+ aot_set_last_error("duplicate sub-section");
+ return 0;
+ }
+ if (name_type < previous_name_type) {
+ aot_set_last_error("out-of-order sub-section");
+ return 0;
+ }
+ }
+ previous_name_type = name_type;
+ read_leb_uint32(p, p_end, subsection_size);
+ switch (name_type) {
+ case SUB_SECTION_TYPE_FUNC:
+ if (subsection_size) {
+ offset = align_uint(offset, 4);
+ EMIT_U32(name_type);
+ EMIT_U32(subsection_size);
+
+ read_leb_uint32(p, p_end, num_func_name);
+ EMIT_U32(num_func_name);
+
+ for (name_index = 0; name_index < num_func_name;
+ name_index++) {
+ read_leb_uint32(p, p_end, func_index);
+ offset = align_uint(offset, 4);
+ EMIT_U32(func_index);
+ if (func_index == previous_func_index) {
+ aot_set_last_error("duplicate function name");
+ return 0;
+ }
+ if (func_index < previous_func_index
+ && previous_func_index != ~0U) {
+                        aot_set_last_error("out-of-order function index");
+ return 0;
+ }
+ previous_func_index = func_index;
+ read_leb_uint32(p, p_end, func_name_len);
+ offset = align_uint(offset, 2);
+ EMIT_U16(func_name_len);
+ EMIT_BUF(p, func_name_len);
+ p += func_name_len;
+ }
+ }
+ break;
+            case SUB_SECTION_TYPE_MODULE: /* TODO: parse module subsection */
+            case SUB_SECTION_TYPE_LOCAL:  /* TODO: parse local subsection */
+ default:
+ p = p + subsection_size;
+ break;
+ }
+ i++;
+ }
+
+ return offset;
+fail:
+ return 0;
+}
+
+static uint32
+get_custom_sections_size(AOTCompContext *comp_ctx, AOTCompData *comp_data)
+{
+#if WASM_ENABLE_LOAD_CUSTOM_SECTION != 0
+ uint32 size = 0, i;
+
+ for (i = 0; i < comp_ctx->custom_sections_count; i++) {
+ const char *section_name = comp_ctx->custom_sections_wp[i];
+ const uint8 *content = NULL;
+ uint32 length = 0;
+
+ content = wasm_loader_get_custom_section(comp_data->wasm_module,
+ section_name, &length);
+ if (!content) {
+ LOG_WARNING("Can't find custom section [%s], ignore it",
+ section_name);
+ continue;
+ }
+
+ size = align_uint(size, 4);
+ /* section id + section size + sub section id */
+ size += (uint32)sizeof(uint32) * 3;
+ /* section name and len */
+ size += get_string_size(comp_ctx, section_name);
+ /* section content */
+ size += length;
+ }
+
+ return size;
+#else
+ return 0;
+#endif
+}
+
+static bool
+aot_emit_file_header(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset;
+ uint32 aot_curr_version = AOT_CURRENT_VERSION;
+
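+    /* AOT file magic: "\0aot" */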
+ EMIT_U8('\0');
+ EMIT_U8('a');
+ EMIT_U8('o');
+ EMIT_U8('t');
+
+ EMIT_U32(aot_curr_version);
+
+ *p_offset = offset;
+ return true;
+}
+
+static bool
+aot_emit_target_info_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset;
+ uint32 section_size = get_target_info_section_size();
+ AOTTargetInfo *target_info = &obj_data->target_info;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_TARGET_INFO);
+ EMIT_U32(section_size);
+
+ EMIT_U16(target_info->bin_type);
+ EMIT_U16(target_info->abi_type);
+ EMIT_U16(target_info->e_type);
+ EMIT_U16(target_info->e_machine);
+ EMIT_U32(target_info->e_version);
+ EMIT_U32(target_info->e_flags);
+ EMIT_U32(target_info->reserved);
+ EMIT_BUF(target_info->arch, sizeof(target_info->arch));
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit target info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_mem_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTMemInitData **init_datas = comp_data->mem_init_data_list;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ /* Emit import memory count, only emit 0 currently.
+ TODO: emit the actual import memory count and
+ the full import memory info. */
+ EMIT_U32(0);
+
+ /* Emit memory count */
+ EMIT_U32(comp_data->memory_count);
+ /* Emit memory items */
+ for (i = 0; i < comp_data->memory_count; i++) {
+ EMIT_U32(comp_data->memories[i].memory_flags);
+ EMIT_U32(comp_data->memories[i].num_bytes_per_page);
+ EMIT_U32(comp_data->memories[i].mem_init_page_count);
+ EMIT_U32(comp_data->memories[i].mem_max_page_count);
+ }
+
+ /* Emit mem init data count */
+ EMIT_U32(comp_data->mem_init_data_count);
+ /* Emit mem init data items */
+ for (i = 0; i < comp_data->mem_init_data_count; i++) {
+ offset = align_uint(offset, 4);
+#if WASM_ENABLE_BULK_MEMORY != 0
+ if (comp_ctx->enable_bulk_memory) {
+ EMIT_U32(init_datas[i]->is_passive);
+ EMIT_U32(init_datas[i]->memory_index);
+ }
+ else
+#endif
+ {
+            /* emit two placeholders to keep the same size */
+ EMIT_U32(0);
+ EMIT_U32(0);
+ }
+ EMIT_U32(init_datas[i]->offset.init_expr_type);
+ EMIT_U64(init_datas[i]->offset.u.i64);
+ EMIT_U32(init_datas[i]->byte_count);
+ EMIT_BUF(init_datas[i]->bytes, init_datas[i]->byte_count);
+ }
+
+ if (offset - *p_offset != get_mem_info_size(comp_data)) {
+ aot_set_last_error("emit memory info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_table_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i, j;
+ AOTTableInitData **init_datas = comp_data->table_init_data_list;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ /* Emit import table count */
+ EMIT_U32(comp_data->import_table_count);
+ /* Emit table items */
+ for (i = 0; i < comp_data->import_table_count; i++) {
+ /* TODO:
+ * EMIT_STR(comp_data->import_tables[i].module_name );
+ * EMIT_STR(comp_data->import_tables[i].table_name);
+ */
+ EMIT_U32(comp_data->import_tables[i].elem_type);
+ EMIT_U32(comp_data->import_tables[i].table_init_size);
+ EMIT_U32(comp_data->import_tables[i].table_max_size);
+ EMIT_U32(comp_data->import_tables[i].possible_grow & 0x000000FF);
+ }
+
+ /* Emit table count */
+ EMIT_U32(comp_data->table_count);
+ /* Emit table items */
+ for (i = 0; i < comp_data->table_count; i++) {
+ EMIT_U32(comp_data->tables[i].elem_type);
+ EMIT_U32(comp_data->tables[i].table_flags);
+ EMIT_U32(comp_data->tables[i].table_init_size);
+ EMIT_U32(comp_data->tables[i].table_max_size);
+ EMIT_U32(comp_data->tables[i].possible_grow & 0x000000FF);
+ }
+
+ /* Emit table init data count */
+ EMIT_U32(comp_data->table_init_data_count);
+ /* Emit table init data items */
+ for (i = 0; i < comp_data->table_init_data_count; i++) {
+ offset = align_uint(offset, 4);
+ EMIT_U32(init_datas[i]->mode);
+ EMIT_U32(init_datas[i]->elem_type);
+ EMIT_U32(init_datas[i]->table_index);
+ EMIT_U32(init_datas[i]->offset.init_expr_type);
+ EMIT_U64(init_datas[i]->offset.u.i64);
+ EMIT_U32(init_datas[i]->func_index_count);
+ for (j = 0; j < init_datas[i]->func_index_count; j++)
+ EMIT_U32(init_datas[i]->func_indexes[j]);
+ }
+
+ if (offset - *p_offset != get_table_info_size(comp_data)) {
+ aot_set_last_error("emit table info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_func_type_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTFuncType **func_types = comp_data->func_types;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(comp_data->func_type_count);
+
+ for (i = 0; i < comp_data->func_type_count; i++) {
+ offset = align_uint(offset, 4);
+ EMIT_U32(func_types[i]->param_count);
+ EMIT_U32(func_types[i]->result_count);
+ EMIT_BUF(func_types[i]->types,
+ func_types[i]->param_count + func_types[i]->result_count);
+ }
+
+ if (offset - *p_offset != get_func_type_info_size(comp_data)) {
+ aot_set_last_error("emit function type info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_import_global_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTImportGlobal *import_global = comp_data->import_globals;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(comp_data->import_global_count);
+
+ for (i = 0; i < comp_data->import_global_count; i++, import_global++) {
+ offset = align_uint(offset, 2);
+ EMIT_U8(import_global->type);
+ EMIT_U8(import_global->is_mutable);
+ EMIT_STR(import_global->module_name);
+ offset = align_uint(offset, 2);
+ EMIT_STR(import_global->global_name);
+ }
+
+ if (offset - *p_offset
+ != get_import_global_info_size(comp_ctx, comp_data)) {
+ aot_set_last_error("emit import global info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_global_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTGlobal *global = comp_data->globals;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(comp_data->global_count);
+
+ for (i = 0; i < comp_data->global_count; i++, global++) {
+ offset = align_uint(offset, 4);
+ EMIT_U8(global->type);
+ EMIT_U8(global->is_mutable);
+ EMIT_U16(global->init_expr.init_expr_type);
+ if (global->init_expr.init_expr_type != INIT_EXPR_TYPE_V128_CONST)
+ EMIT_U64(global->init_expr.u.i64);
+ else
+ EMIT_V128(global->init_expr.u.v128);
+ }
+
+ if (offset - *p_offset != get_global_info_size(comp_data)) {
+ aot_set_last_error("emit global info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_import_func_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTImportFunc *import_func = comp_data->import_funcs;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(comp_data->import_func_count);
+
+ for (i = 0; i < comp_data->import_func_count; i++, import_func++) {
+ offset = align_uint(offset, 2);
+ EMIT_U16(import_func->func_type_index);
+ EMIT_STR(import_func->module_name);
+ offset = align_uint(offset, 2);
+ EMIT_STR(import_func->func_name);
+ }
+
+ if (offset - *p_offset != get_import_func_info_size(comp_ctx, comp_data)) {
+ aot_set_last_error("emit import function info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_object_data_section_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx,
+ AOTObjectData *obj_data)
+{
+ uint32 offset = *p_offset, i;
+ AOTObjectDataSection *data_section = obj_data->data_sections;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(obj_data->data_sections_count);
+
+ for (i = 0; i < obj_data->data_sections_count; i++, data_section++) {
+ offset = align_uint(offset, 2);
+ EMIT_STR(data_section->name);
+ offset = align_uint(offset, 4);
+ EMIT_U32(data_section->size);
+ EMIT_BUF(data_section->data, data_section->size);
+ }
+
+ if (offset - *p_offset
+ != get_object_data_section_info_size(comp_ctx, obj_data)) {
+ aot_set_last_error("emit object data section info failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_init_data_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 section_size =
+ get_init_data_section_size(comp_ctx, comp_data, obj_data);
+ uint32 offset = *p_offset;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_INIT_DATA);
+ EMIT_U32(section_size);
+
+ if (!aot_emit_mem_info(buf, buf_end, &offset, comp_ctx, comp_data, obj_data)
+ || !aot_emit_table_info(buf, buf_end, &offset, comp_ctx, comp_data,
+ obj_data)
+ || !aot_emit_func_type_info(buf, buf_end, &offset, comp_data, obj_data)
+ || !aot_emit_import_global_info(buf, buf_end, &offset, comp_ctx,
+ comp_data, obj_data)
+ || !aot_emit_global_info(buf, buf_end, &offset, comp_data, obj_data)
+ || !aot_emit_import_func_info(buf, buf_end, &offset, comp_ctx,
+ comp_data, obj_data))
+ return false;
+
+ offset = align_uint(offset, 4);
+ EMIT_U32(comp_data->func_count);
+ EMIT_U32(comp_data->start_func_index);
+
+ EMIT_U32(comp_data->aux_data_end_global_index);
+ EMIT_U32(comp_data->aux_data_end);
+ EMIT_U32(comp_data->aux_heap_base_global_index);
+ EMIT_U32(comp_data->aux_heap_base);
+ EMIT_U32(comp_data->aux_stack_top_global_index);
+ EMIT_U32(comp_data->aux_stack_bottom);
+ EMIT_U32(comp_data->aux_stack_size);
+
+ if (!aot_emit_object_data_section_info(buf, buf_end, &offset, comp_ctx,
+ obj_data))
+ return false;
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit init data section failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_text_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 section_size = get_text_section_size(obj_data);
+ uint32 offset = *p_offset;
+ uint8 placeholder = 0;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_TEXT);
+ EMIT_U32(section_size);
+ EMIT_U32(obj_data->literal_size);
+ if (obj_data->literal_size > 0)
+ EMIT_BUF(obj_data->literal, obj_data->literal_size);
+ EMIT_BUF(obj_data->text, obj_data->text_size);
+
+ while (offset & 3)
+ EMIT_BUF(&placeholder, 1);
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit text section failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_func_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTObjectData *obj_data)
+{
+ uint32 section_size = get_func_section_size(comp_data, obj_data);
+ uint32 i, offset = *p_offset;
+ AOTObjectFunc *func = obj_data->funcs;
+ AOTFunc **funcs = comp_data->funcs;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_FUNCTION);
+ EMIT_U32(section_size);
+
+ for (i = 0; i < obj_data->func_count; i++, func++) {
+ if (is_32bit_binary(obj_data))
+ EMIT_U32(func->text_offset);
+ else
+ EMIT_U64(func->text_offset);
+ }
+
+ for (i = 0; i < comp_data->func_count; i++)
+ EMIT_U32(funcs[i]->func_type_index);
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit function section failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_export_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 section_size = get_export_section_size(comp_ctx, comp_data);
+ AOTExport *export = comp_data->wasm_module->exports;
+ uint32 export_count = comp_data->wasm_module->export_count;
+ uint32 i, offset = *p_offset;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_EXPORT);
+ EMIT_U32(section_size);
+ EMIT_U32(export_count);
+
+    for (i = 0; i < export_count; i++, export++) {
+ offset = align_uint(offset, 4);
+ EMIT_U32(export->index);
+ EMIT_U8(export->kind);
+ EMIT_U8(0);
+ EMIT_STR(export->name);
+ }
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit export section failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_relocation_symbol_table(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx,
+ AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 symbol_offset = 0, total_string_len = 0;
+ uint32 offset = *p_offset;
+ AOTSymbolNode *sym;
+
+ EMIT_U32(obj_data->symbol_list.len);
+
+ /* emit symbol offsets */
+ sym = (AOTSymbolNode *)(obj_data->symbol_list.head);
+ while (sym) {
+ EMIT_U32(symbol_offset);
+ /* string_len + str[0 .. string_len - 1] */
+ symbol_offset += get_string_size(comp_ctx, sym->symbol);
+ symbol_offset = align_uint(symbol_offset, 2);
+ sym = sym->next;
+ }
+
+ /* emit total string len */
+ total_string_len = symbol_offset;
+ EMIT_U32(total_string_len);
+
+ /* emit symbols */
+ sym = (AOTSymbolNode *)(obj_data->symbol_list.head);
+ while (sym) {
+ EMIT_STR(sym->symbol);
+ offset = align_uint(offset, 2);
+ sym = sym->next;
+ }
+
+ *p_offset = offset;
+ return true;
+}
+
+static bool
+aot_emit_relocation_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ AOTObjectData *obj_data)
+{
+ uint32 section_size = get_relocation_section_size(comp_ctx, obj_data);
+ uint32 i, offset = *p_offset;
+ AOTRelocationGroup *relocation_group = obj_data->relocation_groups;
+
+ if (section_size == (uint32)-1)
+ return false;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_RELOCATION);
+ EMIT_U32(section_size);
+
+    if (!aot_emit_relocation_symbol_table(buf, buf_end, &offset, comp_ctx,
+                                          comp_data, obj_data))
+        return false;
+
+ offset = align_uint(offset, 4);
+ EMIT_U32(obj_data->relocation_group_count);
+
+ /* emit each relocation group */
+ for (i = 0; i < obj_data->relocation_group_count; i++, relocation_group++) {
+ AOTRelocation *relocation = relocation_group->relocations;
+ uint32 j;
+
+ offset = align_uint(offset, 4);
+ EMIT_U32(relocation_group->name_index);
+ offset = align_uint(offset, 4);
+ EMIT_U32(relocation_group->relocation_count);
+
+ /* emit each relocation */
+ for (j = 0; j < relocation_group->relocation_count; j++, relocation++) {
+ offset = align_uint(offset, 4);
+ if (is_32bit_binary(obj_data)) {
+ EMIT_U32(relocation->relocation_offset);
+ EMIT_U32(relocation->relocation_addend);
+ }
+ else {
+ EMIT_U64(relocation->relocation_offset);
+ EMIT_U64(relocation->relocation_addend);
+ }
+ EMIT_U32(relocation->relocation_type);
+ EMIT_U32(relocation->symbol_index);
+ }
+ }
+
+ if (offset - *p_offset != section_size + sizeof(uint32) * 2) {
+ aot_set_last_error("emit relocation section failed.");
+ return false;
+ }
+
+ *p_offset = offset;
+ return true;
+}
+
+static bool
+aot_emit_native_symbol(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompContext *comp_ctx)
+{
+ uint32 offset = *p_offset;
+ AOTNativeSymbol *sym = NULL;
+
+ if (bh_list_length(&comp_ctx->native_symbols) == 0)
+ /* emit only when there are native symbols */
+ return true;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_CUSTOM);
+ /* sub section id + symbol count + symbol list */
+ EMIT_U32(sizeof(uint32) * 2 + get_native_symbol_list_size(comp_ctx));
+ EMIT_U32(AOT_CUSTOM_SECTION_NATIVE_SYMBOL);
+ EMIT_U32(bh_list_length(&comp_ctx->native_symbols));
+
+ sym = bh_list_first_elem(&comp_ctx->native_symbols);
+
+ while (sym) {
+ offset = align_uint(offset, 2);
+ EMIT_STR(sym->symbol);
+ sym = bh_list_elem_next(sym);
+ }
+
+ *p_offset = offset;
+
+ return true;
+}
+
+static bool
+aot_emit_name_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTCompContext *comp_ctx)
+{
+ if (comp_ctx->enable_aux_stack_frame) {
+ uint32 offset = *p_offset;
+
+ *p_offset = offset = align_uint(offset, 4);
+
+ EMIT_U32(AOT_SECTION_TYPE_CUSTOM);
+ /* sub section id + name section size */
+ EMIT_U32(sizeof(uint32) * 1 + comp_data->aot_name_section_size);
+ EMIT_U32(AOT_CUSTOM_SECTION_NAME);
+        bh_memcpy_s((uint8 *)(buf + offset),
+                    (uint32)(buf_end - (buf + offset)),
+                    comp_data->aot_name_section_buf,
+                    (uint32)comp_data->aot_name_section_size);
+ offset += comp_data->aot_name_section_size;
+
+ *p_offset = offset;
+ }
+
+ return true;
+}
+
+static bool
+aot_emit_custom_sections(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
+ AOTCompData *comp_data, AOTCompContext *comp_ctx)
+{
+#if WASM_ENABLE_LOAD_CUSTOM_SECTION != 0
+ uint32 offset = *p_offset, i;
+
+ for (i = 0; i < comp_ctx->custom_sections_count; i++) {
+ const char *section_name = comp_ctx->custom_sections_wp[i];
+ const uint8 *content = NULL;
+ uint32 length = 0;
+
+ content = wasm_loader_get_custom_section(comp_data->wasm_module,
+ section_name, &length);
+ if (!content) {
+            /* A warning was already reported during size calculation */
+ continue;
+ }
+
+ offset = align_uint(offset, 4);
+ EMIT_U32(AOT_SECTION_TYPE_CUSTOM);
+ /* sub section id + content */
+ EMIT_U32(sizeof(uint32) * 1 + get_string_size(comp_ctx, section_name)
+ + length);
+ EMIT_U32(AOT_CUSTOM_SECTION_RAW);
+ EMIT_STR(section_name);
+        bh_memcpy_s((uint8 *)(buf + offset),
+                    (uint32)(buf_end - (buf + offset)), content, length);
+ offset += length;
+ }
+
+ *p_offset = offset;
+#endif
+
+ return true;
+}
+
+typedef uint32 U32;
+typedef int32 I32;
+typedef uint16 U16;
+typedef uint8 U8;
+
+struct coff_hdr {
+ U16 u16Machine;
+ U16 u16NumSections;
+ U32 u32DateTimeStamp;
+ U32 u32SymTblPtr;
+ U32 u32NumSymbols;
+ U16 u16PeHdrSize;
+ U16 u16Characs;
+};
+
+#define E_TYPE_REL 1
+#define E_TYPE_XIP 4
+
+#define IMAGE_FILE_MACHINE_AMD64 0x8664
+#define IMAGE_FILE_MACHINE_I386 0x014c
+#define IMAGE_FILE_MACHINE_IA64 0x0200
+
+#define AOT_COFF32_BIN_TYPE 4 /* 32-bit little endian */
+#define AOT_COFF64_BIN_TYPE 6 /* 64-bit little endian */
+
+#define EI_NIDENT 16
+
+typedef uint32 elf32_word;
+typedef int32 elf32_sword;
+typedef uint16 elf32_half;
+typedef uint32 elf32_off;
+typedef uint32 elf32_addr;
+
+struct elf32_ehdr {
+ unsigned char e_ident[EI_NIDENT]; /* ident bytes */
+ elf32_half e_type; /* file type */
+ elf32_half e_machine; /* target machine */
+ elf32_word e_version; /* file version */
+ elf32_addr e_entry; /* start address */
+ elf32_off e_phoff; /* phdr file offset */
+ elf32_off e_shoff; /* shdr file offset */
+ elf32_word e_flags; /* file flags */
+ elf32_half e_ehsize; /* sizeof ehdr */
+ elf32_half e_phentsize; /* sizeof phdr */
+ elf32_half e_phnum; /* number phdrs */
+ elf32_half e_shentsize; /* sizeof shdr */
+ elf32_half e_shnum; /* number shdrs */
+ elf32_half e_shstrndx; /* shdr string index */
+};
+
+typedef struct elf32_rel {
+    elf32_addr r_offset;
+    elf32_word r_info;
+} elf32_rel;
+
+typedef struct elf32_rela {
+    elf32_addr r_offset;
+    elf32_word r_info;
+    elf32_sword r_addend;
+} elf32_rela;
+
+typedef uint32 elf64_word;
+typedef int32 elf64_sword;
+typedef uint64 elf64_xword;
+typedef int64 elf64_sxword;
+typedef uint16 elf64_half;
+typedef uint64 elf64_off;
+typedef uint64 elf64_addr;
+
+struct elf64_ehdr {
+ unsigned char e_ident[EI_NIDENT]; /* ident bytes */
+ elf64_half e_type; /* file type */
+ elf64_half e_machine; /* target machine */
+ elf64_word e_version; /* file version */
+ elf64_addr e_entry; /* start address */
+ elf64_off e_phoff; /* phdr file offset */
+ elf64_off e_shoff; /* shdr file offset */
+ elf64_word e_flags; /* file flags */
+ elf64_half e_ehsize; /* sizeof ehdr */
+ elf64_half e_phentsize; /* sizeof phdr */
+ elf64_half e_phnum; /* number phdrs */
+ elf64_half e_shentsize; /* sizeof shdr */
+ elf64_half e_shnum; /* number shdrs */
+ elf64_half e_shstrndx; /* shdr string index */
+};
+
+typedef struct elf64_rel {
+ elf64_addr r_offset;
+ elf64_xword r_info;
+} elf64_rel;
+
+typedef struct elf64_rela {
+ elf64_addr r_offset;
+ elf64_xword r_info;
+ elf64_sxword r_addend;
+} elf64_rela;
+
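+/* Copy field `v` of the object file header into obj_data->target_info.f,
+   byte-swapping when the binary's endianness differs from the host's */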
+#define SET_TARGET_INFO(f, v, type, little) \
+ do { \
+ type tmp = elf_header->v; \
+ if ((little && !is_little_endian()) \
+ || (!little && is_little_endian())) \
+ exchange_##type((uint8 *)&tmp); \
+ obj_data->target_info.f = tmp; \
+ } while (0)
+
+static bool
+aot_resolve_target_info(AOTCompContext *comp_ctx, AOTObjectData *obj_data)
+{
+ LLVMBinaryType bin_type = LLVMBinaryGetType(obj_data->binary);
+ const uint8 *elf_buf = (uint8 *)LLVMGetBufferStart(obj_data->mem_buf);
+ uint32 elf_size = (uint32)LLVMGetBufferSize(obj_data->mem_buf);
+
+ if (bin_type != LLVMBinaryTypeCOFF && bin_type != LLVMBinaryTypeELF32L
+ && bin_type != LLVMBinaryTypeELF32B && bin_type != LLVMBinaryTypeELF64L
+ && bin_type != LLVMBinaryTypeELF64B
+ && bin_type != LLVMBinaryTypeMachO32L
+ && bin_type != LLVMBinaryTypeMachO32B
+ && bin_type != LLVMBinaryTypeMachO64L
+ && bin_type != LLVMBinaryTypeMachO64B) {
+ aot_set_last_error("invaid llvm binary bin_type.");
+ return false;
+ }
+
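+    /* For the ELF types this maps the LLVMBinaryType enum onto the 2-bit
+       bin_type encoding (bit 0: endianness, bit 1: word size); the COFF
+       case is overwritten in the branch below */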
+ obj_data->target_info.bin_type = bin_type - LLVMBinaryTypeELF32L;
+
+ if (bin_type == LLVMBinaryTypeCOFF) {
+ struct coff_hdr *coff_header;
+
+ if (!elf_buf || elf_size < sizeof(struct coff_hdr)) {
+ aot_set_last_error("invalid coff_hdr buffer.");
+ return false;
+ }
+ coff_header = (struct coff_hdr *)elf_buf;
+
+ /* Emit eXecute In Place file type while in indirect mode */
+ if (comp_ctx->is_indirect_mode)
+ obj_data->target_info.e_type = E_TYPE_XIP;
+ else
+ obj_data->target_info.e_type = E_TYPE_REL;
+
+ obj_data->target_info.e_machine = coff_header->u16Machine;
+ obj_data->target_info.e_version = 1;
+ obj_data->target_info.e_flags = 0;
+
+ if (coff_header->u16Machine == IMAGE_FILE_MACHINE_AMD64
+ || coff_header->u16Machine == IMAGE_FILE_MACHINE_IA64)
+ obj_data->target_info.bin_type = AOT_COFF64_BIN_TYPE;
+ else if (coff_header->u16Machine == IMAGE_FILE_MACHINE_I386)
+ obj_data->target_info.bin_type = AOT_COFF32_BIN_TYPE;
+ }
+ else if (bin_type == LLVMBinaryTypeELF32L
+ || bin_type == LLVMBinaryTypeELF32B) {
+ struct elf32_ehdr *elf_header;
+ bool is_little_bin = bin_type == LLVMBinaryTypeELF32L;
+
+ if (!elf_buf || elf_size < sizeof(struct elf32_ehdr)) {
+ aot_set_last_error("invalid elf32 buffer.");
+ return false;
+ }
+
+ elf_header = (struct elf32_ehdr *)elf_buf;
+
+ /* Emit eXecute In Place file type while in indirect mode */
+ if (comp_ctx->is_indirect_mode)
+ elf_header->e_type = E_TYPE_XIP;
+
+ SET_TARGET_INFO(e_type, e_type, uint16, is_little_bin);
+ SET_TARGET_INFO(e_machine, e_machine, uint16, is_little_bin);
+ SET_TARGET_INFO(e_version, e_version, uint32, is_little_bin);
+ SET_TARGET_INFO(e_flags, e_flags, uint32, is_little_bin);
+ }
+ else if (bin_type == LLVMBinaryTypeELF64L
+ || bin_type == LLVMBinaryTypeELF64B) {
+ struct elf64_ehdr *elf_header;
+ bool is_little_bin = bin_type == LLVMBinaryTypeELF64L;
+
+ if (!elf_buf || elf_size < sizeof(struct elf64_ehdr)) {
+ aot_set_last_error("invalid elf64 buffer.");
+ return false;
+ }
+
+ elf_header = (struct elf64_ehdr *)elf_buf;
+
+ /* Emit eXecute In Place file type while in indirect mode */
+ if (comp_ctx->is_indirect_mode)
+ elf_header->e_type = E_TYPE_XIP;
+
+ SET_TARGET_INFO(e_type, e_type, uint16, is_little_bin);
+ SET_TARGET_INFO(e_machine, e_machine, uint16, is_little_bin);
+ SET_TARGET_INFO(e_version, e_version, uint32, is_little_bin);
+ SET_TARGET_INFO(e_flags, e_flags, uint32, is_little_bin);
+ }
+ else if (bin_type == LLVMBinaryTypeMachO32L
+ || bin_type == LLVMBinaryTypeMachO32B) {
+ /* TODO: parse file type of Mach-O 32 */
+ aot_set_last_error("invaid llvm binary bin_type.");
+ return false;
+ }
+ else if (bin_type == LLVMBinaryTypeMachO64L
+ || bin_type == LLVMBinaryTypeMachO64B) {
+ /* TODO: parse file type of Mach-O 64 */
+ aot_set_last_error("invaid llvm binary bin_type.");
+ return false;
+ }
+
+ bh_assert(sizeof(obj_data->target_info.arch)
+ == sizeof(comp_ctx->target_arch));
+ bh_memcpy_s(obj_data->target_info.arch, sizeof(obj_data->target_info.arch),
+ comp_ctx->target_arch, sizeof(comp_ctx->target_arch));
+
+ return true;
+}
+
+static bool
+aot_resolve_text(AOTObjectData *obj_data)
+{
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMBinaryType bin_type = LLVMBinaryGetType(obj_data->binary);
+ if (bin_type == LLVMBinaryTypeELF32L || bin_type == LLVMBinaryTypeELF64L) {
+ obj_data->text = (char *)LLVMGetBufferStart(obj_data->mem_buf);
+ obj_data->text_size = (uint32)LLVMGetBufferSize(obj_data->mem_buf);
+ }
+ else
+#endif
+ {
+ LLVMSectionIteratorRef sec_itr;
+ char *name;
+
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (
+ !LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if ((name = (char *)LLVMGetSectionName(sec_itr))
+ && !strcmp(name, ".text")) {
+ obj_data->text = (char *)LLVMGetSectionContents(sec_itr);
+ obj_data->text_size = (uint32)LLVMGetSectionSize(sec_itr);
+ break;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+ }
+
+ return true;
+}
+
+static bool
+aot_resolve_literal(AOTObjectData *obj_data)
+{
+ LLVMSectionIteratorRef sec_itr;
+ char *name;
+
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (!LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if ((name = (char *)LLVMGetSectionName(sec_itr))
+ && !strcmp(name, ".literal")) {
+ obj_data->literal = (char *)LLVMGetSectionContents(sec_itr);
+ obj_data->literal_size = (uint32)LLVMGetSectionSize(sec_itr);
+ break;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+
+ return true;
+}
+
+static bool
+get_relocations_count(LLVMSectionIteratorRef sec_itr, uint32 *p_count);
+
+static bool
+is_data_section(LLVMSectionIteratorRef sec_itr, char *section_name)
+{
+ uint32 relocation_count = 0;
+
+ return (!strcmp(section_name, ".data") || !strcmp(section_name, ".sdata")
+ || !strcmp(section_name, ".rodata")
+ /* ".rodata.cst4/8/16/.." */
+ || !strncmp(section_name, ".rodata.cst", strlen(".rodata.cst"))
+ /* ".rodata.strn.m" */
+ || !strncmp(section_name, ".rodata.str", strlen(".rodata.str"))
+ || (!strcmp(section_name, ".rdata")
+ && get_relocations_count(sec_itr, &relocation_count)
+ && relocation_count > 0));
+}
+
+static bool
+get_object_data_sections_count(AOTObjectData *obj_data, uint32 *p_count)
+{
+ LLVMSectionIteratorRef sec_itr;
+ char *name;
+ uint32 count = 0;
+
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (!LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if ((name = (char *)LLVMGetSectionName(sec_itr))
+ && (is_data_section(sec_itr, name))) {
+ count++;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+
+ *p_count = count;
+ return true;
+}
+
+static bool
+aot_resolve_object_data_sections(AOTObjectData *obj_data)
+{
+ LLVMSectionIteratorRef sec_itr;
+ char *name;
+ AOTObjectDataSection *data_section;
+ uint32 sections_count;
+ uint32 size;
+
+ if (!get_object_data_sections_count(obj_data, &sections_count)) {
+ return false;
+ }
+
+ if (sections_count > 0) {
+ size = (uint32)sizeof(AOTObjectDataSection) * sections_count;
+ if (!(data_section = obj_data->data_sections =
+ wasm_runtime_malloc(size))) {
+ aot_set_last_error("allocate memory for data sections failed.");
+ return false;
+ }
+ memset(obj_data->data_sections, 0, size);
+ obj_data->data_sections_count = sections_count;
+
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (
+ !LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if ((name = (char *)LLVMGetSectionName(sec_itr))
+ && (is_data_section(sec_itr, name))) {
+ data_section->name = name;
+ data_section->data = (uint8 *)LLVMGetSectionContents(sec_itr);
+ data_section->size = (uint32)LLVMGetSectionSize(sec_itr);
+ data_section++;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+ }
+
+ return true;
+}
+
+static bool
+aot_resolve_functions(AOTCompContext *comp_ctx, AOTObjectData *obj_data)
+{
+ AOTObjectFunc *func;
+ LLVMSymbolIteratorRef sym_itr;
+ char *name, *prefix = AOT_FUNC_PREFIX;
+ uint32 func_index, total_size;
+
+ /* allocate memory for aot function */
+ obj_data->func_count = comp_ctx->comp_data->func_count;
+ if (obj_data->func_count) {
+ total_size = (uint32)sizeof(AOTObjectFunc) * obj_data->func_count;
+ if (!(obj_data->funcs = wasm_runtime_malloc(total_size))) {
+ aot_set_last_error("allocate memory for functions failed.");
+ return false;
+ }
+ memset(obj_data->funcs, 0, total_size);
+ }
+
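+    /* Scan the symbol table for names of the form AOT_FUNC_PREFIX followed
+     * by a decimal function index (e.g. "aot_func#5" if the prefix is
+     * "aot_func#"); the index after the prefix selects the slot whose text
+     * offset is recorded below. */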
+ if (!(sym_itr = LLVMObjectFileCopySymbolIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get symbol iterator failed.");
+ return false;
+ }
+
+ while (!LLVMObjectFileIsSymbolIteratorAtEnd(obj_data->binary, sym_itr)) {
+ if ((name = (char *)LLVMGetSymbolName(sym_itr))
+ && str_starts_with(name, prefix)) {
+ func_index = (uint32)atoi(name + strlen(prefix));
+ if (func_index < obj_data->func_count) {
+ func = obj_data->funcs + func_index;
+ func->func_name = name;
+ func->text_offset = LLVMGetSymbolAddress(sym_itr);
+ }
+ }
+ LLVMMoveToNextSymbol(sym_itr);
+ }
+ LLVMDisposeSymbolIterator(sym_itr);
+
+ return true;
+}
+
+static bool
+get_relocations_count(LLVMSectionIteratorRef sec_itr, uint32 *p_count)
+{
+ uint32 relocation_count = 0;
+ LLVMRelocationIteratorRef rel_itr;
+
+ if (!(rel_itr = LLVMGetRelocations(sec_itr))) {
+ aot_set_last_error("llvm get relocations failed.");
+ LLVMDisposeSectionIterator(sec_itr);
+ return false;
+ }
+
+ while (!LLVMIsRelocationIteratorAtEnd(sec_itr, rel_itr)) {
+ relocation_count++;
+ LLVMMoveToNextRelocation(rel_itr);
+ }
+ LLVMDisposeRelocationIterator(rel_itr);
+
+ *p_count = relocation_count;
+ return true;
+}
+
+static bool
+aot_resolve_object_relocation_group(AOTObjectData *obj_data,
+ AOTRelocationGroup *group,
+ LLVMSectionIteratorRef rel_sec)
+{
+ LLVMRelocationIteratorRef rel_itr;
+ AOTRelocation *relocation = group->relocations;
+ uint32 size;
+ bool is_binary_32bit = is_32bit_binary(obj_data);
+ bool is_binary_little_endian = is_little_endian_binary(obj_data);
+ bool has_addend = str_starts_with(group->section_name, ".rela");
+ uint8 *rela_content = NULL;
+
+ /* calculate relocations count and allocate memory */
+ if (!get_relocations_count(rel_sec, &group->relocation_count))
+ return false;
+ if (group->relocation_count == 0) {
+ aot_set_last_error("invalid relocations count");
+ return false;
+ }
+ size = (uint32)sizeof(AOTRelocation) * group->relocation_count;
+ if (!(relocation = group->relocations = wasm_runtime_malloc(size))) {
+ aot_set_last_error("allocate memory for relocations failed.");
+ return false;
+ }
+ memset(group->relocations, 0, size);
+
+ if (has_addend) {
+ uint64 rela_content_size;
+ /* LLVM doesn't provide C API to get relocation addend. So we have to
+ * parse it manually. */
+ rela_content = (uint8 *)LLVMGetSectionContents(rel_sec);
+ rela_content_size = LLVMGetSectionSize(rel_sec);
+ if (is_binary_32bit)
+ size = (uint32)sizeof(struct elf32_rela) * group->relocation_count;
+ else
+ size = (uint32)sizeof(struct elf64_rela) * group->relocation_count;
+ if (rela_content_size != (uint64)size) {
+ aot_set_last_error("invalid relocation section content.");
+ return false;
+ }
+ }
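+
+    /* A minimal sketch of the ELF "rela" entry layouts assumed by the
+     * manual parsing below (field order per the ELF specification; the
+     * real definitions live in the runtime's ELF headers):
+     *
+     *   struct elf32_rela { Elf32_Addr  r_offset;  // where to patch
+     *                       Elf32_Word  r_info;    // symbol + reloc type
+     *                       Elf32_Sword r_addend; };
+     *
+     *   struct elf64_rela { Elf64_Addr   r_offset;
+     *                       Elf64_Xword  r_info;
+     *                       Elf64_Sxword r_addend; };
+     */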
+
+    /* parse each relocation */
+ if (!(rel_itr = LLVMGetRelocations(rel_sec))) {
+ aot_set_last_error("llvm get relocations failed.");
+ return false;
+ }
+ while (!LLVMIsRelocationIteratorAtEnd(rel_sec, rel_itr)) {
+ uint64 offset = LLVMGetRelocationOffset(rel_itr);
+ uint64 type = LLVMGetRelocationType(rel_itr);
+ LLVMSymbolIteratorRef rel_sym = LLVMGetRelocationSymbol(rel_itr);
+
+ if (!rel_sym) {
+ aot_set_last_error("llvm get relocation symbol failed.");
+ goto fail;
+ }
+
+        /* parse relocation addend from relocation content */
+ if (has_addend) {
+ if (is_binary_32bit) {
+ int32 addend =
+ (int32)(((struct elf32_rela *)rela_content)->r_addend);
+ if (is_binary_little_endian != is_little_endian())
+ exchange_uint32((uint8 *)&addend);
+ relocation->relocation_addend = (int64)addend;
+ rela_content += sizeof(struct elf32_rela);
+ }
+ else {
+ int64 addend =
+ (int64)(((struct elf64_rela *)rela_content)->r_addend);
+ if (is_binary_little_endian != is_little_endian())
+ exchange_uint64((uint8 *)&addend);
+ relocation->relocation_addend = addend;
+ rela_content += sizeof(struct elf64_rela);
+ }
+ }
+
+ /* set relocation fields */
+ relocation->relocation_offset = offset;
+ relocation->relocation_type = (uint32)type;
+ relocation->symbol_name = (char *)LLVMGetSymbolName(rel_sym);
+
+        /* for ".LCPIxxx", ".LJTIxxx", ".LBBxxx" and switch lookup table
+         * relocations, transform the symbol name into the name of the
+         * section that contains the symbol, and add the symbol's offset
+         * within that section to the relocation addend */
+ if (relocation->symbol_name
+ && (str_starts_with(relocation->symbol_name, ".LCPI")
+ || str_starts_with(relocation->symbol_name, ".LJTI")
+ || str_starts_with(relocation->symbol_name, ".LBB")
+ || str_starts_with(relocation->symbol_name,
+ ".Lswitch.table."))) {
+ /* change relocation->relocation_addend and
+ relocation->symbol_name */
+ LLVMSectionIteratorRef contain_section;
+ if (!(contain_section =
+ LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ goto fail;
+ }
+ LLVMMoveToContainingSection(contain_section, rel_sym);
+ if (LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary,
+ contain_section)) {
+ LLVMDisposeSectionIterator(contain_section);
+ aot_set_last_error("llvm get containing section failed.");
+ goto fail;
+ }
+ relocation->relocation_addend += LLVMGetSymbolAddress(rel_sym);
+ relocation->symbol_name =
+ (char *)LLVMGetSectionName(contain_section);
+ LLVMDisposeSectionIterator(contain_section);
+ }
+
+ LLVMDisposeSymbolIterator(rel_sym);
+ LLVMMoveToNextRelocation(rel_itr);
+ relocation++;
+ }
+ LLVMDisposeRelocationIterator(rel_itr);
+ return true;
+
+fail:
+ LLVMDisposeRelocationIterator(rel_itr);
+ return false;
+}
+
+static bool
+is_relocation_section_name(char *section_name)
+{
+ return (!strcmp(section_name, ".rela.text")
+ || !strcmp(section_name, ".rel.text")
+ || !strcmp(section_name, ".rela.literal")
+ || !strcmp(section_name, ".rela.data")
+ || !strcmp(section_name, ".rel.data")
+ || !strcmp(section_name, ".rela.sdata")
+ || !strcmp(section_name, ".rel.sdata")
+ || !strcmp(section_name, ".rela.rodata")
+ || !strcmp(section_name, ".rel.rodata")
+ /* ".rela.rodata.cst4/8/16/.." */
+ || !strncmp(section_name, ".rela.rodata.cst",
+ strlen(".rela.rodata.cst"))
+ /* ".rel.rodata.cst4/8/16/.." */
+ || !strncmp(section_name, ".rel.rodata.cst",
+ strlen(".rel.rodata.cst")));
+}
+
+static bool
+is_relocation_section(LLVMSectionIteratorRef sec_itr)
+{
+ uint32 count = 0;
+ char *name = (char *)LLVMGetSectionName(sec_itr);
+ if (name) {
+ if (is_relocation_section_name(name))
+ return true;
+ else if ((!strcmp(name, ".text") || !strcmp(name, ".rdata"))
+ && get_relocations_count(sec_itr, &count) && count > 0)
+ return true;
+ }
+ return false;
+}
+
+static bool
+get_relocation_groups_count(AOTObjectData *obj_data, uint32 *p_count)
+{
+ uint32 count = 0;
+ LLVMSectionIteratorRef sec_itr;
+
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (!LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if (is_relocation_section(sec_itr)) {
+ count++;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+
+ *p_count = count;
+ return true;
+}
+
+static bool
+aot_resolve_object_relocation_groups(AOTObjectData *obj_data)
+{
+ LLVMSectionIteratorRef sec_itr;
+ AOTRelocationGroup *relocation_group;
+ uint32 group_count;
+ char *name;
+ uint32 size;
+
+ /* calculate relocation groups count and allocate memory */
+ if (!get_relocation_groups_count(obj_data, &group_count))
+ return false;
+
+ if (0 == (obj_data->relocation_group_count = group_count))
+ return true;
+
+ size = (uint32)sizeof(AOTRelocationGroup) * group_count;
+ if (!(relocation_group = obj_data->relocation_groups =
+ wasm_runtime_malloc(size))) {
+ aot_set_last_error("allocate memory for relocation groups failed.");
+ return false;
+ }
+
+ memset(obj_data->relocation_groups, 0, size);
+
+ /* resolve each relocation group */
+ if (!(sec_itr = LLVMObjectFileCopySectionIterator(obj_data->binary))) {
+ aot_set_last_error("llvm get section iterator failed.");
+ return false;
+ }
+ while (!LLVMObjectFileIsSectionIteratorAtEnd(obj_data->binary, sec_itr)) {
+ if (is_relocation_section(sec_itr)) {
+ name = (char *)LLVMGetSectionName(sec_itr);
+ relocation_group->section_name = name;
+ if (!aot_resolve_object_relocation_group(obj_data, relocation_group,
+ sec_itr)) {
+ LLVMDisposeSectionIterator(sec_itr);
+ return false;
+ }
+ relocation_group++;
+ }
+ LLVMMoveToNextSection(sec_itr);
+ }
+ LLVMDisposeSectionIterator(sec_itr);
+
+ return true;
+}
+
+static void
+destroy_relocation_groups(AOTRelocationGroup *relocation_groups,
+ uint32 relocation_group_count)
+{
+ uint32 i;
+ AOTRelocationGroup *relocation_group = relocation_groups;
+
+ for (i = 0; i < relocation_group_count; i++, relocation_group++)
+ if (relocation_group->relocations)
+ wasm_runtime_free(relocation_group->relocations);
+ wasm_runtime_free(relocation_groups);
+}
+
+static void
+destroy_relocation_symbol_list(AOTSymbolList *symbol_list)
+{
+ AOTSymbolNode *elem;
+
+ elem = symbol_list->head;
+ while (elem) {
+ AOTSymbolNode *next = elem->next;
+ wasm_runtime_free(elem);
+ elem = next;
+ }
+}
+
+static void
+aot_obj_data_destroy(AOTObjectData *obj_data)
+{
+ if (obj_data->binary)
+ LLVMDisposeBinary(obj_data->binary);
+ if (obj_data->mem_buf)
+ LLVMDisposeMemoryBuffer(obj_data->mem_buf);
+ if (obj_data->funcs)
+ wasm_runtime_free(obj_data->funcs);
+ if (obj_data->data_sections)
+ wasm_runtime_free(obj_data->data_sections);
+ if (obj_data->relocation_groups)
+ destroy_relocation_groups(obj_data->relocation_groups,
+ obj_data->relocation_group_count);
+ if (obj_data->symbol_list.len)
+ destroy_relocation_symbol_list(&obj_data->symbol_list);
+ wasm_runtime_free(obj_data);
+}
+
+static AOTObjectData *
+aot_obj_data_create(AOTCompContext *comp_ctx)
+{
+ char *err = NULL;
+ AOTObjectData *obj_data;
+ LLVMTargetRef target = LLVMGetTargetMachineTarget(comp_ctx->target_machine);
+
+ bh_print_time("Begin to emit object file to buffer");
+
+ if (!(obj_data = wasm_runtime_malloc(sizeof(AOTObjectData)))) {
+ aot_set_last_error("allocate memory failed.");
+        return NULL;
+ }
+ memset(obj_data, 0, sizeof(AOTObjectData));
+
+ bh_print_time("Begin to emit object file");
+ if (comp_ctx->external_llc_compiler || comp_ctx->external_asm_compiler) {
+#if defined(_WIN32) || defined(_WIN32_)
+ aot_set_last_error("external toolchain not supported on Windows");
+ goto fail;
+#else
+ /* Generate a temp file name */
+ int ret;
+ char obj_file_name[64];
+
+ if (!aot_generate_tempfile_name("wamrc-obj", "o", obj_file_name,
+ sizeof(obj_file_name))) {
+ goto fail;
+ }
+
+ if (!aot_emit_object_file(comp_ctx, obj_file_name)) {
+ goto fail;
+ }
+
+ /* create memory buffer from object file */
+ ret = LLVMCreateMemoryBufferWithContentsOfFile(
+ obj_file_name, &obj_data->mem_buf, &err);
+ /* remove temp object file */
+ unlink(obj_file_name);
+
+ if (ret != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ aot_set_last_error("create mem buffer with file failed.");
+ goto fail;
+ }
+#endif /* end of defined(_WIN32) || defined(_WIN32_) */
+ }
+ else if (!strncmp(LLVMGetTargetName(target), "arc", 3)) {
+#if defined(_WIN32) || defined(_WIN32_)
+ aot_set_last_error("emit object file on Windows is unsupported.");
+ goto fail;
+#else
+        /* Emit to assembly file instead for arc target
+           as it cannot emit to object file */
+ char file_name[] = "wasm-XXXXXX", buf[128];
+ int fd, ret;
+
+ if ((fd = mkstemp(file_name)) <= 0) {
+ aot_set_last_error("make temp file failed.");
+ goto fail;
+ }
+
+ /* close and remove temp file */
+ close(fd);
+ unlink(file_name);
+
+ snprintf(buf, sizeof(buf), "%s%s", file_name, ".s");
+ if (LLVMTargetMachineEmitToFile(comp_ctx->target_machine,
+ comp_ctx->module, buf, LLVMAssemblyFile,
+ &err)
+ != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+            aot_set_last_error("emit to assembly file failed.");
+ goto fail;
+ }
+
+ /* call arc gcc to compile assembly file to object file */
+        /* TODO: get arc gcc from an environment variable first and check
+           whether the toolchain actually exists */
+ snprintf(buf, sizeof(buf), "%s%s%s%s%s%s",
+ "/opt/zephyr-sdk/arc-zephyr-elf/bin/arc-zephyr-elf-gcc ",
+ "-mcpu=arcem -o ", file_name, ".o -c ", file_name, ".s");
+ /* TODO: use try..catch to handle possible exceptions */
+ ret = system(buf);
+ /* remove temp assembly file */
+ snprintf(buf, sizeof(buf), "%s%s", file_name, ".s");
+ unlink(buf);
+
+ if (ret != 0) {
+ aot_set_last_error("failed to compile asm file to obj file "
+ "with arc gcc toolchain.");
+ goto fail;
+ }
+
+ /* create memory buffer from object file */
+ snprintf(buf, sizeof(buf), "%s%s", file_name, ".o");
+ ret = LLVMCreateMemoryBufferWithContentsOfFile(buf, &obj_data->mem_buf,
+ &err);
+ /* remove temp object file */
+ snprintf(buf, sizeof(buf), "%s%s", file_name, ".o");
+ unlink(buf);
+
+ if (ret != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ aot_set_last_error("create mem buffer with file failed.");
+ goto fail;
+ }
+#endif /* end of defined(_WIN32) || defined(_WIN32_) */
+ }
+ else {
+ if (LLVMTargetMachineEmitToMemoryBuffer(
+ comp_ctx->target_machine, comp_ctx->module, LLVMObjectFile,
+ &err, &obj_data->mem_buf)
+ != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ aot_set_last_error("llvm emit to memory buffer failed.");
+ goto fail;
+ }
+ }
+
+ if (!(obj_data->binary = LLVMCreateBinary(obj_data->mem_buf, NULL, &err))) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ aot_set_last_error("llvm create binary failed.");
+ goto fail;
+ }
+
+ bh_print_time("Begin to resolve object file info");
+
+ /* resolve target info/text/relocations/functions */
+ if (!aot_resolve_target_info(comp_ctx, obj_data)
+ || !aot_resolve_text(obj_data) || !aot_resolve_literal(obj_data)
+ || !aot_resolve_object_data_sections(obj_data)
+ || !aot_resolve_object_relocation_groups(obj_data)
+ || !aot_resolve_functions(comp_ctx, obj_data))
+ goto fail;
+
+ return obj_data;
+
+fail:
+ aot_obj_data_destroy(obj_data);
+ return NULL;
+}
+
+uint8 *
+aot_emit_aot_file_buf(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ uint32 *p_aot_file_size)
+{
+ AOTObjectData *obj_data = aot_obj_data_create(comp_ctx);
+ uint8 *aot_file_buf, *buf, *buf_end;
+ uint32 aot_file_size, offset = 0;
+
+ if (!obj_data)
+ return NULL;
+
+ aot_file_size = get_aot_file_size(comp_ctx, comp_data, obj_data);
+
+ if (!(buf = aot_file_buf = wasm_runtime_malloc(aot_file_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail1;
+ }
+
+ memset(aot_file_buf, 0, aot_file_size);
+ buf_end = buf + aot_file_size;
+
+ if (!aot_emit_file_header(buf, buf_end, &offset, comp_data, obj_data)
+ || !aot_emit_target_info_section(buf, buf_end, &offset, comp_data,
+ obj_data)
+ || !aot_emit_init_data_section(buf, buf_end, &offset, comp_ctx,
+ comp_data, obj_data)
+ || !aot_emit_text_section(buf, buf_end, &offset, comp_data, obj_data)
+ || !aot_emit_func_section(buf, buf_end, &offset, comp_data, obj_data)
+ || !aot_emit_export_section(buf, buf_end, &offset, comp_ctx, comp_data,
+ obj_data)
+ || !aot_emit_relocation_section(buf, buf_end, &offset, comp_ctx,
+ comp_data, obj_data)
+ || !aot_emit_native_symbol(buf, buf_end, &offset, comp_ctx)
+ || !aot_emit_name_section(buf, buf_end, &offset, comp_data, comp_ctx)
+ || !aot_emit_custom_sections(buf, buf_end, &offset, comp_data,
+ comp_ctx))
+ goto fail2;
+
+#if 0
+ dump_buf(buf, offset, "sections");
+#endif
+
+ if (offset != aot_file_size) {
+ aot_set_last_error("emit aot file failed.");
+ goto fail2;
+ }
+
+ *p_aot_file_size = aot_file_size;
+
+ aot_obj_data_destroy(obj_data);
+ return aot_file_buf;
+
+fail2:
+ wasm_runtime_free(aot_file_buf);
+
+fail1:
+ aot_obj_data_destroy(obj_data);
+ return NULL;
+}
+
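+/* A typical driver flow (sketch, error handling elided): either call
+ * aot_emit_aot_file_buf() above to get the serialized module in memory,
+ *
+ *   uint32 size;
+ *   uint8 *buf = aot_emit_aot_file_buf(comp_ctx, comp_data, &size);
+ *   ...   // hand the buffer to a custom writer
+ *   wasm_runtime_free(buf);
+ *
+ * or call aot_emit_aot_file() below to write it straight to a file. */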
+bool
+aot_emit_aot_file(AOTCompContext *comp_ctx, AOTCompData *comp_data,
+ const char *file_name)
+{
+ uint8 *aot_file_buf;
+ uint32 aot_file_size;
+ bool ret = false;
+ FILE *file;
+
+ bh_print_time("Begin to emit AOT file");
+
+ if (!(aot_file_buf =
+ aot_emit_aot_file_buf(comp_ctx, comp_data, &aot_file_size))) {
+ return false;
+ }
+
+ /* write buffer to file */
+ if (!(file = fopen(file_name, "wb"))) {
+ aot_set_last_error("open or create aot file failed.");
+ goto fail1;
+ }
+ if (!fwrite(aot_file_buf, aot_file_size, 1, file)) {
+ aot_set_last_error("write to aot file failed.");
+ goto fail2;
+ }
+
+ ret = true;
+
+fail2:
+ fclose(file);
+
+fail1:
+ wasm_runtime_free(aot_file_buf);
+
+ return ret;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.c
new file mode 100644
index 000000000..a38263264
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_compare.h"
+#include "../aot/aot_intrinsic.h"
+
+static bool
+int_cond_to_llvm_op(IntCond cond, LLVMIntPredicate *op)
+{
+ if (cond < INT_EQZ || cond > INT_GE_U)
+ return false;
+
+ switch (cond) {
+ case INT_EQZ:
+ case INT_EQ:
+ *op = LLVMIntEQ;
+ break;
+ case INT_NE:
+ *op = LLVMIntNE;
+ break;
+ case INT_LT_S:
+ *op = LLVMIntSLT;
+ break;
+ case INT_LT_U:
+ *op = LLVMIntULT;
+ break;
+ case INT_GT_S:
+ *op = LLVMIntSGT;
+ break;
+ case INT_GT_U:
+ *op = LLVMIntUGT;
+ break;
+ case INT_LE_S:
+ *op = LLVMIntSLE;
+ break;
+ case INT_LE_U:
+ *op = LLVMIntULE;
+ break;
+ case INT_GE_S:
+ *op = LLVMIntSGE;
+ break;
+ case INT_GE_U:
+ *op = LLVMIntUGE;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
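+
+/* For example, WASM i32.lt_s arrives here as INT_LT_S and maps to
+ * LLVMIntSLT, while INT_EQZ maps to LLVMIntEQ and the callers below
+ * supply a zero right-hand operand. */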
+
+static bool
+float_cond_to_llvm_op(FloatCond cond, LLVMRealPredicate *op)
+{
+ if (cond < FLOAT_EQ || cond > FLOAT_GE)
+ return false;
+
+ switch (cond) {
+ case FLOAT_EQ:
+ *op = LLVMRealOEQ;
+ break;
+ case FLOAT_NE:
+ *op = LLVMRealUNE;
+ break;
+ case FLOAT_LT:
+ *op = LLVMRealOLT;
+ break;
+ case FLOAT_GT:
+ *op = LLVMRealOGT;
+ break;
+ case FLOAT_LE:
+ *op = LLVMRealOLE;
+ break;
+ case FLOAT_GE:
+ *op = LLVMRealOGE;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
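+
+/* All predicates are "ordered" (false when an operand is NaN) except
+ * FLOAT_NE, which maps to the unordered LLVMRealUNE so that a compare
+ * against NaN yields true, matching WASM's f32.ne/f64.ne semantics. */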
+
+bool
+aot_compile_op_i32_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntCond cond)
+{
+ LLVMIntPredicate op;
+ LLVMValueRef lhs, rhs, res;
+
+ if (!int_cond_to_llvm_op(cond, &op)) {
+ aot_set_last_error("invalid WASM condition opcode");
+ return false;
+ }
+
+ if (cond == INT_EQZ)
+ rhs = I32_ZERO;
+ else
+ POP_I32(rhs);
+
+ POP_I32(lhs);
+
+ if (!(res = LLVMBuildICmp(comp_ctx->builder, op, lhs, rhs, "i32_cmp"))) {
+ aot_set_last_error("llvm build compare failed.");
+ return false;
+ }
+
+ PUSH_COND(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntCond cond)
+{
+ LLVMIntPredicate op;
+ LLVMValueRef lhs, rhs, res;
+
+ if (!int_cond_to_llvm_op(cond, &op)) {
+ aot_set_last_error("invalid WASM condition opcode");
+ return false;
+ }
+
+ if (cond == INT_EQZ)
+ rhs = I64_CONST(0);
+ else
+ POP_I64(rhs);
+
+ POP_I64(lhs);
+
+ if (!(res = LLVMBuildICmp(comp_ctx->builder, op, lhs, rhs, "i64_cmp"))) {
+ aot_set_last_error("llvm build compare failed.");
+ return false;
+ }
+
+ PUSH_COND(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond)
+{
+ LLVMRealPredicate op;
+ LLVMValueRef lhs, rhs, res;
+
+ if (!float_cond_to_llvm_op(cond, &op)) {
+ aot_set_last_error("invalid WASM condition opcode");
+ return false;
+ }
+
+ POP_F32(rhs);
+ POP_F32(lhs);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, "f32_cmp")) {
+ LLVMTypeRef param_types[3];
+ LLVMValueRef opcond = LLVMConstInt(I32_TYPE, cond, true);
+ param_types[0] = I32_TYPE;
+ param_types[1] = F32_TYPE;
+ param_types[2] = F32_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, "f32_cmp", I32_TYPE,
+ param_types, 3, opcond, lhs, rhs);
+ if (!res) {
+ goto fail;
+ }
+ res = LLVMBuildIntCast(comp_ctx->builder, res, INT1_TYPE, "bit_cast");
+ }
+ else {
+ res = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, "f32_cmp");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build compare failed.");
+ return false;
+ }
+
+ PUSH_COND(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond)
+{
+ LLVMRealPredicate op;
+ LLVMValueRef lhs, rhs, res;
+
+ if (!float_cond_to_llvm_op(cond, &op)) {
+ aot_set_last_error("invalid WASM condition opcode");
+ return false;
+ }
+
+ POP_F64(rhs);
+ POP_F64(lhs);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, "f64_cmp")) {
+ LLVMTypeRef param_types[3];
+ LLVMValueRef opcond = LLVMConstInt(I32_TYPE, cond, true);
+ param_types[0] = I32_TYPE;
+ param_types[1] = F64_TYPE;
+ param_types[2] = F64_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, "f64_cmp", I32_TYPE,
+ param_types, 3, opcond, lhs, rhs);
+ if (!res) {
+ goto fail;
+ }
+ res = LLVMBuildIntCast(comp_ctx->builder, res, INT1_TYPE, "bit_cast");
+ }
+ else {
+ res = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, "f64_cmp");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build compare failed.");
+ return false;
+ }
+
+ PUSH_COND(res);
+ return true;
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.h
new file mode 100644
index 000000000..6ac37794c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_compare.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_COMPARE_H_
+#define _AOT_EMIT_COMPARE_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_i32_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntCond cond);
+
+bool
+aot_compile_op_i64_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntCond cond);
+
+bool
+aot_compile_op_f32_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond);
+
+bool
+aot_compile_op_f64_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_COMPARE_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.c
new file mode 100644
index 000000000..4b38aa962
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_const.h"
+#include "../aot/aot_intrinsic.h"
+
+bool
+aot_compile_op_i32_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int32 i32_const)
+{
+ LLVMValueRef value;
+
+ if (comp_ctx->is_indirect_mode
+ && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
+ WASMValue wasm_value;
+ wasm_value.i32 = i32_const;
+ value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
+ &wasm_value, VALUE_TYPE_I32);
+ if (!value) {
+ return false;
+ }
+ }
+ else {
+ value = I32_CONST((uint32)i32_const);
+ CHECK_LLVM_CONST(value);
+ }
+
+ PUSH_I32(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int64 i64_const)
+{
+ LLVMValueRef value;
+
+ if (comp_ctx->is_indirect_mode
+ && aot_intrinsic_check_capability(comp_ctx, "i64.const")) {
+ WASMValue wasm_value;
+ wasm_value.i64 = i64_const;
+ value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
+ &wasm_value, VALUE_TYPE_I64);
+ if (!value) {
+ return false;
+ }
+ }
+ else {
+ value = I64_CONST((uint64)i64_const);
+ CHECK_LLVM_CONST(value);
+ }
+
+ PUSH_I64(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ float32 f32_const)
+{
+ LLVMValueRef alloca, value;
+
+ if (!isnan(f32_const)) {
+ if (comp_ctx->is_indirect_mode
+ && aot_intrinsic_check_capability(comp_ctx, "f32.const")) {
+ WASMValue wasm_value;
+ memcpy(&wasm_value.f32, &f32_const, sizeof(float32));
+ value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
+ &wasm_value, VALUE_TYPE_F32);
+ if (!value) {
+ return false;
+ }
+ PUSH_F32(value);
+ }
+ else {
+ value = F32_CONST(f32_const);
+ CHECK_LLVM_CONST(value);
+ PUSH_F32(value);
+ }
+ }
+ else {
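+        /* The constant is a NaN: materialize it through memory so the
+           exact bit pattern (including the payload) is preserved, since
+           building it as an LLVM float constant could canonicalize it. */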
+ int32 i32_const;
+ memcpy(&i32_const, &f32_const, sizeof(int32));
+ if (!(alloca =
+ LLVMBuildAlloca(comp_ctx->builder, I32_TYPE, "i32_ptr"))) {
+ aot_set_last_error("llvm build alloca failed.");
+ return false;
+ }
+ if (!LLVMBuildStore(comp_ctx->builder, I32_CONST((uint32)i32_const),
+ alloca)) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ if (!(alloca = LLVMBuildBitCast(comp_ctx->builder, alloca, F32_PTR_TYPE,
+ "f32_ptr"))) {
+ aot_set_last_error("llvm build bitcast failed.");
+ return false;
+ }
+ if (!(value =
+ LLVMBuildLoad2(comp_ctx->builder, F32_TYPE, alloca, ""))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ PUSH_F32(value);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ float64 f64_const)
+{
+ LLVMValueRef alloca, value;
+
+ if (!isnan(f64_const)) {
+ if (comp_ctx->is_indirect_mode
+ && aot_intrinsic_check_capability(comp_ctx, "f64.const")) {
+ WASMValue wasm_value;
+ memcpy(&wasm_value.f64, &f64_const, sizeof(float64));
+ value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
+ &wasm_value, VALUE_TYPE_F64);
+ if (!value) {
+ return false;
+ }
+ PUSH_F64(value);
+ }
+ else {
+ value = F64_CONST(f64_const);
+ CHECK_LLVM_CONST(value);
+ PUSH_F64(value);
+ }
+ }
+ else {
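+        /* Same trick as the f32 case: round-trip the raw bits through
+           an i64 slot in memory so the NaN payload is preserved. */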
+ int64 i64_const;
+ memcpy(&i64_const, &f64_const, sizeof(int64));
+ if (!(alloca =
+ LLVMBuildAlloca(comp_ctx->builder, I64_TYPE, "i64_ptr"))) {
+ aot_set_last_error("llvm build alloca failed.");
+ return false;
+ }
+ value = I64_CONST((uint64)i64_const);
+ CHECK_LLVM_CONST(value);
+ if (!LLVMBuildStore(comp_ctx->builder, value, alloca)) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ if (!(alloca = LLVMBuildBitCast(comp_ctx->builder, alloca, F64_PTR_TYPE,
+ "f64_ptr"))) {
+ aot_set_last_error("llvm build bitcast failed.");
+ return false;
+ }
+ if (!(value =
+ LLVMBuildLoad2(comp_ctx->builder, F64_TYPE, alloca, ""))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ PUSH_F64(value);
+ }
+
+ return true;
+fail:
+ return false;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.h
new file mode 100644
index 000000000..0b56cb13b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_const.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_CONST_H_
+#define _AOT_EMIT_CONST_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_i32_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int32 i32_const);
+
+bool
+aot_compile_op_i64_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int64 i64_const);
+
+bool
+aot_compile_op_f32_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ float32 f32_const);
+
+bool
+aot_compile_op_f64_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ float64 f64_const);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_CONST_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.c
new file mode 100644
index 000000000..2cf51cf67
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.c
@@ -0,0 +1,1155 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_control.h"
+#include "aot_emit_exception.h"
+#include "../aot/aot_runtime.h"
+#include "../interpreter/wasm_loader.h"
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+#include "debug/dwarf_extractor.h"
+#endif
+
+static char *block_name_prefix[] = { "block", "loop", "if" };
+static char *block_name_suffix[] = { "begin", "else", "end" };
+
+/* clang-format off */
+enum {
+ LABEL_BEGIN = 0,
+ LABEL_ELSE,
+ LABEL_END
+};
+/* clang-format on */
+
+static void
+format_block_name(char *name, uint32 name_size, uint32 block_index,
+ uint32 label_type, uint32 label_id)
+{
+ if (label_type != LABEL_TYPE_FUNCTION)
+ snprintf(name, name_size, "%s%d%s%s", block_name_prefix[label_type],
+ block_index, "_", block_name_suffix[label_id]);
+ else
+ snprintf(name, name_size, "%s", "func_end");
+}
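+
+/* Produces names such as "block3_begin", "if1_else" or "loop0_end";
+ * the function-level block always gets the fixed name "func_end". */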
+
+#define CREATE_BLOCK(new_llvm_block, name) \
+ do { \
+ if (!(new_llvm_block = LLVMAppendBasicBlockInContext( \
+ comp_ctx->context, func_ctx->func, name))) { \
+ aot_set_last_error("add LLVM basic block failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define CURR_BLOCK() LLVMGetInsertBlock(comp_ctx->builder)
+
+#define MOVE_BLOCK_AFTER(llvm_block, llvm_block_after) \
+ LLVMMoveBasicBlockAfter(llvm_block, llvm_block_after)
+
+#define MOVE_BLOCK_AFTER_CURR(llvm_block) \
+ LLVMMoveBasicBlockAfter(llvm_block, CURR_BLOCK())
+
+#define MOVE_BLOCK_BEFORE(llvm_block, llvm_block_before) \
+ LLVMMoveBasicBlockBefore(llvm_block, llvm_block_before)
+
+#define BUILD_BR(llvm_block) \
+ do { \
+ if (!LLVMBuildBr(comp_ctx->builder, llvm_block)) { \
+ aot_set_last_error("llvm build br failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define BUILD_COND_BR(value_if, block_then, block_else) \
+ do { \
+ if (!LLVMBuildCondBr(comp_ctx->builder, value_if, block_then, \
+ block_else)) { \
+ aot_set_last_error("llvm build cond br failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define SET_BUILDER_POS(llvm_block) \
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, llvm_block)
+
+#define CREATE_RESULT_VALUE_PHIS(block) \
+ do { \
+ if (block->result_count && !block->result_phis) { \
+ uint32 _i; \
+ uint64 _size; \
+ LLVMBasicBlockRef _block_curr = CURR_BLOCK(); \
+ /* Allocate memory */ \
+ _size = sizeof(LLVMValueRef) * (uint64)block->result_count; \
+ if (_size >= UINT32_MAX \
+ || !(block->result_phis = \
+ wasm_runtime_malloc((uint32)_size))) { \
+ aot_set_last_error("allocate memory failed."); \
+ goto fail; \
+ } \
+ SET_BUILDER_POS(block->llvm_end_block); \
+ for (_i = 0; _i < block->result_count; _i++) { \
+ if (!(block->result_phis[_i] = LLVMBuildPhi( \
+ comp_ctx->builder, \
+ TO_LLVM_TYPE(block->result_types[_i]), "phi"))) { \
+ aot_set_last_error("llvm build phi failed."); \
+ goto fail; \
+ } \
+ } \
+ SET_BUILDER_POS(_block_curr); \
+ } \
+ } while (0)
+
+#define ADD_TO_RESULT_PHIS(block, value, idx) \
+ do { \
+ LLVMBasicBlockRef _block_curr = CURR_BLOCK(); \
+ LLVMTypeRef phi_ty = LLVMTypeOf(block->result_phis[idx]); \
+ LLVMTypeRef value_ty = LLVMTypeOf(value); \
+ bh_assert(LLVMGetTypeKind(phi_ty) == LLVMGetTypeKind(value_ty)); \
+ bh_assert(LLVMGetTypeContext(phi_ty) == LLVMGetTypeContext(value_ty)); \
+ LLVMAddIncoming(block->result_phis[idx], &value, &_block_curr, 1); \
+ (void)phi_ty; \
+ (void)value_ty; \
+ } while (0)
+
+#define BUILD_ICMP(op, left, right, res, name) \
+ do { \
+ if (!(res = \
+ LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
+ aot_set_last_error("llvm build icmp failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define ADD_TO_PARAM_PHIS(block, value, idx) \
+ do { \
+ LLVMBasicBlockRef _block_curr = CURR_BLOCK(); \
+ LLVMAddIncoming(block->param_phis[idx], &value, &_block_curr, 1); \
+ } while (0)
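+
+/* The phi-related macros above wire WASM block parameters and results
+ * into LLVM SSA form: a block reachable from several predecessors gets
+ * one phi node per value, and every branch site registers its incoming
+ * value with LLVMAddIncoming. */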
+
+static LLVMBasicBlockRef
+find_next_llvm_end_block(AOTBlock *block)
+{
+ block = block->prev;
+ while (block && !block->llvm_end_block)
+ block = block->prev;
+ return block ? block->llvm_end_block : NULL;
+}
+
+static AOTBlock *
+get_target_block(AOTFuncContext *func_ctx, uint32 br_depth)
+{
+ uint32 i = br_depth;
+ AOTBlock *block = func_ctx->block_stack.block_list_end;
+
+ while (i-- > 0 && block) {
+ block = block->prev;
+ }
+
+ if (!block) {
+ aot_set_last_error("WASM block stack underflow.");
+ return NULL;
+ }
+ return block;
+}
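+
+/* br_depth is relative: depth 0 targets the innermost block on the
+ * block stack, depth 1 its parent, and so on up to the function block. */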
+
+static bool
+handle_next_reachable_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip)
+{
+ AOTBlock *block = func_ctx->block_stack.block_list_end;
+ AOTBlock *block_prev;
+ uint8 *frame_ip = NULL;
+ uint32 i;
+ AOTFuncType *func_type;
+ LLVMValueRef ret;
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMMetadataRef return_location;
+#endif
+
+ aot_checked_addr_list_destroy(func_ctx);
+ bh_assert(block);
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ return_location = dwarf_gen_location(
+ comp_ctx, func_ctx,
+ (*p_frame_ip - 1) - comp_ctx->comp_data->wasm_module->buf_code);
+#endif
+ if (block->label_type == LABEL_TYPE_IF && block->llvm_else_block
+ && *p_frame_ip <= block->wasm_code_else) {
+ /* Clear value stack and start to translate else branch */
+ aot_value_stack_destroy(&block->value_stack);
+ /* Recover parameters of else branch */
+ for (i = 0; i < block->param_count; i++)
+ PUSH(block->else_param_phis[i], block->param_types[i]);
+ SET_BUILDER_POS(block->llvm_else_block);
+ *p_frame_ip = block->wasm_code_else + 1;
+ return true;
+ }
+
+ while (block && !block->is_reachable) {
+ block_prev = block->prev;
+ block = aot_block_stack_pop(&func_ctx->block_stack);
+
+ if (block->label_type == LABEL_TYPE_IF) {
+ if (block->llvm_else_block && !block->skip_wasm_code_else
+ && *p_frame_ip <= block->wasm_code_else) {
+ /* Clear value stack and start to translate else branch */
+ aot_value_stack_destroy(&block->value_stack);
+ SET_BUILDER_POS(block->llvm_else_block);
+ *p_frame_ip = block->wasm_code_else + 1;
+ /* Push back the block */
+ aot_block_stack_push(&func_ctx->block_stack, block);
+ return true;
+ }
+ else if (block->llvm_end_block) {
+ /* Remove unreachable basic block */
+ LLVMDeleteBasicBlock(block->llvm_end_block);
+ block->llvm_end_block = NULL;
+ }
+ }
+
+ frame_ip = block->wasm_code_end;
+ aot_block_destroy(block);
+ block = block_prev;
+ }
+
+ if (!block) {
+ *p_frame_ip = frame_ip + 1;
+ return true;
+ }
+
+ *p_frame_ip = block->wasm_code_end + 1;
+ SET_BUILDER_POS(block->llvm_end_block);
+
+ /* Pop block, push its return value, and destroy the block */
+ block = aot_block_stack_pop(&func_ctx->block_stack);
+ func_type = func_ctx->aot_func->func_type;
+ for (i = 0; i < block->result_count; i++) {
+ bh_assert(block->result_phis[i]);
+ if (block->label_type != LABEL_TYPE_FUNCTION) {
+ PUSH(block->result_phis[i], block->result_types[i]);
+ }
+ else {
+ /* Store extra return values to function parameters */
+ if (i != 0) {
+ uint32 param_index = func_type->param_count + i;
+ if (!LLVMBuildStore(
+ comp_ctx->builder, block->result_phis[i],
+ LLVMGetParam(func_ctx->func, param_index))) {
+ aot_set_last_error("llvm build store failed.");
+ goto fail;
+ }
+ }
+ }
+ }
+ if (block->label_type == LABEL_TYPE_FUNCTION) {
+ if (block->result_count) {
+ /* Return the first return value */
+ if (!(ret =
+ LLVMBuildRet(comp_ctx->builder, block->result_phis[0]))) {
+ aot_set_last_error("llvm build return failed.");
+ goto fail;
+ }
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMInstructionSetDebugLoc(ret, return_location);
+#endif
+ }
+ else {
+ if (!(ret = LLVMBuildRetVoid(comp_ctx->builder))) {
+ aot_set_last_error("llvm build return void failed.");
+ goto fail;
+ }
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMInstructionSetDebugLoc(ret, return_location);
+#endif
+ }
+ }
+ aot_block_destroy(block);
+ return true;
+fail:
+ return false;
+}
+
+static bool
+push_aot_block_to_stack_and_pass_params(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ AOTBlock *block)
+{
+ uint32 i, param_index;
+ LLVMValueRef value;
+ uint64 size;
+ char name[32];
+ LLVMBasicBlockRef block_curr = CURR_BLOCK();
+
+ if (block->param_count) {
+ size = sizeof(LLVMValueRef) * (uint64)block->param_count;
+ if (size >= UINT32_MAX
+ || !(block->param_phis = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+
+ if (block->label_type == LABEL_TYPE_IF && !block->skip_wasm_code_else
+ && !(block->else_param_phis = wasm_runtime_malloc((uint32)size))) {
+ wasm_runtime_free(block->param_phis);
+ block->param_phis = NULL;
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+
+ /* Create param phis */
+ for (i = 0; i < block->param_count; i++) {
+ SET_BUILDER_POS(block->llvm_entry_block);
+ snprintf(name, sizeof(name), "%s%d_phi%d",
+ block_name_prefix[block->label_type], block->block_index,
+ i);
+ if (!(block->param_phis[i] = LLVMBuildPhi(
+ comp_ctx->builder, TO_LLVM_TYPE(block->param_types[i]),
+ name))) {
+ aot_set_last_error("llvm build phi failed.");
+ goto fail;
+ }
+
+ if (block->label_type == LABEL_TYPE_IF
+ && !block->skip_wasm_code_else && block->llvm_else_block) {
+ /* Build else param phis */
+ SET_BUILDER_POS(block->llvm_else_block);
+ snprintf(name, sizeof(name), "else%d_phi%d", block->block_index,
+ i);
+ if (!(block->else_param_phis[i] = LLVMBuildPhi(
+ comp_ctx->builder,
+ TO_LLVM_TYPE(block->param_types[i]), name))) {
+ aot_set_last_error("llvm build phi failed.");
+ goto fail;
+ }
+ }
+ }
+ SET_BUILDER_POS(block_curr);
+
+ /* Pop param values from current block's
+ * value stack and add to param phis.
+ */
+ for (i = 0; i < block->param_count; i++) {
+ param_index = block->param_count - 1 - i;
+ POP(value, block->param_types[param_index]);
+ ADD_TO_PARAM_PHIS(block, value, param_index);
+ if (block->label_type == LABEL_TYPE_IF
+ && !block->skip_wasm_code_else) {
+ if (block->llvm_else_block) {
+ /* has else branch, add to else param phis */
+ LLVMAddIncoming(block->else_param_phis[param_index], &value,
+ &block_curr, 1);
+ }
+ else {
+ /* no else branch, add to result phis */
+ CREATE_RESULT_VALUE_PHIS(block);
+ ADD_TO_RESULT_PHIS(block, value, param_index);
+ }
+ }
+ }
+ }
+
+ /* Push the new block to block stack */
+ aot_block_stack_push(&func_ctx->block_stack, block);
+
+ /* Push param phis to the new block */
+ for (i = 0; i < block->param_count; i++) {
+ PUSH(block->param_phis[i], block->param_types[i]);
+ }
+
+ return true;
+
+fail:
+ if (block->param_phis) {
+ wasm_runtime_free(block->param_phis);
+ block->param_phis = NULL;
+ }
+ if (block->else_param_phis) {
+ wasm_runtime_free(block->else_param_phis);
+ block->else_param_phis = NULL;
+ }
+ return false;
+}
+
+bool
+aot_compile_op_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip, uint8 *frame_ip_end, uint32 label_type,
+ uint32 param_count, uint8 *param_types,
+ uint32 result_count, uint8 *result_types)
+{
+ BlockAddr block_addr_cache[BLOCK_ADDR_CACHE_SIZE][BLOCK_ADDR_CONFLICT_SIZE];
+ AOTBlock *block;
+ uint8 *else_addr, *end_addr;
+ LLVMValueRef value;
+ char name[32];
+
+ /* Check block stack */
+ if (!func_ctx->block_stack.block_list_end) {
+ aot_set_last_error("WASM block stack underflow.");
+ return false;
+ }
+
+ memset(block_addr_cache, 0, sizeof(block_addr_cache));
+
+ /* Get block info */
+ if (!(wasm_loader_find_block_addr(
+ NULL, (BlockAddr *)block_addr_cache, *p_frame_ip, frame_ip_end,
+ (uint8)label_type, &else_addr, &end_addr))) {
+ aot_set_last_error("find block end addr failed.");
+ return false;
+ }
+
+ /* Allocate memory */
+ if (!(block = wasm_runtime_malloc(sizeof(AOTBlock)))) {
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+ memset(block, 0, sizeof(AOTBlock));
+ if (param_count
+ && !(block->param_types = wasm_runtime_malloc(param_count))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ if (result_count) {
+ if (!(block->result_types = wasm_runtime_malloc(result_count))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ }
+
+ /* Init aot block data */
+ block->label_type = label_type;
+ block->param_count = param_count;
+ if (param_count) {
+ bh_memcpy_s(block->param_types, param_count, param_types, param_count);
+ }
+ block->result_count = result_count;
+ if (result_count) {
+ bh_memcpy_s(block->result_types, result_count, result_types,
+ result_count);
+ }
+ block->wasm_code_else = else_addr;
+ block->wasm_code_end = end_addr;
+ block->block_index = func_ctx->block_stack.block_index[label_type];
+ func_ctx->block_stack.block_index[label_type]++;
+
+ if (label_type == LABEL_TYPE_BLOCK || label_type == LABEL_TYPE_LOOP) {
+ /* Create block */
+ format_block_name(name, sizeof(name), block->block_index, label_type,
+ LABEL_BEGIN);
+ CREATE_BLOCK(block->llvm_entry_block, name);
+ MOVE_BLOCK_AFTER_CURR(block->llvm_entry_block);
+ /* Jump to the entry block */
+ BUILD_BR(block->llvm_entry_block);
+ if (!push_aot_block_to_stack_and_pass_params(comp_ctx, func_ctx, block))
+ goto fail;
+ /* Start to translate the block */
+ SET_BUILDER_POS(block->llvm_entry_block);
+ if (label_type == LABEL_TYPE_LOOP)
+ aot_checked_addr_list_destroy(func_ctx);
+ }
+ else if (label_type == LABEL_TYPE_IF) {
+ POP_COND(value);
+
+ if (LLVMIsUndef(value)
+#if LLVM_VERSION_NUMBER >= 12
+ || LLVMIsPoison(value)
+#endif
+ ) {
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
+ false, NULL, NULL))) {
+ goto fail;
+ }
+ aot_block_destroy(block);
+ return aot_handle_next_reachable_block(comp_ctx, func_ctx,
+ p_frame_ip);
+ }
+
+ if (!LLVMIsConstant(value)) {
+ /* Compare value is not constant, create condition br IR */
+ /* Create entry block */
+ format_block_name(name, sizeof(name), block->block_index,
+ label_type, LABEL_BEGIN);
+ CREATE_BLOCK(block->llvm_entry_block, name);
+ MOVE_BLOCK_AFTER_CURR(block->llvm_entry_block);
+
+ /* Create end block */
+ format_block_name(name, sizeof(name), block->block_index,
+ label_type, LABEL_END);
+ CREATE_BLOCK(block->llvm_end_block, name);
+ MOVE_BLOCK_AFTER(block->llvm_end_block, block->llvm_entry_block);
+
+ if (else_addr) {
+ /* Create else block */
+ format_block_name(name, sizeof(name), block->block_index,
+ label_type, LABEL_ELSE);
+ CREATE_BLOCK(block->llvm_else_block, name);
+ MOVE_BLOCK_AFTER(block->llvm_else_block,
+ block->llvm_entry_block);
+ /* Create condition br IR */
+ BUILD_COND_BR(value, block->llvm_entry_block,
+ block->llvm_else_block);
+ }
+ else {
+ /* Create condition br IR */
+ BUILD_COND_BR(value, block->llvm_entry_block,
+ block->llvm_end_block);
+ block->is_reachable = true;
+ }
+ if (!push_aot_block_to_stack_and_pass_params(comp_ctx, func_ctx,
+ block))
+ goto fail;
+ /* Start to translate if branch of BLOCK if */
+ SET_BUILDER_POS(block->llvm_entry_block);
+ }
+ else {
+ if ((int32)LLVMConstIntGetZExtValue(value) != 0) {
+ /* Compare value is not 0, condition is true, else branch of
+ BLOCK if cannot be reached */
+ block->skip_wasm_code_else = true;
+ /* Create entry block */
+ format_block_name(name, sizeof(name), block->block_index,
+ label_type, LABEL_BEGIN);
+ CREATE_BLOCK(block->llvm_entry_block, name);
+ MOVE_BLOCK_AFTER_CURR(block->llvm_entry_block);
+ /* Jump to the entry block */
+ BUILD_BR(block->llvm_entry_block);
+ if (!push_aot_block_to_stack_and_pass_params(comp_ctx, func_ctx,
+ block))
+ goto fail;
+ /* Start to translate the if branch */
+ SET_BUILDER_POS(block->llvm_entry_block);
+ }
+ else {
+                /* Compare value is 0, condition is false, if branch of
+                   BLOCK if cannot be reached */
+ if (else_addr) {
+ /* Create else block */
+ format_block_name(name, sizeof(name), block->block_index,
+ label_type, LABEL_ELSE);
+ CREATE_BLOCK(block->llvm_else_block, name);
+ MOVE_BLOCK_AFTER_CURR(block->llvm_else_block);
+ /* Jump to the else block */
+ BUILD_BR(block->llvm_else_block);
+ if (!push_aot_block_to_stack_and_pass_params(
+ comp_ctx, func_ctx, block))
+ goto fail;
+ /* Start to translate the else branch */
+ SET_BUILDER_POS(block->llvm_else_block);
+ *p_frame_ip = else_addr + 1;
+ }
+ else {
+ /* skip the block */
+ aot_block_destroy(block);
+ *p_frame_ip = end_addr + 1;
+ }
+ }
+ }
+ }
+ else {
+ aot_set_last_error("Invalid block type.");
+ goto fail;
+ }
+
+ return true;
+fail:
+ aot_block_destroy(block);
+ return false;
+}
+
+bool
+aot_compile_op_else(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip)
+{
+ AOTBlock *block = func_ctx->block_stack.block_list_end;
+ LLVMValueRef value;
+ char name[32];
+ uint32 i, result_index;
+
+ /* Check block */
+ if (!block) {
+ aot_set_last_error("WASM block stack underflow.");
+ return false;
+ }
+ if (block->label_type != LABEL_TYPE_IF
+ || (!block->skip_wasm_code_else && !block->llvm_else_block)) {
+ aot_set_last_error("Invalid WASM block type.");
+ return false;
+ }
+
+ /* Create end block if needed */
+ if (!block->llvm_end_block) {
+ format_block_name(name, sizeof(name), block->block_index,
+ block->label_type, LABEL_END);
+ CREATE_BLOCK(block->llvm_end_block, name);
+ if (block->llvm_else_block)
+ MOVE_BLOCK_AFTER(block->llvm_end_block, block->llvm_else_block);
+ else
+ MOVE_BLOCK_AFTER_CURR(block->llvm_end_block);
+ }
+
+ block->is_reachable = true;
+
+ /* Comes from the if branch of BLOCK if */
+ CREATE_RESULT_VALUE_PHIS(block);
+ for (i = 0; i < block->result_count; i++) {
+ result_index = block->result_count - 1 - i;
+ POP(value, block->result_types[result_index]);
+ ADD_TO_RESULT_PHIS(block, value, result_index);
+ }
+
+ /* Jump to end block */
+ BUILD_BR(block->llvm_end_block);
+
+ if (!block->skip_wasm_code_else && block->llvm_else_block) {
+ /* Clear value stack, recover param values
+ * and start to translate else branch.
+ */
+ aot_value_stack_destroy(&block->value_stack);
+ for (i = 0; i < block->param_count; i++)
+ PUSH(block->else_param_phis[i], block->param_types[i]);
+ SET_BUILDER_POS(block->llvm_else_block);
+ aot_checked_addr_list_destroy(func_ctx);
+ return true;
+ }
+
+ /* No else branch or no need to translate else branch */
+ block->is_reachable = true;
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_end(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip)
+{
+ AOTBlock *block;
+ LLVMValueRef value;
+ LLVMBasicBlockRef next_llvm_end_block;
+ char name[32];
+ uint32 i, result_index;
+
+ /* Check block stack */
+ if (!(block = func_ctx->block_stack.block_list_end)) {
+ aot_set_last_error("WASM block stack underflow.");
+ return false;
+ }
+
+ /* Create the end block */
+ if (!block->llvm_end_block) {
+ format_block_name(name, sizeof(name), block->block_index,
+ block->label_type, LABEL_END);
+ CREATE_BLOCK(block->llvm_end_block, name);
+ if ((next_llvm_end_block = find_next_llvm_end_block(block)))
+ MOVE_BLOCK_BEFORE(block->llvm_end_block, next_llvm_end_block);
+ }
+
+ /* Handle block result values */
+ CREATE_RESULT_VALUE_PHIS(block);
+ for (i = 0; i < block->result_count; i++) {
+ value = NULL;
+ result_index = block->result_count - 1 - i;
+ POP(value, block->result_types[result_index]);
+ bh_assert(value);
+ ADD_TO_RESULT_PHIS(block, value, result_index);
+ }
+
+ /* Jump to the end block */
+ BUILD_BR(block->llvm_end_block);
+
+ block->is_reachable = true;
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+fail:
+ return false;
+}
+
+#if WASM_ENABLE_THREAD_MGR != 0
+bool
+check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef terminate_addr, terminate_flags, flag, offset, res;
+ LLVMBasicBlockRef terminate_block, non_terminate_block;
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+ bool is_shared_memory =
+ comp_ctx->comp_data->memories[0].memory_flags & 0x02 ? true : false;
+
+ /* Only need to check the suspend flags when memory is shared since
+ shared memory must be enabled for multi-threading */
+ if (!is_shared_memory) {
+ return true;
+ }
+
+ /* Offset of suspend_flags */
+ offset = I32_FIVE;
+
+ if (!(terminate_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env, &offset, 1,
+ "terminate_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(terminate_addr =
+ LLVMBuildBitCast(comp_ctx->builder, terminate_addr,
+ INT32_PTR_TYPE, "terminate_addr_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+
+ if (!(terminate_flags =
+ LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, terminate_addr,
+ "terminate_flags"))) {
+ aot_set_last_error("llvm build LOAD failed");
+ return false;
+ }
+    /* Set terminate_flags memory access to volatile, so that the value
+       will always be loaded from memory rather than from a register */
+ LLVMSetVolatile(terminate_flags, true);
+
+ if (!(flag = LLVMBuildAnd(comp_ctx->builder, terminate_flags, I32_ONE,
+ "termination_flag"))) {
+ aot_set_last_error("llvm build AND failed");
+ return false;
+ }
+
+ CREATE_BLOCK(non_terminate_block, "non_terminate");
+ MOVE_BLOCK_AFTER_CURR(non_terminate_block);
+
+ CREATE_BLOCK(terminate_block, "terminate");
+ MOVE_BLOCK_AFTER_CURR(terminate_block);
+
+ BUILD_ICMP(LLVMIntEQ, flag, I32_ZERO, res, "flag_terminate");
+ BUILD_COND_BR(res, non_terminate_block, terminate_block);
+
+ /* Move builder to terminate block */
+ SET_BUILDER_POS(terminate_block);
+ if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
+ goto fail;
+ }
+
+ /* Move builder to non terminate block */
+ SET_BUILDER_POS(non_terminate_block);
+ return true;
+
+fail:
+ return false;
+}
+#endif /* End of WASM_ENABLE_THREAD_MGR */
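+
+/* check_suspend_flags() is emitted at the branch opcodes below (br,
+ * br_if, br_table), so a terminate request raised by another thread is
+ * noticed even inside a loop that never calls out of the function. */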
+
+bool
+aot_compile_op_br(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 br_depth, uint8 **p_frame_ip)
+{
+ AOTBlock *block_dst;
+ LLVMValueRef value_ret, value_param;
+ LLVMBasicBlockRef next_llvm_end_block;
+ char name[32];
+ uint32 i, param_index, result_index;
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ return false;
+ }
+#endif
+
+ if (!(block_dst = get_target_block(func_ctx, br_depth))) {
+ return false;
+ }
+
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ /* Dest block is Loop block */
+ /* Handle Loop parameters */
+ for (i = 0; i < block_dst->param_count; i++) {
+ param_index = block_dst->param_count - 1 - i;
+ POP(value_param, block_dst->param_types[param_index]);
+ ADD_TO_PARAM_PHIS(block_dst, value_param, param_index);
+ }
+ BUILD_BR(block_dst->llvm_entry_block);
+ }
+ else {
+ /* Dest block is Block/If/Function block */
+ /* Create the end block */
+ if (!block_dst->llvm_end_block) {
+ format_block_name(name, sizeof(name), block_dst->block_index,
+ block_dst->label_type, LABEL_END);
+ CREATE_BLOCK(block_dst->llvm_end_block, name);
+ if ((next_llvm_end_block = find_next_llvm_end_block(block_dst)))
+ MOVE_BLOCK_BEFORE(block_dst->llvm_end_block,
+ next_llvm_end_block);
+ }
+
+ block_dst->is_reachable = true;
+
+ /* Handle result values */
+ CREATE_RESULT_VALUE_PHIS(block_dst);
+ for (i = 0; i < block_dst->result_count; i++) {
+ result_index = block_dst->result_count - 1 - i;
+ POP(value_ret, block_dst->result_types[result_index]);
+ ADD_TO_RESULT_PHIS(block_dst, value_ret, result_index);
+ }
+ /* Jump to the end block */
+ BUILD_BR(block_dst->llvm_end_block);
+ }
+
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_br_if(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 br_depth, uint8 **p_frame_ip)
+{
+ AOTBlock *block_dst;
+ LLVMValueRef value_cmp, value, *values = NULL;
+ LLVMBasicBlockRef llvm_else_block, next_llvm_end_block;
+ char name[32];
+ uint32 i, param_index, result_index;
+ uint64 size;
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ return false;
+ }
+#endif
+
+ POP_COND(value_cmp);
+
+ if (LLVMIsUndef(value_cmp)
+#if LLVM_VERSION_NUMBER >= 12
+ || LLVMIsPoison(value_cmp)
+#endif
+ ) {
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
+ false, NULL, NULL))) {
+ goto fail;
+ }
+ return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+ }
+
+ if (!LLVMIsConstant(value_cmp)) {
+ /* Compare value is not constant, create condition br IR */
+ if (!(block_dst = get_target_block(func_ctx, br_depth))) {
+ return false;
+ }
+
+ /* Create llvm else block */
+ CREATE_BLOCK(llvm_else_block, "br_if_else");
+ MOVE_BLOCK_AFTER_CURR(llvm_else_block);
+
+ if (block_dst->label_type == LABEL_TYPE_LOOP) {
+ /* Dest block is Loop block */
+ /* Handle Loop parameters */
+ if (block_dst->param_count) {
+ size = sizeof(LLVMValueRef) * (uint64)block_dst->param_count;
+ if (size >= UINT32_MAX
+ || !(values = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ for (i = 0; i < block_dst->param_count; i++) {
+ param_index = block_dst->param_count - 1 - i;
+ POP(value, block_dst->param_types[param_index]);
+ ADD_TO_PARAM_PHIS(block_dst, value, param_index);
+ values[param_index] = value;
+ }
+ for (i = 0; i < block_dst->param_count; i++) {
+ PUSH(values[i], block_dst->param_types[i]);
+ }
+ wasm_runtime_free(values);
+ values = NULL;
+ }
+
+ BUILD_COND_BR(value_cmp, block_dst->llvm_entry_block,
+ llvm_else_block);
+
+ /* Move builder to else block */
+ SET_BUILDER_POS(llvm_else_block);
+ }
+ else {
+ /* Dest block is Block/If/Function block */
+ /* Create the end block */
+ if (!block_dst->llvm_end_block) {
+ format_block_name(name, sizeof(name), block_dst->block_index,
+ block_dst->label_type, LABEL_END);
+ CREATE_BLOCK(block_dst->llvm_end_block, name);
+ if ((next_llvm_end_block = find_next_llvm_end_block(block_dst)))
+ MOVE_BLOCK_BEFORE(block_dst->llvm_end_block,
+ next_llvm_end_block);
+ }
+
+ /* Set reachable flag and create condition br IR */
+ block_dst->is_reachable = true;
+
+ /* Handle result values */
+ if (block_dst->result_count) {
+ size = sizeof(LLVMValueRef) * (uint64)block_dst->result_count;
+ if (size >= UINT32_MAX
+ || !(values = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ CREATE_RESULT_VALUE_PHIS(block_dst);
+ for (i = 0; i < block_dst->result_count; i++) {
+ result_index = block_dst->result_count - 1 - i;
+ POP(value, block_dst->result_types[result_index]);
+ values[result_index] = value;
+ ADD_TO_RESULT_PHIS(block_dst, value, result_index);
+ }
+ for (i = 0; i < block_dst->result_count; i++) {
+ PUSH(values[i], block_dst->result_types[i]);
+ }
+ wasm_runtime_free(values);
+ values = NULL;
+ }
+
+ /* Condition jump to end block */
+ BUILD_COND_BR(value_cmp, block_dst->llvm_end_block,
+ llvm_else_block);
+
+ /* Move builder to else block */
+ SET_BUILDER_POS(llvm_else_block);
+ }
+ }
+ else {
+ if ((int32)LLVMConstIntGetZExtValue(value_cmp) != 0) {
+ /* Compare value is not 0, condition is true, same as op_br */
+ return aot_compile_op_br(comp_ctx, func_ctx, br_depth, p_frame_ip);
+ }
+ else {
+            /* Compare value is 0, condition is false, skip br_if */
+ return true;
+ }
+ }
+ return true;
+fail:
+ if (values)
+ wasm_runtime_free(values);
+ return false;
+}
+
+bool
+aot_compile_op_br_table(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 *br_depths, uint32 br_count, uint8 **p_frame_ip)
+{
+ uint32 i, j;
+ LLVMValueRef value_switch, value_cmp, value_case, value, *values = NULL;
+ LLVMBasicBlockRef default_llvm_block = NULL, target_llvm_block;
+ LLVMBasicBlockRef next_llvm_end_block;
+ AOTBlock *target_block;
+ uint32 br_depth, depth_idx;
+ uint32 param_index, result_index;
+ uint64 size;
+ char name[32];
+
+#if WASM_ENABLE_THREAD_MGR != 0
+    /* Insert suspend checkpoint */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ return false;
+ }
+#endif
+
+ POP_I32(value_cmp);
+
+ if (LLVMIsUndef(value_cmp)
+#if LLVM_VERSION_NUMBER >= 12
+ || LLVMIsPoison(value_cmp)
+#endif
+ ) {
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
+ false, NULL, NULL))) {
+ goto fail;
+ }
+ return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+ }
+
+ if (!LLVMIsConstant(value_cmp)) {
+ /* Compare value is not constant, create switch IR */
+ for (i = 0; i <= br_count; i++) {
+ target_block = get_target_block(func_ctx, br_depths[i]);
+ if (!target_block)
+ return false;
+
+ if (target_block->label_type != LABEL_TYPE_LOOP) {
+ /* Dest block is Block/If/Function block */
+ /* Create the end block */
+ if (!target_block->llvm_end_block) {
+ format_block_name(name, sizeof(name),
+ target_block->block_index,
+ target_block->label_type, LABEL_END);
+ CREATE_BLOCK(target_block->llvm_end_block, name);
+ if ((next_llvm_end_block =
+ find_next_llvm_end_block(target_block)))
+ MOVE_BLOCK_BEFORE(target_block->llvm_end_block,
+ next_llvm_end_block);
+ }
+ /* Handle result values */
+ if (target_block->result_count) {
+ size = sizeof(LLVMValueRef)
+ * (uint64)target_block->result_count;
+ if (size >= UINT32_MAX
+ || !(values = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ CREATE_RESULT_VALUE_PHIS(target_block);
+ for (j = 0; j < target_block->result_count; j++) {
+ result_index = target_block->result_count - 1 - j;
+ POP(value, target_block->result_types[result_index]);
+ values[result_index] = value;
+ ADD_TO_RESULT_PHIS(target_block, value, result_index);
+ }
+ for (j = 0; j < target_block->result_count; j++) {
+ PUSH(values[j], target_block->result_types[j]);
+ }
+ wasm_runtime_free(values);
+ }
+ target_block->is_reachable = true;
+ if (i == br_count)
+ default_llvm_block = target_block->llvm_end_block;
+ }
+ else {
+ /* Handle Loop parameters */
+ if (target_block->param_count) {
+ size = sizeof(LLVMValueRef)
+ * (uint64)target_block->param_count;
+ if (size >= UINT32_MAX
+ || !(values = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ for (j = 0; j < target_block->param_count; j++) {
+ param_index = target_block->param_count - 1 - j;
+ POP(value, target_block->param_types[param_index]);
+ values[param_index] = value;
+ ADD_TO_PARAM_PHIS(target_block, value, param_index);
+ }
+ for (j = 0; j < target_block->param_count; j++) {
+ PUSH(values[j], target_block->param_types[j]);
+ }
+ wasm_runtime_free(values);
+ }
+ if (i == br_count)
+ default_llvm_block = target_block->llvm_entry_block;
+ }
+ }
+
+ /* Create switch IR */
+ if (!(value_switch = LLVMBuildSwitch(comp_ctx->builder, value_cmp,
+ default_llvm_block, br_count))) {
+ aot_set_last_error("llvm build switch failed.");
+ return false;
+ }
+
+ /* Add each case for switch IR */
+ for (i = 0; i < br_count; i++) {
+ value_case = I32_CONST(i);
+ CHECK_LLVM_CONST(value_case);
+ target_block = get_target_block(func_ctx, br_depths[i]);
+ if (!target_block)
+ return false;
+ target_llvm_block = target_block->label_type != LABEL_TYPE_LOOP
+ ? target_block->llvm_end_block
+ : target_block->llvm_entry_block;
+ LLVMAddCase(value_switch, value_case, target_llvm_block);
+ }
+
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+ }
+ else {
+ /* Compare value is constant, create br IR */
+ depth_idx = (uint32)LLVMConstIntGetZExtValue(value_cmp);
+ br_depth = br_depths[br_count];
+ if (depth_idx < br_count) {
+ br_depth = br_depths[depth_idx];
+ }
+ return aot_compile_op_br(comp_ctx, func_ctx, br_depth, p_frame_ip);
+ }
+fail:
+ if (values)
+ wasm_runtime_free(values);
+ return false;
+}
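
When the br_table index is not a compile-time constant, the opcode lowers to a single LLVM switch: one LLVMAddCase per listed depth, with the last depth as the default edge; a constant index instead folds to a plain br at compile time. A minimal standalone sketch of the same switch-building pattern against the llvm-c API (module, function, and block names are illustrative, not from WAMR):

    /* Build: switch (idx) { case 0 -> depth0; case 1 -> depth1; default -> dflt } */
    #include <llvm-c/Core.h>

    int main(void) {
        LLVMContextRef ctx = LLVMContextCreate();
        LLVMModuleRef mod = LLVMModuleCreateWithNameInContext("br_table_demo", ctx);
        LLVMTypeRef i32 = LLVMInt32TypeInContext(ctx);
        LLVMTypeRef fty = LLVMFunctionType(i32, &i32, 1, 0);
        LLVMValueRef fn = LLVMAddFunction(mod, "select_depth", fty);

        LLVMBasicBlockRef entry = LLVMAppendBasicBlockInContext(ctx, fn, "entry");
        LLVMBasicBlockRef case0 = LLVMAppendBasicBlockInContext(ctx, fn, "depth0");
        LLVMBasicBlockRef case1 = LLVMAppendBasicBlockInContext(ctx, fn, "depth1");
        LLVMBasicBlockRef dflt  = LLVMAppendBasicBlockInContext(ctx, fn, "dflt");

        LLVMBuilderRef b = LLVMCreateBuilderInContext(ctx);
        LLVMPositionBuilderAtEnd(b, entry);
        LLVMValueRef sw = LLVMBuildSwitch(b, LLVMGetParam(fn, 0), dflt, 2);
        LLVMAddCase(sw, LLVMConstInt(i32, 0, 0), case0);
        LLVMAddCase(sw, LLVMConstInt(i32, 1, 0), case1);

        /* each target just returns its depth index */
        LLVMPositionBuilderAtEnd(b, case0);
        LLVMBuildRet(b, LLVMConstInt(i32, 0, 0));
        LLVMPositionBuilderAtEnd(b, case1);
        LLVMBuildRet(b, LLVMConstInt(i32, 1, 0));
        LLVMPositionBuilderAtEnd(b, dflt);
        LLVMBuildRet(b, LLVMConstInt(i32, 2, 0));

        LLVMDumpModule(mod); /* prints the generated IR to stderr */
        LLVMDisposeBuilder(b);
        LLVMDisposeModule(mod);
        LLVMContextDispose(ctx);
        return 0;
    }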
+
+bool
+aot_compile_op_return(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip)
+{
+ AOTBlock *block_func = func_ctx->block_stack.block_list_head;
+ LLVMValueRef value;
+ LLVMValueRef ret;
+ AOTFuncType *func_type;
+ uint32 i, param_index, result_index;
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMMetadataRef return_location;
+#endif
+
+ bh_assert(block_func);
+ func_type = func_ctx->aot_func->func_type;
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ return_location = dwarf_gen_location(
+ comp_ctx, func_ctx,
+ (*p_frame_ip - 1) - comp_ctx->comp_data->wasm_module->buf_code);
+#endif
+ if (block_func->result_count) {
+ /* Store extra result values to function parameters */
+ for (i = 0; i < block_func->result_count - 1; i++) {
+ result_index = block_func->result_count - 1 - i;
+ POP(value, block_func->result_types[result_index]);
+ param_index = func_type->param_count + result_index;
+ if (!LLVMBuildStore(comp_ctx->builder, value,
+ LLVMGetParam(func_ctx->func, param_index))) {
+ aot_set_last_error("llvm build store failed.");
+ goto fail;
+ }
+ }
+ /* Return the first result value */
+ POP(value, block_func->result_types[0]);
+ if (!(ret = LLVMBuildRet(comp_ctx->builder, value))) {
+ aot_set_last_error("llvm build return failed.");
+ goto fail;
+ }
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMInstructionSetDebugLoc(ret, return_location);
+#endif
+ }
+ else {
+ if (!(ret = LLVMBuildRetVoid(comp_ctx->builder))) {
+ aot_set_last_error("llvm build return void failed.");
+ goto fail;
+ }
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMInstructionSetDebugLoc(ret, return_location);
+#endif
+ }
+
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+fail:
+ return false;
+}
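
aot_compile_op_return above implements multi-value returns by returning only the first result in the native return value; every extra result is stored through a pointer parameter that the compiler appends after the wasm parameters (param_index = func_type->param_count + result_index). In C terms the calling convention resembles this sketch (the declaration is an analogy, not the actual generated ABI):

    #include <stdint.h>

    /* wasm signature (i32, i32) -> (i32, i64), as a C analogy */
    static int32_t wasm_func(int32_t p0, int32_t p1, int64_t *extra_ret1)
    {
        *extra_ret1 = (int64_t)p0 + p1; /* second result: stored via out-pointer */
        return p0 - p1;                 /* first result: native return value */
    }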
+
+bool
+aot_compile_op_unreachable(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip)
+{
+ if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNREACHABLE, false, NULL,
+ NULL))
+ return false;
+
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+}
+
+bool
+aot_handle_next_reachable_block(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 **p_frame_ip)
+{
+ return handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.h
new file mode 100644
index 000000000..a203876c1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_control.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_CONTROL_H_
+#define _AOT_EMIT_CONTROL_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip, uint8 *frame_ip_end, uint32 label_type,
+ uint32 param_count, uint8 *param_types,
+ uint32 result_count, uint8 *result_types);
+
+bool
+aot_compile_op_else(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip);
+
+bool
+aot_compile_op_end(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip);
+
+bool
+aot_compile_op_br(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 br_depth, uint8 **p_frame_ip);
+
+bool
+aot_compile_op_br_if(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 br_depth, uint8 **p_frame_ip);
+
+bool
+aot_compile_op_br_table(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 *br_depths, uint32 br_count, uint8 **p_frame_ip);
+
+bool
+aot_compile_op_return(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip);
+
+bool
+aot_compile_op_unreachable(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 **p_frame_ip);
+
+bool
+aot_handle_next_reachable_block(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 **p_frame_ip);
+
+#if WASM_ENABLE_THREAD_MGR != 0
+bool
+check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_CONTROL_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.c
new file mode 100644
index 000000000..c3dfa6bf1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.c
@@ -0,0 +1,939 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_conversion.h"
+#include "aot_emit_exception.h"
+#include "aot_emit_numberic.h"
+#include "../aot/aot_intrinsic.h"
+#include "../aot/aot_runtime.h"
+
+static LLVMValueRef
+call_fcmp_intrinsic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum AOTFloatCond cond, LLVMRealPredicate op,
+ LLVMValueRef lhs, LLVMValueRef rhs, LLVMTypeRef src_type,
+ const char *name)
+{
+ LLVMValueRef res = NULL;
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(
+ comp_ctx, src_type == F32_TYPE ? "f32_cmp" : "f64_cmp")) {
+ LLVMTypeRef param_types[3];
+ LLVMValueRef opcond = LLVMConstInt(I32_TYPE, cond, true);
+ param_types[0] = I32_TYPE;
+ param_types[1] = src_type;
+ param_types[2] = src_type;
+ res = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, src_type == F32_TYPE ? "f32_cmp" : "f64_cmp",
+ I32_TYPE, param_types, 3, opcond, lhs, rhs);
+ if (!res) {
+ goto fail;
+ }
+ res = LLVMBuildIntCast(comp_ctx->builder, res, INT1_TYPE, "bit_cast");
+ }
+ else {
+ res = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, name);
+ }
+fail:
+ return res;
+}
+
+static bool
+trunc_float_to_int(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef operand, LLVMTypeRef src_type,
+ LLVMTypeRef dest_type, LLVMValueRef min_value,
+ LLVMValueRef max_value, char *name, bool sign)
+{
+ LLVMBasicBlockRef check_nan_succ, check_overflow_succ;
+ LLVMValueRef is_less, is_greater, res;
+
+ res = call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_UNO, LLVMRealUNO,
+ operand, operand, src_type, "fcmp_is_nan");
+
+ if (!res) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+
+ if (!(check_nan_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_nan_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_nan_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INVALID_CONVERSION_TO_INTEGER, true, res,
+ check_nan_succ)))
+ goto fail;
+
+ is_less =
+ call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_LE, LLVMRealOLE, operand,
+ min_value, src_type, "fcmp_min_value");
+
+ if (!is_less) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+
+    is_greater =
+        call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_GE, LLVMRealOGE, operand,
+                            max_value, src_type, "fcmp_max_value");
+
+ if (!is_greater) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+
+ if (!(res = LLVMBuildOr(comp_ctx->builder, is_less, is_greater,
+ "is_overflow"))) {
+ aot_set_last_error("llvm build logic and failed.");
+ goto fail;
+ }
+
+ /* Check if float value out of range */
+ if (!(check_overflow_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_overflow_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_overflow_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW, true,
+ res, check_overflow_succ)))
+ goto fail;
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, name)) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = src_type;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, name, dest_type,
+ param_types, 1, operand);
+ }
+ else {
+ if (sign)
+ res = LLVMBuildFPToSI(comp_ctx->builder, operand, dest_type, name);
+ else
+ res = LLVMBuildFPToUI(comp_ctx->builder, operand, dest_type, name);
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ if (dest_type == I32_TYPE)
+ PUSH_I32(res);
+ else if (dest_type == I64_TYPE)
+ PUSH_I64(res);
+ return true;
+fail:
+ return false;
+}
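
The min/max constants the callers pass in (for example -2147483904.0f and 2147483648.0f for i32.trunc_f32_s) are the representable floats just outside the valid range, so the `operand <= min || operand >= max` test above is exact. A plain-C reference of the trapping behavior this function emits, as a sketch where abort() stands in for raising the AOT exception:

    #include <math.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int32_t i32_trunc_f32_s(float x)
    {
        if (isnan(x))
            abort(); /* EXCE_INVALID_CONVERSION_TO_INTEGER */
        if (x <= -2147483904.0f || x >= 2147483648.0f)
            abort(); /* EXCE_INTEGER_OVERFLOW */
        return (int32_t)x; /* in range: C truncation matches LLVM fptosi */
    }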
+
+#define ADD_BASIC_BLOCK(block, name) \
+ do { \
+ if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context, \
+ func_ctx->func, name))) { \
+ aot_set_last_error("llvm add basic block failed."); \
+ goto fail; \
+ } \
+ \
+ LLVMMoveBasicBlockAfter(block, LLVMGetInsertBlock(comp_ctx->builder)); \
+ } while (0)
+
+static bool
+trunc_sat_float_to_int(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef operand, LLVMTypeRef src_type,
+ LLVMTypeRef dest_type, LLVMValueRef min_value,
+ LLVMValueRef max_value, char *name, bool sign)
+{
+ LLVMBasicBlockRef check_nan_succ, check_less_succ, check_greater_succ;
+ LLVMBasicBlockRef is_nan_block, is_less_block, is_greater_block, res_block;
+ LLVMValueRef is_less, is_greater, res, phi;
+ LLVMValueRef zero = (dest_type == I32_TYPE) ? I32_ZERO : I64_ZERO;
+ LLVMValueRef vmin, vmax;
+
+ if (!(res =
+ call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_UNO, LLVMRealUNO,
+ operand, operand, src_type, "fcmp_is_nan"))) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+
+ ADD_BASIC_BLOCK(check_nan_succ, "check_nan_succ");
+ ADD_BASIC_BLOCK(is_nan_block, "is_nan_block");
+ ADD_BASIC_BLOCK(check_less_succ, "check_less_succ");
+ ADD_BASIC_BLOCK(is_less_block, "is_less_block");
+ ADD_BASIC_BLOCK(check_greater_succ, "check_greater_succ");
+ ADD_BASIC_BLOCK(is_greater_block, "is_greater_block");
+ ADD_BASIC_BLOCK(res_block, "res_block");
+
+ if (!LLVMBuildCondBr(comp_ctx->builder, res, is_nan_block,
+ check_nan_succ)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+ /* Start to translate is_nan block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, is_nan_block);
+ if (!LLVMBuildBr(comp_ctx->builder, res_block)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Start to translate check_nan_succ block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_nan_succ);
+ if (!(is_less = call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_LE,
+ LLVMRealOLE, operand, min_value,
+ src_type, "fcmp_min_value"))) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+ if (!LLVMBuildCondBr(comp_ctx->builder, is_less, is_less_block,
+ check_less_succ)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+ /* Start to translate is_less block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, is_less_block);
+ if (!LLVMBuildBr(comp_ctx->builder, res_block)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Start to translate check_less_succ block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_less_succ);
+ if (!(is_greater = call_fcmp_intrinsic(comp_ctx, func_ctx, FLOAT_GE,
+ LLVMRealOGE, operand, max_value,
+ src_type, "fcmp_max_value"))) {
+ aot_set_last_error("llvm build fcmp failed.");
+ goto fail;
+ }
+ if (!LLVMBuildCondBr(comp_ctx->builder, is_greater, is_greater_block,
+ check_greater_succ)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+ /* Start to translate is_greater block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, is_greater_block);
+ if (!LLVMBuildBr(comp_ctx->builder, res_block)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Start to translate check_greater_succ block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_greater_succ);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, name)) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = src_type;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, name, dest_type,
+ param_types, 1, operand);
+ }
+ else {
+ char intrinsic[128];
+
+ /* Integer width is always 32 or 64 here. */
+
+ snprintf(intrinsic, sizeof(intrinsic), "i%d_trunc_f%d_%c",
+ LLVMGetIntTypeWidth(dest_type),
+ LLVMGetTypeKind(src_type) == LLVMFloatTypeKind ? 32 : 64,
+ sign ? 's' : 'u');
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ dest_type, &src_type, 1, operand);
+ }
+ else {
+ if (sign) {
+ res = LLVMBuildFPToSI(comp_ctx->builder, operand, dest_type,
+ name);
+ }
+ else {
+ res = LLVMBuildFPToUI(comp_ctx->builder, operand, dest_type,
+ name);
+ }
+ }
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+ if (!LLVMBuildBr(comp_ctx->builder, res_block)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Start to translate res_block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, res_block);
+ /* Create result phi */
+ if (!(phi = LLVMBuildPhi(comp_ctx->builder, dest_type,
+ "trunc_sat_result_phi"))) {
+ aot_set_last_error("llvm build phi failed.");
+ return false;
+ }
+
+ /* Add phi incoming values */
+ if (dest_type == I32_TYPE) {
+ if (sign) {
+ vmin = I32_CONST(INT32_MIN);
+ vmax = I32_CONST(INT32_MAX);
+ }
+ else {
+ vmin = I32_CONST(0);
+ vmax = I32_CONST(UINT32_MAX);
+ }
+ }
+ else if (dest_type == I64_TYPE) {
+ if (sign) {
+ vmin = I64_CONST(INT64_MIN);
+ vmax = I64_CONST(INT64_MAX);
+ }
+ else {
+ vmin = I64_CONST(0);
+ vmax = I64_CONST(UINT64_MAX);
+ }
+ }
+ LLVMAddIncoming(phi, &zero, &is_nan_block, 1);
+ LLVMAddIncoming(phi, &vmin, &is_less_block, 1);
+ LLVMAddIncoming(phi, &vmax, &is_greater_block, 1);
+ LLVMAddIncoming(phi, &res, &check_greater_succ, 1);
+
+ if (dest_type == I32_TYPE)
+ PUSH_I32(phi);
+ else if (dest_type == I64_TYPE)
+ PUSH_I64(phi);
+ return true;
+fail:
+ return false;
+}
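
The saturating variant replaces both traps with clamped results: the four incoming edges of the phi map NaN to zero, underflow to the type minimum (or 0 when unsigned), overflow to the type maximum, and everything else to the converted value. A plain-C reference of the i32.trunc_sat_f32_s semantics this diamond of blocks implements:

    #include <math.h>
    #include <stdint.h>

    static int32_t i32_trunc_sat_f32_s(float x)
    {
        if (isnan(x))
            return 0;              /* is_nan_block     -> phi gets zero */
        if (x <= -2147483904.0f)
            return INT32_MIN;      /* is_less_block    -> phi gets vmin */
        if (x >= 2147483648.0f)
            return INT32_MAX;      /* is_greater_block -> phi gets vmax */
        return (int32_t)x;         /* check_greater_succ -> converted value */
    }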
+
+bool
+aot_compile_op_i32_wrap_i64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value, res;
+
+ POP_I64(value);
+
+ if (!(res = LLVMBuildTrunc(comp_ctx->builder, value, I32_TYPE,
+ "i32_wrap_i64"))) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_I32(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i32_trunc_f32(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating)
+{
+ LLVMValueRef value;
+ LLVMValueRef min_value, max_value;
+
+ POP_F32(value);
+
+ if (!comp_ctx->is_indirect_mode) {
+ if (sign) {
+ min_value = F32_CONST(-2147483904.0f);
+ max_value = F32_CONST(2147483648.0f);
+ }
+ else {
+ min_value = F32_CONST(-1.0f);
+ max_value = F32_CONST(4294967296.0f);
+ }
+ }
+ else {
+ WASMValue wasm_value;
+ if (sign) {
+ wasm_value.f32 = -2147483904.0f;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ wasm_value.f32 = 2147483648.0f;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ }
+ else {
+ wasm_value.f32 = -1.0f;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ wasm_value.f32 = 4294967296.0f;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ }
+ }
+ CHECK_LLVM_CONST(min_value);
+ CHECK_LLVM_CONST(max_value);
+
+ if (!saturating)
+ return trunc_float_to_int(
+ comp_ctx, func_ctx, value, F32_TYPE, I32_TYPE, min_value, max_value,
+ sign ? "i32_trunc_f32_s" : "i32_trunc_f32_u", sign);
+ else
+ return trunc_sat_float_to_int(
+ comp_ctx, func_ctx, value, F32_TYPE, I32_TYPE, min_value, max_value,
+ sign ? "i32_trunc_sat_f32_s" : "i32_trunc_sat_f32_u", sign);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i32_trunc_f64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating)
+{
+ LLVMValueRef value;
+ LLVMValueRef min_value, max_value;
+
+ POP_F64(value);
+
+ if (!comp_ctx->is_indirect_mode) {
+ if (sign) {
+ min_value = F64_CONST(-2147483649.0);
+ max_value = F64_CONST(2147483648.0);
+ }
+ else {
+ min_value = F64_CONST(-1.0);
+ max_value = F64_CONST(4294967296.0);
+ }
+ }
+ else {
+ WASMValue wasm_value;
+ if (sign) {
+ wasm_value.f64 = -2147483649.0;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ wasm_value.f64 = 2147483648.0;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ }
+ else {
+ wasm_value.f64 = -1.0;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ wasm_value.f64 = 4294967296.0;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ }
+ }
+ CHECK_LLVM_CONST(min_value);
+ CHECK_LLVM_CONST(max_value);
+
+ if (!saturating)
+ return trunc_float_to_int(
+ comp_ctx, func_ctx, value, F64_TYPE, I32_TYPE, min_value, max_value,
+ sign ? "i32_trunc_f64_s" : "i32_trunc_f64_u", sign);
+ else
+ return trunc_sat_float_to_int(
+ comp_ctx, func_ctx, value, F64_TYPE, I32_TYPE, min_value, max_value,
+ sign ? "i32_trunc_sat_f64_s" : "i32_trunc_sat_f64_u", sign);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_extend_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign)
+{
+ LLVMValueRef value, res;
+
+ POP_I32(value);
+
+ if (sign)
+ res = LLVMBuildSExt(comp_ctx->builder, value, I64_TYPE,
+ "i64_extend_i32_s");
+ else
+ res = LLVMBuildZExt(comp_ctx->builder, value, I64_TYPE,
+ "i64_extend_i32_u");
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_I64(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_extend_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, int8 bitwidth)
+{
+ LLVMValueRef value, res, cast_value = NULL;
+
+ POP_I64(value);
+
+ if (bitwidth == 8) {
+ cast_value = LLVMBuildIntCast2(comp_ctx->builder, value, INT8_TYPE,
+ true, "i8_intcast_i64");
+ }
+ else if (bitwidth == 16) {
+ cast_value = LLVMBuildIntCast2(comp_ctx->builder, value, INT16_TYPE,
+ true, "i16_intcast_i64");
+ }
+ else if (bitwidth == 32) {
+ cast_value = LLVMBuildIntCast2(comp_ctx->builder, value, I32_TYPE, true,
+ "i32_intcast_i64");
+ }
+
+ if (!cast_value) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ res = LLVMBuildSExt(comp_ctx->builder, cast_value, I64_TYPE,
+ "i64_extend_i64_s");
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_I64(res);
+ return true;
+fail:
+ return false;
+}
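
The extend opcodes handled above (i64.extend8_s, i64.extend16_s, i64.extend32_s) first narrow the operand with an integer cast and then sign-extend it back to 64 bits. The plain-C equivalents are just a pair of casts:

    #include <stdint.h>

    static int64_t i64_extend8_s(int64_t v)  { return (int64_t)(int8_t)v;  }
    static int64_t i64_extend16_s(int64_t v) { return (int64_t)(int16_t)v; }
    static int64_t i64_extend32_s(int64_t v) { return (int64_t)(int32_t)v; }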
+
+bool
+aot_compile_op_i32_extend_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, int8 bitwidth)
+{
+ LLVMValueRef value, res, cast_value = NULL;
+
+ POP_I32(value);
+
+ if (bitwidth == 8) {
+ cast_value = LLVMBuildIntCast2(comp_ctx->builder, value, INT8_TYPE,
+ true, "i8_intcast_i32");
+ }
+ else if (bitwidth == 16) {
+ cast_value = LLVMBuildIntCast2(comp_ctx->builder, value, INT16_TYPE,
+ true, "i16_intcast_i32");
+ }
+
+ if (!cast_value) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ res = LLVMBuildSExt(comp_ctx->builder, cast_value, I32_TYPE,
+ "i32_extend_i32_s");
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_I32(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_trunc_f32(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating)
+{
+ LLVMValueRef value;
+ LLVMValueRef min_value, max_value;
+
+ POP_F32(value);
+
+ if (!comp_ctx->is_indirect_mode) {
+ if (sign) {
+ min_value = F32_CONST(-9223373136366403584.0f);
+ max_value = F32_CONST(9223372036854775808.0f);
+ }
+ else {
+ min_value = F32_CONST(-1.0f);
+ max_value = F32_CONST(18446744073709551616.0f);
+ }
+ }
+ else {
+ WASMValue wasm_value;
+ if (sign) {
+ wasm_value.f32 = -9223373136366403584.0f;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ wasm_value.f32 = 9223372036854775808.0f;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ }
+ else {
+ wasm_value.f32 = -1.0f;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ wasm_value.f32 = 18446744073709551616.0f;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F32);
+ }
+ }
+ CHECK_LLVM_CONST(min_value);
+ CHECK_LLVM_CONST(max_value);
+
+ if (!saturating)
+ return trunc_float_to_int(
+ comp_ctx, func_ctx, value, F32_TYPE, I64_TYPE, min_value, max_value,
+ sign ? "i64_trunc_f32_s" : "i64_trunc_f32_u", sign);
+ else
+ return trunc_sat_float_to_int(
+ comp_ctx, func_ctx, value, F32_TYPE, I64_TYPE, min_value, max_value,
+ sign ? "i64_trunc_sat_f32_s" : "i64_trunc_sat_f32_u", sign);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_trunc_f64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating)
+{
+ LLVMValueRef value;
+ LLVMValueRef min_value, max_value;
+
+ POP_F64(value);
+
+ if (!comp_ctx->is_indirect_mode) {
+ if (sign) {
+ min_value = F64_CONST(-9223372036854777856.0);
+ max_value = F64_CONST(9223372036854775808.0);
+ }
+ else {
+ min_value = F64_CONST(-1.0);
+ max_value = F64_CONST(18446744073709551616.0);
+ }
+ }
+ else {
+ WASMValue wasm_value;
+ if (sign) {
+ wasm_value.f64 = -9223372036854777856.0;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ wasm_value.f64 = 9223372036854775808.0;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ }
+ else {
+ wasm_value.f64 = -1.0;
+ min_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ wasm_value.f64 = 18446744073709551616.0;
+ max_value = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_F64);
+ }
+ }
+ CHECK_LLVM_CONST(min_value);
+ CHECK_LLVM_CONST(max_value);
+
+ if (!saturating)
+ return trunc_float_to_int(
+ comp_ctx, func_ctx, value, F64_TYPE, I64_TYPE, min_value, max_value,
+ sign ? "i64_trunc_f64_s" : "i64_trunc_f64_u", sign);
+ else
+ return trunc_sat_float_to_int(
+ comp_ctx, func_ctx, value, F64_TYPE, I64_TYPE, min_value, max_value,
+ sign ? "i64_trunc_sat_f64_s" : "i64_trunc_sat_f64_u", sign);
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_convert_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign)
+{
+ LLVMValueRef value, res;
+
+ POP_I32(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(
+ comp_ctx, sign ? "f32_convert_i32_s" : "f32_convert_i32_u")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = I32_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ sign ? "f32_convert_i32_s"
+ : "f32_convert_i32_u",
+ F32_TYPE, param_types, 1, value);
+ }
+ else {
+ if (sign)
+ res = LLVMBuildSIToFP(comp_ctx->builder, value, F32_TYPE,
+ "f32_convert_i32_s");
+ else
+ res = LLVMBuildUIToFP(comp_ctx->builder, value, F32_TYPE,
+ "f32_convert_i32_u");
+ }
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F32(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_convert_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign)
+{
+ LLVMValueRef value, res;
+
+ POP_I64(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(
+ comp_ctx, sign ? "f32_convert_i64_s" : "f32_convert_i64_u")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = I64_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ sign ? "f32_convert_i64_s"
+ : "f32_convert_i64_u",
+ F32_TYPE, param_types, 1, value);
+ }
+ else {
+ if (sign)
+ res = LLVMBuildSIToFP(comp_ctx->builder, value, F32_TYPE,
+ "f32_convert_i64_s");
+ else
+ res = LLVMBuildUIToFP(comp_ctx->builder, value, F32_TYPE,
+ "f32_convert_i64_u");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F32(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_demote_f64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value, res;
+
+ POP_F64(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, "f32_demote_f64")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = F64_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, "f32_demote_f64",
+ F32_TYPE, param_types, 1, value);
+ }
+ else {
+ res = LLVMBuildFPTrunc(comp_ctx->builder, value, F32_TYPE,
+ "f32_demote_f64");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F32(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_convert_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign)
+{
+ LLVMValueRef value, res;
+
+ POP_I32(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(
+ comp_ctx, sign ? "f64_convert_i32_s" : "f64_convert_i32_u")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = I32_TYPE;
+
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ sign ? "f64_convert_i32_s"
+ : "f64_convert_i32_u",
+ F64_TYPE, param_types, 1, value);
+ }
+ else {
+ if (sign)
+ res = LLVMBuildSIToFP(comp_ctx->builder, value, F64_TYPE,
+ "f64_convert_i32_s");
+ else
+ res = LLVMBuildUIToFP(comp_ctx->builder, value, F64_TYPE,
+ "f64_convert_i32_u");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F64(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_convert_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign)
+{
+ LLVMValueRef value, res;
+
+ POP_I64(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(
+ comp_ctx, sign ? "f64_convert_i64_s" : "f64_convert_i64_u")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = I64_TYPE;
+
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ sign ? "f64_convert_i64_s"
+ : "f64_convert_i64_u",
+ F64_TYPE, param_types, 1, value);
+ }
+ else {
+ if (sign)
+ res = LLVMBuildSIToFP(comp_ctx->builder, value, F64_TYPE,
+ "f64_convert_i64_s");
+ else
+ res = LLVMBuildUIToFP(comp_ctx->builder, value, F64_TYPE,
+ "f64_convert_i64_u");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F64(res);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_promote_f32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value, res;
+
+ POP_F32(value);
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, "f64_promote_f32")) {
+ LLVMTypeRef param_types[1];
+ param_types[0] = F32_TYPE;
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, "f64_promote_f32",
+ F64_TYPE, param_types, 1, value);
+ }
+ else {
+ res = LLVMBuildFPExt(comp_ctx->builder, value, F64_TYPE,
+ "f64_promote_f32");
+ }
+
+ if (!res) {
+ aot_set_last_error("llvm build conversion failed.");
+ return false;
+ }
+
+ PUSH_F64(res);
+
+ /* Avoid the promote being optimized away */
+ PUSH_F64(F64_CONST(1.0));
+ return aot_compile_op_f64_arithmetic(comp_ctx, func_ctx, FLOAT_MUL);
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i64_reinterpret_f64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value;
+ POP_F64(value);
+ if (!(value =
+ LLVMBuildBitCast(comp_ctx->builder, value, I64_TYPE, "i64"))) {
+ aot_set_last_error("llvm build fp to si failed.");
+ return false;
+ }
+ PUSH_I64(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i32_reinterpret_f32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value;
+ POP_F32(value);
+ if (!(value =
+ LLVMBuildBitCast(comp_ctx->builder, value, I32_TYPE, "i32"))) {
+ aot_set_last_error("llvm build fp to si failed.");
+ return false;
+ }
+ PUSH_I32(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_reinterpret_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value;
+ POP_I64(value);
+ if (!(value =
+ LLVMBuildBitCast(comp_ctx->builder, value, F64_TYPE, "f64"))) {
+ aot_set_last_error("llvm build si to fp failed.");
+ return false;
+ }
+ PUSH_F64(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_reinterpret_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef value;
+ POP_I32(value);
+ if (!(value =
+ LLVMBuildBitCast(comp_ctx->builder, value, F32_TYPE, "f32"))) {
+ aot_set_last_error("llvm build si to fp failed.");
+ return false;
+ }
+ PUSH_F32(value);
+ return true;
+fail:
+ return false;
+}
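
The four reinterpret opcodes at the end of this file are pure bit moves, lowered to LLVM bitcast: the 32 or 64 bits are unchanged, only the type of the stack slot changes. The portable C equivalent is a memcpy between same-sized types, sketched here:

    #include <stdint.h>
    #include <string.h>

    static int32_t i32_reinterpret_f32(float f)
    {
        int32_t i;
        memcpy(&i, &f, sizeof(i)); /* same 32 bits, viewed as an integer */
        return i;
    }

    static float f32_reinterpret_i32(int32_t i)
    {
        float f;
        memcpy(&f, &i, sizeof(f)); /* same 32 bits, viewed as a float */
        return f;
    }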
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.h
new file mode 100644
index 000000000..a0e2fcb2e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_conversion.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_CONVERSION_H_
+#define _AOT_EMIT_CONVERSION_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_i32_wrap_i64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i32_trunc_f32(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating);
+
+bool
+aot_compile_op_i32_trunc_f64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating);
+
+bool
+aot_compile_op_i64_extend_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign);
+
+bool
+aot_compile_op_i64_extend_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, int8 bitwidth);
+
+bool
+aot_compile_op_i32_extend_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, int8 bitwidth);
+
+bool
+aot_compile_op_i64_trunc_f32(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating);
+
+bool
+aot_compile_op_i64_trunc_f64(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool sign, bool saturating);
+
+bool
+aot_compile_op_f32_convert_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign);
+
+bool
+aot_compile_op_f32_convert_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign);
+
+bool
+aot_compile_op_f32_demote_f64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_f64_convert_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign);
+
+bool
+aot_compile_op_f64_convert_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool sign);
+
+bool
+aot_compile_op_f64_promote_f32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i64_reinterpret_f64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i32_reinterpret_f32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_f64_reinterpret_i64(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_f32_reinterpret_i32(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_CONVERSION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.c
new file mode 100644
index 000000000..d40ccc6a4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_exception.h"
+#include "../interpreter/wasm_runtime.h"
+#include "../aot/aot_runtime.h"
+
+bool
+aot_emit_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int32 exception_id, bool is_cond_br, LLVMValueRef cond_br_if,
+ LLVMBasicBlockRef cond_br_else_block)
+{
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMValueRef exce_id = I32_CONST((uint32)exception_id), func_const, func;
+ LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
+ LLVMValueRef param_values[2];
+
+ bh_assert(exception_id >= 0 && exception_id < EXCE_NUM);
+
+ CHECK_LLVM_CONST(exce_id);
+
+ /* Create got_exception block if needed */
+ if (!func_ctx->got_exception_block) {
+ if (!(func_ctx->got_exception_block = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "got_exception"))) {
+ aot_set_last_error("add LLVM basic block failed.");
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder,
+ func_ctx->got_exception_block);
+
+        /* Create exception id phi */
+ if (!(func_ctx->exception_id_phi = LLVMBuildPhi(
+ comp_ctx->builder, I32_TYPE, "exception_id_phi"))) {
+ aot_set_last_error("llvm build phi failed.");
+ return false;
+ }
+
+ /* Call aot_set_exception_with_id() to throw exception */
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ ret_type = VOID_TYPE;
+
+ /* Create function type */
+ if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ if (comp_ctx->is_jit_mode) {
+ /* Create function type */
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+ /* Create LLVM function with const function pointer */
+ if (!(func_const =
+ I64_CONST((uint64)(uintptr_t)jit_set_exception_with_id))
+ || !(func = LLVMConstIntToPtr(func_const, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ int32 func_index;
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ func_index = aot_get_native_symbol_index(
+ comp_ctx, "aot_set_exception_with_id");
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func =
+ aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ /* Create LLVM function with external function pointer */
+ if (!(func = LLVMGetNamedFunction(func_ctx->module,
+ "aot_set_exception_with_id"))
+ && !(func = LLVMAddFunction(func_ctx->module,
+ "aot_set_exception_with_id",
+ func_type))) {
+ aot_set_last_error("add LLVM function failed.");
+ return false;
+ }
+ }
+
+ /* Call the aot_set_exception_with_id() function */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = func_ctx->exception_id_phi;
+ if (!LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values, 2,
+ "")) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ /* Create return IR */
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+ if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
+ return false;
+ }
+
+ /* Resume the builder position */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
+ }
+
+ /* Add phi incoming value to got_exception block */
+ LLVMAddIncoming(func_ctx->exception_id_phi, &exce_id, &block_curr, 1);
+
+ if (!is_cond_br) {
+ /* not condition br, create br IR */
+ if (!LLVMBuildBr(comp_ctx->builder, func_ctx->got_exception_block)) {
+ aot_set_last_error("llvm build br failed.");
+ return false;
+ }
+ }
+ else {
+ /* Create condition br */
+ if (!LLVMBuildCondBr(comp_ctx->builder, cond_br_if,
+ func_ctx->got_exception_block,
+ cond_br_else_block)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+ /* Start to translate the else block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, cond_br_else_block);
+ }
+
+ return true;
+fail:
+ return false;
+}
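
All throw sites in a function funnel into one shared got_exception block: each site adds its exception id as a phi incoming value, so the call to aot_set_exception_with_id() and the zeroed return are emitted only once per function. In C terms the generated control flow resembles this sketch (the stub and the ids are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the runtime's aot_set_exception_with_id() */
    static void set_exception_with_id(void *inst, int32_t id)
    {
        (void)inst;
        fprintf(stderr, "exception id %d\n", (int)id);
    }

    static int32_t func_body(void *inst)
    {
        int32_t exce_id;
        int oob = 0; /* stands for some check in the translated code */

        if (oob) {
            exce_id = 2; /* hypothetical exception id; the phi's role */
            goto got_exception;
        }
        return 42;

    got_exception:
        set_exception_with_id(inst, exce_id); /* emitted once per function */
        return 0; /* zeroed result, as aot_build_zero_function_ret produces */
    }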
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.h
new file mode 100644
index 000000000..91c8bd3cf
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_exception.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_EXCEPTION_H_
+#define _AOT_EMIT_EXCEPTION_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_emit_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ int32 exception_id, bool is_cond_br, LLVMValueRef cond_br_if,
+ LLVMBasicBlockRef cond_br_else_block);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_EXCEPTION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.c
new file mode 100644
index 000000000..9ba8baa24
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.c
@@ -0,0 +1,1729 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_function.h"
+#include "aot_emit_exception.h"
+#include "aot_emit_control.h"
+#include "aot_emit_table.h"
+#include "../aot/aot_runtime.h"
+
+#define ADD_BASIC_BLOCK(block, name) \
+ do { \
+ if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context, \
+ func_ctx->func, name))) { \
+ aot_set_last_error("llvm add basic block failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+static bool
+create_func_return_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+
+ /* Create function return block if it isn't created */
+ if (!func_ctx->func_return_block) {
+ if (!(func_ctx->func_return_block = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "func_ret"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+
+ /* Create return IR */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder,
+ func_ctx->func_return_block);
+ if (!comp_ctx->enable_bound_check) {
+ if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_ALREADY_THROWN,
+ false, NULL, NULL)) {
+ return false;
+ }
+ }
+ else if (!aot_build_zero_function_ret(comp_ctx, func_ctx,
+ aot_func_type)) {
+ return false;
+ }
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
+ return true;
+}
+
+/* Check whether there was exception thrown, if yes, return directly */
+static bool
+check_exception_thrown(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMBasicBlockRef block_curr, check_exce_succ;
+ LLVMValueRef value, cmp;
+
+ /* Create function return block if it isn't created */
+ if (!create_func_return_block(comp_ctx, func_ctx))
+ return false;
+
+ /* Load the first byte of aot_module_inst->cur_exception, and check
+ whether it is '\0'. If yes, no exception was thrown. */
+ if (!(value = LLVMBuildLoad2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->cur_exception, "exce_value"))
+ || !(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntEQ, value, I8_ZERO,
+ "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+
+    /* Add check exception success block */
+ if (!(check_exce_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_exce_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+
+ block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMMoveBasicBlockAfter(check_exce_succ, block_curr);
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
+ /* Create condition br */
+ if (!LLVMBuildCondBr(comp_ctx->builder, cmp, check_exce_succ,
+ func_ctx->func_return_block)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_exce_succ);
+ return true;
+}
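
After every call that may throw, the generated code peeks at the first byte of the instance's cur_exception buffer and unwinds early if it is non-zero. A plain-C sketch of the check (the struct field is illustrative):

    struct inst_sketch { char cur_exception[128]; };

    static int call_and_check(struct inst_sketch *inst)
    {
        /* ... the translated call happened here ... */
        if (inst->cur_exception[0] != '\0')
            return 0; /* branch to func_return_block: unwind to the runtime */
        /* check_exce_succ: keep translating the next opcode */
        return 1;
    }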
+
+/* Check whether the call failed (exception was thrown), if yes, return directly */
+static bool
+check_call_return(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef res)
+{
+ LLVMBasicBlockRef block_curr, check_call_succ;
+ LLVMValueRef cmp;
+
+ /* Create function return block if it isn't created */
+ if (!create_func_return_block(comp_ctx, func_ctx))
+ return false;
+
+ if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, res, I8_ZERO,
+ "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+
+    /* Add check call success block */
+ if (!(check_call_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_call_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+
+ block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMMoveBasicBlockAfter(check_call_succ, block_curr);
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
+ /* Create condition br */
+ if (!LLVMBuildCondBr(comp_ctx->builder, cmp, check_call_succ,
+ func_ctx->func_return_block)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_call_succ);
+ return true;
+}
+
+static bool
+call_aot_invoke_native_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef func_idx, AOTFuncType *aot_func_type,
+ LLVMTypeRef *param_types,
+ LLVMValueRef *param_values, uint32 param_count,
+ uint32 param_cell_num, LLVMTypeRef ret_type,
+ uint8 wasm_ret_type, LLVMValueRef *p_value_ret,
+ LLVMValueRef *p_res)
+{
+ LLVMTypeRef func_type, func_ptr_type, func_param_types[4];
+ LLVMTypeRef ret_ptr_type, elem_ptr_type;
+ LLVMValueRef func, elem_idx, elem_ptr;
+ LLVMValueRef func_param_values[4], value_ret = NULL, res;
+ char buf[32], *func_name = "aot_invoke_native";
+ uint32 i, cell_num = 0;
+
+ /* prepare function type of aot_invoke_native */
+ func_param_types[0] = comp_ctx->exec_env_type; /* exec_env */
+ func_param_types[1] = I32_TYPE; /* func_idx */
+ func_param_types[2] = I32_TYPE; /* argc */
+ func_param_types[3] = INT32_PTR_TYPE; /* argv */
+ if (!(func_type =
+ LLVMFunctionType(INT8_TYPE, func_param_types, 4, false))) {
+ aot_set_last_error("llvm add function type failed.");
+ return false;
+ }
+
+ /* prepare function pointer */
+ if (comp_ctx->is_jit_mode) {
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ /* JIT mode, call the function directly */
+ if (!(func = I64_CONST((uint64)(uintptr_t)llvm_jit_invoke_native))
+ || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ int32 func_index;
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+ func_index = aot_get_native_symbol_index(comp_ctx, func_name);
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
+ && !(func =
+ LLVMAddFunction(func_ctx->module, func_name, func_type))) {
+ aot_set_last_error("add LLVM function failed.");
+ return false;
+ }
+ }
+
+ if (param_cell_num > 64) {
+ aot_set_last_error("prepare native arguments failed: "
+ "maximum 64 parameter cell number supported.");
+ return false;
+ }
+
+ /* prepare frame_lp */
+ for (i = 0; i < param_count; i++) {
+ if (!(elem_idx = I32_CONST(cell_num))
+ || !(elem_ptr_type = LLVMPointerType(param_types[i], 0))) {
+ aot_set_last_error("llvm add const or pointer type failed.");
+ return false;
+ }
+
+ snprintf(buf, sizeof(buf), "%s%d", "elem", i);
+ if (!(elem_ptr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE,
+ func_ctx->argv_buf, &elem_idx, 1, buf))
+ || !(elem_ptr = LLVMBuildBitCast(comp_ctx->builder, elem_ptr,
+ elem_ptr_type, buf))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ if (!(res = LLVMBuildStore(comp_ctx->builder, param_values[i],
+ elem_ptr))) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ LLVMSetAlignment(res, 1);
+
+ cell_num += wasm_value_type_cell_num(aot_func_type->types[i]);
+ }
+
+ func_param_values[0] = func_ctx->exec_env;
+ func_param_values[1] = func_idx;
+ func_param_values[2] = I32_CONST(param_cell_num);
+ func_param_values[3] = func_ctx->argv_buf;
+
+ if (!func_param_values[2]) {
+ aot_set_last_error("llvm create const failed.");
+ return false;
+ }
+
+ /* call aot_invoke_native() function */
+ if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ func_param_values, 4, "res"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ /* get function return value */
+ if (wasm_ret_type != VALUE_TYPE_VOID) {
+ if (!(ret_ptr_type = LLVMPointerType(ret_type, 0))) {
+ aot_set_last_error("llvm add pointer type failed.");
+ return false;
+ }
+
+ if (!(value_ret =
+ LLVMBuildBitCast(comp_ctx->builder, func_ctx->argv_buf,
+ ret_ptr_type, "argv_ret"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+ if (!(*p_value_ret = LLVMBuildLoad2(comp_ctx->builder, ret_type,
+ value_ret, "value_ret"))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ }
+
+ *p_res = res;
+ return true;
+}
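
The frame_lp preparation above packs every argument back-to-back into the shared 32-bit cell buffer (argv_buf): i32/f32 occupy one cell, i64/f64 two, and each store is emitted with alignment 1 since a 64-bit value may start on an odd cell. A sketch of the same packing in plain C (helper names are illustrative):

    #include <stdint.h>
    #include <string.h>

    static uint32_t pack_i32(uint32_t *argv, uint32_t cell, int32_t v)
    {
        memcpy(&argv[cell], &v, sizeof(v)); /* one cell */
        return cell + 1;
    }

    static uint32_t pack_i64(uint32_t *argv, uint32_t cell, int64_t v)
    {
        memcpy(&argv[cell], &v, sizeof(v)); /* two cells, unaligned-safe */
        return cell + 2;
    }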
+
+#if (WASM_ENABLE_DUMP_CALL_STACK != 0) || (WASM_ENABLE_PERF_PROFILING != 0)
+static bool
+call_aot_alloc_frame_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef func_idx)
+{
+ LLVMValueRef param_values[2], ret_value, value, func;
+ LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef frame_alloc_fail, frame_alloc_success;
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+
+ param_types[0] = comp_ctx->exec_env_type;
+ param_types[1] = I32_TYPE;
+ ret_type = INT8_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_alloc_frame, 2);
+ else
+ GET_AOT_FUNCTION(aot_alloc_frame, 2);
+
+ param_values[0] = func_ctx->exec_env;
+ param_values[1] = func_idx;
+
+ if (!(ret_value =
+ LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values,
+ 2, "call_aot_alloc_frame"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ if (!(ret_value = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGT, ret_value,
+ I8_ZERO, "frame_alloc_ret"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+
+ ADD_BASIC_BLOCK(frame_alloc_fail, "frame_alloc_fail");
+ ADD_BASIC_BLOCK(frame_alloc_success, "frame_alloc_success");
+
+ LLVMMoveBasicBlockAfter(frame_alloc_fail, block_curr);
+ LLVMMoveBasicBlockAfter(frame_alloc_success, block_curr);
+
+ if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, frame_alloc_success,
+ frame_alloc_fail)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+
+    /* If frame allocation failed, return from this function
+       so the runtime can catch the exception */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, frame_alloc_fail);
+ if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, frame_alloc_success);
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool
+call_aot_free_frame_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef param_values[1], ret_value, value, func;
+ LLVMTypeRef param_types[1], ret_type, func_type, func_ptr_type;
+
+ param_types[0] = comp_ctx->exec_env_type;
+ ret_type = INT8_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_free_frame, 1);
+ else
+ GET_AOT_FUNCTION(aot_free_frame, 1);
+
+ param_values[0] = func_ctx->exec_env;
+
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 1, "call_aot_free_frame"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ return true;
+fail:
+ return false;
+}
+#endif /* end of (WASM_ENABLE_DUMP_CALL_STACK != 0) \
+ || (WASM_ENABLE_PERF_PROFILING != 0) */
+
+static bool
+record_stack_usage(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 callee_cell_num)
+{
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef block_update;
+ LLVMBasicBlockRef block_after_update;
+ LLVMValueRef callee_local_size, new_sp, cmp;
+ LLVMValueRef native_stack_top_min;
+ LLVMTypeRef ptrdiff_type;
+ if (comp_ctx->pointer_size == sizeof(uint64_t)) {
+ ptrdiff_type = I64_TYPE;
+ }
+ else {
+ ptrdiff_type = I32_TYPE;
+ }
+
+ /*
+ * new_sp = last_alloca - callee_local_size;
+ * if (*native_stack_top_min_addr > new_sp) {
+ * *native_stack_top_min_addr = new_sp;
+ * }
+ */
+
+ if (!(callee_local_size = LLVMConstInt(
+ ptrdiff_type, -(int64_t)callee_cell_num * 4, true))) {
+ aot_set_last_error("llvm build const failed.");
+ return false;
+ }
+ if (!(new_sp = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->last_alloca,
+ &callee_local_size, 1, "new_sp"))) {
+ aot_set_last_error("llvm build gep failed");
+ return false;
+ }
+ if (!(native_stack_top_min = LLVMBuildLoad2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->native_stack_top_min_addr, "native_stack_top_min"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntULT, new_sp,
+ native_stack_top_min, "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+
+ if (!(block_update = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "block_update"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+ if (!(block_after_update = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "block_after_update"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+ LLVMMoveBasicBlockAfter(block_update, block_curr);
+ LLVMMoveBasicBlockAfter(block_after_update, block_update);
+
+ if (!LLVMBuildCondBr(comp_ctx->builder, cmp, block_update,
+ block_after_update)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_update);
+ if (!LLVMBuildStore(comp_ctx->builder, new_sp,
+ func_ctx->native_stack_top_min_addr)) {
+ aot_set_last_error("llvm build store failed");
+ return false;
+ }
+ if (!LLVMBuildBr(comp_ctx->builder, block_after_update)) {
+ aot_set_last_error("llvm build br failed.");
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_after_update);
+ return true;
+}
+
+static bool
+check_stack_boundary(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 callee_cell_num)
+{
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef check_stack;
+ LLVMValueRef callee_local_size, stack_bound, cmp;
+
+ if (!(callee_local_size = I32_CONST(callee_cell_num * 4))) {
+ aot_set_last_error("llvm build const failed.");
+ return false;
+ }
+
+ if (!(stack_bound = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, func_ctx->native_stack_bound,
+ &callee_local_size, 1, "stack_bound"))) {
+ aot_set_last_error("llvm build inbound gep failed.");
+ return false;
+ }
+
+ if (!(check_stack = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_stack"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+
+ LLVMMoveBasicBlockAfter(check_stack, block_curr);
+
+ if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntULT,
+ func_ctx->last_alloca, stack_bound, "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+
+ if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_NATIVE_STACK_OVERFLOW,
+ true, cmp, check_stack)) {
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_stack);
+ return true;
+}
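+
+/*
+ * A rough C sketch of the guard emitted above, assuming the native stack
+ * grows downward and native_stack_bound is its lower limit:
+ *
+ *   if (last_alloca < native_stack_bound + callee_cell_num * 4)
+ *       trap(EXCE_NATIVE_STACK_OVERFLOW);
+ *
+ * i.e. the call is rejected unless the callee's locals still fit above
+ * the boundary.
+ */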
+
+static bool
+check_stack(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 callee_cell_num)
+{
+ if (comp_ctx->enable_stack_estimation
+ && !record_stack_usage(comp_ctx, func_ctx, callee_cell_num))
+ return false;
+ if (comp_ctx->enable_stack_bound_check
+ && !check_stack_boundary(comp_ctx, func_ctx, callee_cell_num))
+ return false;
+ return true;
+}
+
+/**
+ * Check whether the app address and its buffer are inside the linear memory,
+ * if no, throw exception
+ */
+static bool
+check_app_addr_and_convert(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_str_arg, LLVMValueRef app_addr,
+ LLVMValueRef buf_size,
+ LLVMValueRef *p_native_addr_converted)
+{
+ LLVMTypeRef func_type, func_ptr_type, func_param_types[5];
+ LLVMValueRef func, func_param_values[5], res, native_addr_ptr;
+ char *func_name = "aot_check_app_addr_and_convert";
+
+ /* prepare function type of aot_check_app_addr_and_convert */
+ func_param_types[0] = comp_ctx->aot_inst_type; /* module_inst */
+ func_param_types[1] = INT8_TYPE; /* is_str_arg */
+ func_param_types[2] = I32_TYPE; /* app_offset */
+ func_param_types[3] = I32_TYPE; /* buf_size */
+ func_param_types[4] =
+ comp_ctx->basic_types.int8_pptr_type; /* p_native_addr */
+ if (!(func_type =
+ LLVMFunctionType(INT8_TYPE, func_param_types, 5, false))) {
+ aot_set_last_error("llvm add function type failed.");
+ return false;
+ }
+
+ /* prepare function pointer */
+ if (comp_ctx->is_jit_mode) {
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ /* JIT mode, call the function directly */
+ if (!(func =
+ I64_CONST((uint64)(uintptr_t)jit_check_app_addr_and_convert))
+ || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ int32 func_index;
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+ func_index = aot_get_native_symbol_index(comp_ctx, func_name);
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
+ && !(func =
+ LLVMAddFunction(func_ctx->module, func_name, func_type))) {
+ aot_set_last_error("add LLVM function failed.");
+ return false;
+ }
+ }
+
+ if (!(native_addr_ptr = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->argv_buf,
+ comp_ctx->basic_types.int8_pptr_type, "p_native_addr"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ func_param_values[0] = func_ctx->aot_inst;
+ func_param_values[1] = I8_CONST(is_str_arg);
+ func_param_values[2] = app_addr;
+ func_param_values[3] = buf_size;
+ func_param_values[4] = native_addr_ptr;
+
+ if (!func_param_values[1]) {
+ aot_set_last_error("llvm create const failed.");
+ return false;
+ }
+
+ /* call aot_check_app_addr_and_convert() function */
+ if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ func_param_values, 5, "res"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ /* Check whether exception was thrown when executing the function */
+ if (comp_ctx->enable_bound_check
+ && !check_call_return(comp_ctx, func_ctx, res)) {
+ return false;
+ }
+
+ if (!(*p_native_addr_converted =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE, native_addr_ptr,
+ "native_addr"))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+
+ return true;
+}
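+
+/*
+ * For reference, the runtime helper called above has approximately this C
+ * signature (a sketch, not copied from the runtime headers); it returns
+ * true when the app region is valid and stores the converted native
+ * pointer through p_native_addr:
+ *
+ *   bool
+ *   aot_check_app_addr_and_convert(AOTModuleInstance *module_inst,
+ *                                  bool is_str, uint32 app_buf_addr,
+ *                                  uint32 app_buf_size,
+ *                                  void **p_native_addr);
+ */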
+
+bool
+aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 func_idx, bool tail_call)
+{
+ uint32 import_func_count = comp_ctx->comp_data->import_func_count;
+ AOTImportFunc *import_funcs = comp_ctx->comp_data->import_funcs;
+ uint32 func_count = comp_ctx->func_ctx_count, param_cell_num = 0;
+ uint32 ext_ret_cell_num = 0, cell_num = 0;
+ AOTFuncContext **func_ctxes = comp_ctx->func_ctxes;
+ AOTFuncType *func_type;
+ AOTFunc *aot_func;
+ LLVMTypeRef *param_types = NULL, ret_type;
+ LLVMTypeRef ext_ret_ptr_type;
+ LLVMValueRef *param_values = NULL, value_ret = NULL, func;
+ LLVMValueRef import_func_idx, res;
+ LLVMValueRef ext_ret, ext_ret_ptr, ext_ret_idx;
+ int32 i, j = 0, param_count, result_count, ext_ret_count;
+ uint64 total_size;
+ uint32 callee_cell_num;
+ uint8 wasm_ret_type;
+ uint8 *ext_ret_types = NULL;
+ const char *signature = NULL;
+ bool ret = false;
+ char buf[32];
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ return false;
+ }
+#endif
+
+ /* Check function index */
+ if (func_idx >= import_func_count + func_count) {
+ aot_set_last_error("Function index out of range.");
+ return false;
+ }
+
+ /* Get function type */
+ if (func_idx < import_func_count) {
+ func_type = import_funcs[func_idx].func_type;
+ signature = import_funcs[func_idx].signature;
+ }
+ else {
+ func_type =
+ func_ctxes[func_idx - import_func_count]->aot_func->func_type;
+ }
+
+ /* Get param cell number */
+ param_cell_num = func_type->param_cell_num;
+
+#if (WASM_ENABLE_DUMP_CALL_STACK != 0) || (WASM_ENABLE_PERF_PROFILING != 0)
+ if (comp_ctx->enable_aux_stack_frame) {
+ LLVMValueRef func_idx_const;
+
+ if (!(func_idx_const = I32_CONST(func_idx))) {
+ aot_set_last_error("llvm build const failed.");
+ return false;
+ }
+ if (!call_aot_alloc_frame_func(comp_ctx, func_ctx, func_idx_const))
+ return false;
+ }
+#endif
+
+ /* Allocate memory for parameters.
+ * Parameters layout:
+ * - exec env
+ * - wasm function's parameters
+     *   - extra results' (except the first one) addresses
+ */
+ param_count = (int32)func_type->param_count;
+ result_count = (int32)func_type->result_count;
+ ext_ret_count = result_count > 1 ? result_count - 1 : 0;
+ total_size =
+ sizeof(LLVMValueRef) * (uint64)(param_count + 1 + ext_ret_count);
+ if (total_size >= UINT32_MAX
+ || !(param_values = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+
+ /* First parameter is exec env */
+ param_values[j++] = func_ctx->exec_env;
+
+ /* Pop parameters from stack */
+ for (i = param_count - 1; i >= 0; i--)
+ POP(param_values[i + j], func_type->types[i]);
+
+ /* Set parameters for multiple return values, the first return value
+ is returned by function return value, and the other return values
+ are returned by function parameters with pointer types */
+ if (ext_ret_count > 0) {
+ ext_ret_types = func_type->types + param_count + 1;
+ ext_ret_cell_num = wasm_get_cell_num(ext_ret_types, ext_ret_count);
+ if (ext_ret_cell_num > 64) {
+ aot_set_last_error("prepare extra results's return "
+ "address arguments failed: "
+ "maximum 64 parameter cell number supported.");
+ goto fail;
+ }
+
+ for (i = 0; i < ext_ret_count; i++) {
+ if (!(ext_ret_idx = I32_CONST(cell_num))
+ || !(ext_ret_ptr_type =
+ LLVMPointerType(TO_LLVM_TYPE(ext_ret_types[i]), 0))) {
+ aot_set_last_error("llvm add const or pointer type failed.");
+ goto fail;
+ }
+
+ snprintf(buf, sizeof(buf), "ext_ret%d_ptr", i);
+ if (!(ext_ret_ptr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, I32_TYPE, func_ctx->argv_buf,
+ &ext_ret_idx, 1, buf))) {
+ aot_set_last_error("llvm build GEP failed.");
+ goto fail;
+ }
+ snprintf(buf, sizeof(buf), "ext_ret%d_ptr_cast", i);
+ if (!(ext_ret_ptr = LLVMBuildBitCast(comp_ctx->builder, ext_ret_ptr,
+ ext_ret_ptr_type, buf))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ goto fail;
+ }
+ param_values[param_count + 1 + i] = ext_ret_ptr;
+ cell_num += wasm_value_type_cell_num(ext_ret_types[i]);
+ }
+ }
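+
+    /*
+     * E.g. a wasm function of type (param i32) (result i32 i64) is called
+     * through an LLVM signature roughly like
+     *
+     *   i32 f(exec_env, i32 param0, i64 *ext_ret0_ptr)
+     *
+     * where the first result comes back as the return value and the extra
+     * i64 result is stored through ext_ret0_ptr, a slot inside argv_buf.
+     */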
+
+ if (func_idx < import_func_count) {
+ if (!(import_func_idx = I32_CONST(func_idx))) {
+ aot_set_last_error("llvm build inbounds gep failed.");
+ goto fail;
+ }
+
+ /* Initialize parameter types of the LLVM function */
+ total_size = sizeof(LLVMTypeRef) * (uint64)(param_count + 1);
+ if (total_size >= UINT32_MAX
+ || !(param_types = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+ j = 0;
+ param_types[j++] = comp_ctx->exec_env_type;
+
+ for (i = 0; i < param_count; i++, j++) {
+ param_types[j] = TO_LLVM_TYPE(func_type->types[i]);
+
+            /* If the signature is available, e.g. for functions of the
+               built-in native libraries, just check the app offset and buf
+               size, then convert the app offset to a native address and
+               call the native func directly, without going through
+               aot_invoke_native */
+ if (signature) {
+ LLVMValueRef native_addr, native_addr_size;
+ if (signature[i + 1] == '*' || signature[i + 1] == '$') {
+ param_types[j] = INT8_PTR_TYPE;
+ }
+ if (signature[i + 1] == '*') {
+ if (signature[i + 2] == '~')
+ native_addr_size = param_values[i + 2];
+ else
+ native_addr_size = I32_ONE;
+ if (!check_app_addr_and_convert(
+ comp_ctx, func_ctx, false, param_values[j],
+ native_addr_size, &native_addr)) {
+ goto fail;
+ }
+ param_values[j] = native_addr;
+ }
+ else if (signature[i + 1] == '$') {
+ native_addr_size = I32_ZERO;
+ if (!check_app_addr_and_convert(
+ comp_ctx, func_ctx, true, param_values[j],
+ native_addr_size, &native_addr)) {
+ goto fail;
+ }
+ param_values[j] = native_addr;
+ }
+ }
+ }
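+
+        /*
+         * Hypothetical example: for a native function registered with the
+         * signature "(i*~)i", parameter 0 stays a plain i32, parameter 1
+         * is an app buffer address that is bound-checked against the
+         * length passed in parameter 2 and rewritten to a native pointer,
+         * while a '$' parameter would instead be checked as a
+         * NUL-terminated string address.
+         */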
+
+ if (func_type->result_count) {
+ wasm_ret_type = func_type->types[func_type->param_count];
+ ret_type = TO_LLVM_TYPE(wasm_ret_type);
+ }
+ else {
+ wasm_ret_type = VALUE_TYPE_VOID;
+ ret_type = VOID_TYPE;
+ }
+
+ if (!signature) {
+ /* call aot_invoke_native() */
+ if (!call_aot_invoke_native_func(
+ comp_ctx, func_ctx, import_func_idx, func_type,
+ param_types + 1, param_values + 1, param_count,
+ param_cell_num, ret_type, wasm_ret_type, &value_ret, &res))
+ goto fail;
+ /* Check whether there was exception thrown when executing
+ the function */
+ if (comp_ctx->enable_bound_check
+ && !check_call_return(comp_ctx, func_ctx, res))
+ goto fail;
+ }
+ else { /* call native func directly */
+ LLVMTypeRef native_func_type, func_ptr_type;
+ LLVMValueRef func_ptr;
+
+ if (!(native_func_type = LLVMFunctionType(
+ ret_type, param_types, param_count + 1, false))) {
+ aot_set_last_error("llvm add function type failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr_type = LLVMPointerType(native_func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ goto fail;
+ }
+
+ /* Load function pointer */
+ if (!(func_ptr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->func_ptrs,
+ &import_func_idx, 1, "native_func_ptr_tmp"))) {
+ aot_set_last_error("llvm build inbounds gep failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr = LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ptr, "native_func_ptr"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+
+ if (!(func = LLVMBuildBitCast(comp_ctx->builder, func_ptr,
+ func_ptr_type, "native_func"))) {
+ aot_set_last_error("llvm bit cast failed.");
+ goto fail;
+ }
+
+ /* Call the function */
+ if (!(value_ret = LLVMBuildCall2(
+ comp_ctx->builder, native_func_type, func, param_values,
+ (uint32)param_count + 1 + ext_ret_count,
+ (func_type->result_count > 0 ? "call" : "")))) {
+ aot_set_last_error("LLVM build call failed.");
+ goto fail;
+ }
+
+ /* Check whether there was exception thrown when executing
+ the function */
+ if (!check_exception_thrown(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+ }
+ }
+ else {
+#if LLVM_VERSION_MAJOR >= 14
+ LLVMTypeRef llvm_func_type;
+#endif
+        bool recursive_call =
+            (func_ctx == func_ctxes[func_idx - import_func_count]);
+
+ if (comp_ctx->is_indirect_mode) {
+ LLVMTypeRef func_ptr_type;
+
+ if (!(func_ptr_type = LLVMPointerType(
+ func_ctxes[func_idx - import_func_count]->func_type,
+ 0))) {
+ aot_set_last_error("construct func ptr type failed.");
+ goto fail;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->func_ptrs,
+ func_ptr_type, func_idx))) {
+ goto fail;
+ }
+ }
+ else {
+ if (func_ctxes[func_idx - import_func_count] == func_ctx) {
+ /* recursive call */
+ func = func_ctx->func;
+ }
+ else {
+ if (!comp_ctx->is_jit_mode) {
+ func = func_ctxes[func_idx - import_func_count]->func;
+ }
+ else {
+#if !(WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0)
+ func = func_ctxes[func_idx - import_func_count]->func;
+#else
+ /* JIT tier-up, load func ptr from func_ptrs[func_idx] */
+ LLVMValueRef func_ptr, func_idx_const;
+ LLVMTypeRef func_ptr_type;
+
+ if (!(func_idx_const = I32_CONST(func_idx))) {
+ aot_set_last_error("llvm build const failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->func_ptrs, &func_idx_const, 1,
+ "func_ptr_tmp"))) {
+ aot_set_last_error("llvm build inbounds gep failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ptr, "func_ptr"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr_type = LLVMPointerType(
+ func_ctxes[func_idx - import_func_count]
+ ->func_type,
+ 0))) {
+ aot_set_last_error("construct func ptr type failed.");
+ goto fail;
+ }
+
+ if (!(func = LLVMBuildBitCast(comp_ctx->builder, func_ptr,
+ func_ptr_type,
+ "indirect_func"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ goto fail;
+ }
+#endif /* end of !(WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0) */
+ }
+ }
+ }
+
+ aot_func = func_ctxes[func_idx - import_func_count]->aot_func;
+ callee_cell_num =
+ aot_func->param_cell_num + aot_func->local_cell_num + 1;
+
+ if (!check_stack(comp_ctx, func_ctx, callee_cell_num))
+ goto fail;
+
+#if LLVM_VERSION_MAJOR >= 14
+ llvm_func_type = func_ctxes[func_idx - import_func_count]->func_type;
+#endif
+
+ /* Call the function */
+ if (!(value_ret = LLVMBuildCall2(
+ comp_ctx->builder, llvm_func_type, func, param_values,
+ (uint32)param_count + 1 + ext_ret_count,
+ (func_type->result_count > 0 ? "call" : "")))) {
+ aot_set_last_error("LLVM build call failed.");
+ goto fail;
+ }
+
+ /* Set calling convention for the call with the func's calling
+ convention */
+ LLVMSetInstructionCallConv(value_ret, LLVMGetFunctionCallConv(func));
+
+ if (tail_call)
+ LLVMSetTailCall(value_ret, true);
+
+ /* Check whether there was exception thrown when executing
+ the function */
+ if (!tail_call && !recursive_call && comp_ctx->enable_bound_check
+ && !check_exception_thrown(comp_ctx, func_ctx))
+ goto fail;
+ }
+
+ if (func_type->result_count > 0) {
+ /* Push the first result to stack */
+ PUSH(value_ret, func_type->types[func_type->param_count]);
+ /* Load extra result from its address and push to stack */
+ for (i = 0; i < ext_ret_count; i++) {
+ snprintf(buf, sizeof(buf), "func%d_ext_ret%d", func_idx, i);
+ if (!(ext_ret = LLVMBuildLoad2(
+ comp_ctx->builder, TO_LLVM_TYPE(ext_ret_types[i]),
+ param_values[1 + param_count + i], buf))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ PUSH(ext_ret, ext_ret_types[i]);
+ }
+ }
+
+#if (WASM_ENABLE_DUMP_CALL_STACK != 0) || (WASM_ENABLE_PERF_PROFILING != 0)
+ if (comp_ctx->enable_aux_stack_frame) {
+ if (!call_aot_free_frame_func(comp_ctx, func_ctx))
+ goto fail;
+ }
+#endif
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ goto fail;
+ }
+#endif
+
+ ret = true;
+fail:
+ if (param_types)
+ wasm_runtime_free(param_types);
+ if (param_values)
+ wasm_runtime_free(param_values);
+ return ret;
+}
+
+static bool
+call_aot_call_indirect_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ AOTFuncType *aot_func_type,
+ LLVMValueRef func_type_idx, LLVMValueRef table_idx,
+ LLVMValueRef table_elem_idx,
+ LLVMTypeRef *param_types,
+ LLVMValueRef *param_values, uint32 param_count,
+ uint32 param_cell_num, uint32 result_count,
+ uint8 *wasm_ret_types, LLVMValueRef *value_rets,
+ LLVMValueRef *p_res)
+{
+ LLVMTypeRef func_type, func_ptr_type, func_param_types[6];
+ LLVMTypeRef ret_type, ret_ptr_type, elem_ptr_type;
+ LLVMValueRef func, ret_idx, ret_ptr, elem_idx, elem_ptr;
+ LLVMValueRef func_param_values[6], res = NULL;
+ char buf[32], *func_name = "aot_call_indirect";
+ uint32 i, cell_num = 0, ret_cell_num, argv_cell_num;
+
+ /* prepare function type of aot_call_indirect */
+ func_param_types[0] = comp_ctx->exec_env_type; /* exec_env */
+ func_param_types[1] = I32_TYPE; /* table_idx */
+ func_param_types[2] = I32_TYPE; /* table_elem_idx */
+ func_param_types[3] = I32_TYPE; /* argc */
+ func_param_types[4] = INT32_PTR_TYPE; /* argv */
+ if (!(func_type =
+ LLVMFunctionType(INT8_TYPE, func_param_types, 5, false))) {
+ aot_set_last_error("llvm add function type failed.");
+ return false;
+ }
+
+ /* prepare function pointer */
+ if (comp_ctx->is_jit_mode) {
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ /* JIT mode, call the function directly */
+ if (!(func = I64_CONST((uint64)(uintptr_t)llvm_jit_call_indirect))
+ || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ int32 func_index;
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+ func_index = aot_get_native_symbol_index(comp_ctx, func_name);
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
+ && !(func =
+ LLVMAddFunction(func_ctx->module, func_name, func_type))) {
+ aot_set_last_error("add LLVM function failed.");
+ return false;
+ }
+ }
+
+ ret_cell_num = wasm_get_cell_num(wasm_ret_types, result_count);
+ argv_cell_num =
+ param_cell_num > ret_cell_num ? param_cell_num : ret_cell_num;
+ if (argv_cell_num > 64) {
+ aot_set_last_error("prepare native arguments failed: "
+ "maximum 64 parameter cell number supported.");
+ return false;
+ }
+
+ /* prepare frame_lp */
+ for (i = 0; i < param_count; i++) {
+ if (!(elem_idx = I32_CONST(cell_num))
+ || !(elem_ptr_type = LLVMPointerType(param_types[i], 0))) {
+ aot_set_last_error("llvm add const or pointer type failed.");
+ return false;
+ }
+
+ snprintf(buf, sizeof(buf), "%s%d", "elem", i);
+ if (!(elem_ptr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE,
+ func_ctx->argv_buf, &elem_idx, 1, buf))
+ || !(elem_ptr = LLVMBuildBitCast(comp_ctx->builder, elem_ptr,
+ elem_ptr_type, buf))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ if (!(res = LLVMBuildStore(comp_ctx->builder, param_values[i],
+ elem_ptr))) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ LLVMSetAlignment(res, 1);
+
+ cell_num += wasm_value_type_cell_num(aot_func_type->types[i]);
+ }
+
+ func_param_values[0] = func_ctx->exec_env;
+ func_param_values[1] = table_idx;
+ func_param_values[2] = table_elem_idx;
+ func_param_values[3] = I32_CONST(param_cell_num);
+ func_param_values[4] = func_ctx->argv_buf;
+
+ if (!func_param_values[3]) {
+ aot_set_last_error("llvm create const failed.");
+ return false;
+ }
+
+ /* call aot_call_indirect() function */
+ if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ func_param_values, 5, "res"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ /* get function result values */
+ cell_num = 0;
+ for (i = 0; i < result_count; i++) {
+ ret_type = TO_LLVM_TYPE(wasm_ret_types[i]);
+ if (!(ret_idx = I32_CONST(cell_num))
+ || !(ret_ptr_type = LLVMPointerType(ret_type, 0))) {
+ aot_set_last_error("llvm add const or pointer type failed.");
+ return false;
+ }
+
+ snprintf(buf, sizeof(buf), "argv_ret%d", i);
+ if (!(ret_ptr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE,
+ func_ctx->argv_buf, &ret_idx, 1, buf))
+ || !(ret_ptr = LLVMBuildBitCast(comp_ctx->builder, ret_ptr,
+ ret_ptr_type, buf))) {
+ aot_set_last_error("llvm build GEP or bit cast failed.");
+ return false;
+ }
+
+ snprintf(buf, sizeof(buf), "ret%d", i);
+ if (!(value_rets[i] =
+ LLVMBuildLoad2(comp_ctx->builder, ret_type, ret_ptr, buf))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ cell_num += wasm_value_type_cell_num(wasm_ret_types[i]);
+ }
+
+ *p_res = res;
+ return true;
+}
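+
+/*
+ * For reference, the runtime fallback invoked above has approximately this
+ * C signature (a sketch); parameters and results travel through the shared
+ * argv buffer, one 32-bit cell per slot:
+ *
+ *   bool
+ *   aot_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx,
+ *                     uint32 table_elem_idx, uint32 argc, uint32 *argv);
+ */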
+
+bool
+aot_compile_op_call_indirect(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 type_idx, uint32 tbl_idx)
+{
+ AOTFuncType *func_type;
+ LLVMValueRef tbl_idx_value, elem_idx, table_elem, func_idx;
+ LLVMValueRef ftype_idx_ptr, ftype_idx, ftype_idx_const;
+ LLVMValueRef cmp_elem_idx, cmp_func_idx, cmp_ftype_idx;
+ LLVMValueRef func, func_ptr, table_size_const;
+ LLVMValueRef ext_ret_offset, ext_ret_ptr, ext_ret, res;
+ LLVMValueRef *param_values = NULL, *value_rets = NULL;
+ LLVMValueRef *result_phis = NULL, value_ret, import_func_count;
+ LLVMTypeRef *param_types = NULL, ret_type;
+ LLVMTypeRef llvm_func_type, llvm_func_ptr_type;
+ LLVMTypeRef ext_ret_ptr_type;
+ LLVMBasicBlockRef check_elem_idx_succ, check_ftype_idx_succ;
+ LLVMBasicBlockRef check_func_idx_succ, block_return, block_curr;
+ LLVMBasicBlockRef block_call_import, block_call_non_import;
+ LLVMValueRef offset;
+ uint32 total_param_count, func_param_count, func_result_count;
+ uint32 ext_cell_num, param_cell_num, i, j;
+ uint8 wasm_ret_type, *wasm_ret_types;
+ uint64 total_size;
+ char buf[32];
+ bool ret = false;
+
+ /* Check function type index */
+ if (type_idx >= comp_ctx->comp_data->func_type_count) {
+ aot_set_last_error("function type index out of range");
+ return false;
+ }
+
+    /* Find the equivalent function type whose type index is the smallest:
+       the callee function's type index was likewise converted to the
+       smallest equivalent one by the wasm loader, so it is enough to
+       compare the two type indexes (that of the call_indirect opcode and
+       that of the callee func); there is no need to compare the whole
+       function types, including param types and result types. */
+ type_idx = wasm_get_smallest_type_idx(comp_ctx->comp_data->func_types,
+ comp_ctx->comp_data->func_type_count,
+ type_idx);
+ ftype_idx_const = I32_CONST(type_idx);
+ CHECK_LLVM_CONST(ftype_idx_const);
+
+ func_type = comp_ctx->comp_data->func_types[type_idx];
+ func_param_count = func_type->param_count;
+ func_result_count = func_type->result_count;
+
+ POP_I32(elem_idx);
+
+ /* get the cur size of the table instance */
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, cur_size)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(table_size_const = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset,
+ 1, "cur_size_i8p"))) {
+ HANDLE_FAILURE("LLVMBuildGEP");
+ goto fail;
+ }
+
+ if (!(table_size_const =
+ LLVMBuildBitCast(comp_ctx->builder, table_size_const,
+                           INT32_PTR_TYPE, "cur_size_i32p"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ if (!(table_size_const = LLVMBuildLoad2(comp_ctx->builder, I32_TYPE,
+ table_size_const, "cur_size"))) {
+ HANDLE_FAILURE("LLVMBuildLoad");
+ goto fail;
+ }
+
+ /* Check if (uint32)elem index >= table size */
+ if (!(cmp_elem_idx = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, elem_idx,
+ table_size_const, "cmp_elem_idx"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ goto fail;
+ }
+
+ /* Throw exception if elem index >= table size */
+ if (!(check_elem_idx_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_elem_idx_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_elem_idx_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_UNDEFINED_ELEMENT, true,
+ cmp_elem_idx, check_elem_idx_succ)))
+ goto fail;
+
+ /* load data as i32* */
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, elems)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "table_elem_i8p"))) {
+ aot_set_last_error("llvm build add failed.");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildBitCast(comp_ctx->builder, table_elem,
+ INT32_PTR_TYPE, "table_elem_i32p"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ /* Load function index */
+ if (!(table_elem =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE, table_elem,
+ &elem_idx, 1, "table_elem"))) {
+ HANDLE_FAILURE("LLVMBuildNUWAdd");
+ goto fail;
+ }
+
+ if (!(func_idx = LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, table_elem,
+ "func_idx"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+
+ /* Check if func_idx == -1 */
+ if (!(cmp_func_idx = LLVMBuildICmp(comp_ctx->builder, LLVMIntEQ, func_idx,
+ I32_NEG_ONE, "cmp_func_idx"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ goto fail;
+ }
+
+ /* Throw exception if func_idx == -1 */
+ if (!(check_func_idx_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_func_idx_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_func_idx_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_UNINITIALIZED_ELEMENT,
+ true, cmp_func_idx, check_func_idx_succ)))
+ goto fail;
+
+ /* Load function type index */
+ if (!(ftype_idx_ptr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, I32_TYPE, func_ctx->func_type_indexes,
+ &func_idx, 1, "ftype_idx_ptr"))) {
+ aot_set_last_error("llvm build inbounds gep failed.");
+ goto fail;
+ }
+
+ if (!(ftype_idx = LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, ftype_idx_ptr,
+ "ftype_idx"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+
+ /* Check if function type index not equal */
+ if (!(cmp_ftype_idx = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, ftype_idx,
+ ftype_idx_const, "cmp_ftype_idx"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ goto fail;
+ }
+
+ /* Throw exception if ftype_idx != ftype_idx_const */
+ if (!(check_ftype_idx_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_ftype_idx_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_ftype_idx_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INVALID_FUNCTION_TYPE_INDEX, true,
+ cmp_ftype_idx, check_ftype_idx_succ)))
+ goto fail;
+
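+    /*
+     * At this point three guards have been emitted, mirroring the wasm
+     * call_indirect semantics, roughly:
+     *
+     *   if (elem_idx >= tbl->cur_size)   trap(EXCE_UNDEFINED_ELEMENT);
+     *   func_idx = tbl->elems[elem_idx];
+     *   if (func_idx == -1)              trap(EXCE_UNINITIALIZED_ELEMENT);
+     *   if (func_type_indexes[func_idx] != type_idx)
+     *       trap(EXCE_INVALID_FUNCTION_TYPE_INDEX);
+     */
+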
+ /* Initialize parameter types of the LLVM function */
+ total_param_count = 1 + func_param_count;
+
+ /* Extra function results' addresses (except the first one) are
+ appended to aot function parameters. */
+ if (func_result_count > 1)
+ total_param_count += func_result_count - 1;
+
+ total_size = sizeof(LLVMTypeRef) * (uint64)total_param_count;
+ if (total_size >= UINT32_MAX
+ || !(param_types = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+ /* Prepare param types */
+ j = 0;
+ param_types[j++] = comp_ctx->exec_env_type;
+ for (i = 0; i < func_param_count; i++)
+ param_types[j++] = TO_LLVM_TYPE(func_type->types[i]);
+
+ for (i = 1; i < func_result_count; i++, j++) {
+ param_types[j] = TO_LLVM_TYPE(func_type->types[func_param_count + i]);
+ if (!(param_types[j] = LLVMPointerType(param_types[j], 0))) {
+ aot_set_last_error("llvm get pointer type failed.");
+ goto fail;
+ }
+ }
+
+ /* Resolve return type of the LLVM function */
+ if (func_result_count) {
+ wasm_ret_type = func_type->types[func_param_count];
+ ret_type = TO_LLVM_TYPE(wasm_ret_type);
+ }
+ else {
+ wasm_ret_type = VALUE_TYPE_VOID;
+ ret_type = VOID_TYPE;
+ }
+
+ /* Allocate memory for parameters */
+ total_size = sizeof(LLVMValueRef) * (uint64)total_param_count;
+ if (total_size >= UINT32_MAX
+ || !(param_values = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+
+ /* First parameter is exec env */
+ j = 0;
+ param_values[j++] = func_ctx->exec_env;
+
+ /* Pop parameters from stack */
+ for (i = func_param_count - 1; (int32)i >= 0; i--)
+ POP(param_values[i + j], func_type->types[i]);
+
+ /* Prepare extra parameters */
+ ext_cell_num = 0;
+ for (i = 1; i < func_result_count; i++) {
+ ext_ret_offset = I32_CONST(ext_cell_num);
+ CHECK_LLVM_CONST(ext_ret_offset);
+
+ snprintf(buf, sizeof(buf), "ext_ret%d_ptr", i - 1);
+ if (!(ext_ret_ptr = LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE,
+ func_ctx->argv_buf,
+ &ext_ret_offset, 1, buf))) {
+ aot_set_last_error("llvm build GEP failed.");
+ goto fail;
+ }
+
+ ext_ret_ptr_type = param_types[func_param_count + i];
+ snprintf(buf, sizeof(buf), "ext_ret%d_ptr_cast", i - 1);
+ if (!(ext_ret_ptr = LLVMBuildBitCast(comp_ctx->builder, ext_ret_ptr,
+ ext_ret_ptr_type, buf))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ goto fail;
+ }
+
+ param_values[func_param_count + i] = ext_ret_ptr;
+ ext_cell_num +=
+ wasm_value_type_cell_num(func_type->types[func_param_count + i]);
+ }
+
+ if (ext_cell_num > 64) {
+ aot_set_last_error("prepare call-indirect arguments failed: "
+ "maximum 64 extra cell number supported.");
+ goto fail;
+ }
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ goto fail;
+ }
+#endif
+
+#if (WASM_ENABLE_DUMP_CALL_STACK != 0) || (WASM_ENABLE_PERF_PROFILING != 0)
+ if (comp_ctx->enable_aux_stack_frame) {
+ if (!call_aot_alloc_frame_func(comp_ctx, func_ctx, func_idx))
+ goto fail;
+ }
+#endif
+
+ /* Add basic blocks */
+ block_call_import = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "call_import");
+ block_call_non_import = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "call_non_import");
+ block_return = LLVMAppendBasicBlockInContext(comp_ctx->context,
+ func_ctx->func, "func_return");
+ if (!block_call_import || !block_call_non_import || !block_return) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(block_call_import,
+ LLVMGetInsertBlock(comp_ctx->builder));
+ LLVMMoveBasicBlockAfter(block_call_non_import, block_call_import);
+ LLVMMoveBasicBlockAfter(block_return, block_call_non_import);
+
+ import_func_count = I32_CONST(comp_ctx->comp_data->import_func_count);
+ CHECK_LLVM_CONST(import_func_count);
+
+ /* Check if func_idx < import_func_count */
+ if (!(cmp_func_idx = LLVMBuildICmp(comp_ctx->builder, LLVMIntULT, func_idx,
+ import_func_count, "cmp_func_idx"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ goto fail;
+ }
+
+ /* If func_idx < import_func_count, jump to call import block,
+ else jump to call non-import block */
+ if (!LLVMBuildCondBr(comp_ctx->builder, cmp_func_idx, block_call_import,
+ block_call_non_import)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+ /* Add result phis for return block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_return);
+
+ if (func_result_count > 0) {
+ total_size = sizeof(LLVMValueRef) * (uint64)func_result_count;
+ if (total_size >= UINT32_MAX
+ || !(result_phis = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ memset(result_phis, 0, (uint32)total_size);
+ for (i = 0; i < func_result_count; i++) {
+ LLVMTypeRef tmp_type =
+ TO_LLVM_TYPE(func_type->types[func_param_count + i]);
+ if (!(result_phis[i] =
+ LLVMBuildPhi(comp_ctx->builder, tmp_type, "phi"))) {
+ aot_set_last_error("llvm build phi failed.");
+ goto fail;
+ }
+ }
+ }
+
+ /* Translate call import block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_call_import);
+
+ /* Allocate memory for result values */
+ if (func_result_count > 0) {
+ total_size = sizeof(LLVMValueRef) * (uint64)func_result_count;
+ if (total_size >= UINT32_MAX
+ || !(value_rets = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ memset(value_rets, 0, (uint32)total_size);
+ }
+
+ param_cell_num = func_type->param_cell_num;
+ wasm_ret_types = func_type->types + func_type->param_count;
+
+ tbl_idx_value = I32_CONST(tbl_idx);
+ if (!tbl_idx_value) {
+ aot_set_last_error("llvm create const failed.");
+ goto fail;
+ }
+
+ if (!call_aot_call_indirect_func(
+ comp_ctx, func_ctx, func_type, ftype_idx, tbl_idx_value, elem_idx,
+ param_types + 1, param_values + 1, func_param_count, param_cell_num,
+ func_result_count, wasm_ret_types, value_rets, &res))
+ goto fail;
+
+ /* Check whether exception was thrown when executing the function */
+ if (comp_ctx->enable_bound_check
+ && !check_call_return(comp_ctx, func_ctx, res))
+ goto fail;
+
+ block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ for (i = 0; i < func_result_count; i++) {
+ LLVMAddIncoming(result_phis[i], &value_rets[i], &block_curr, 1);
+ }
+
+ if (!LLVMBuildBr(comp_ctx->builder, block_return)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Translate call non-import block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_call_non_import);
+
+ if (!check_stack(comp_ctx, func_ctx,
+ param_cell_num + ext_cell_num
+ + 1
+ /* Reserve some local variables */
+ + 16))
+ goto fail;
+
+ /* Load function pointer */
+ if (!(func_ptr = LLVMBuildInBoundsGEP2(comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->func_ptrs, &func_idx, 1,
+ "func_ptr_tmp"))) {
+ aot_set_last_error("llvm build inbounds gep failed.");
+ goto fail;
+ }
+
+ if (!(func_ptr = LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE, func_ptr,
+ "func_ptr"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+
+ if (!(llvm_func_type =
+ LLVMFunctionType(ret_type, param_types, total_param_count, false))
+ || !(llvm_func_ptr_type = LLVMPointerType(llvm_func_type, 0))) {
+ aot_set_last_error("llvm add function type failed.");
+ goto fail;
+ }
+
+ if (!(func = LLVMBuildBitCast(comp_ctx->builder, func_ptr,
+ llvm_func_ptr_type, "indirect_func"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ goto fail;
+ }
+
+ if (!(value_ret = LLVMBuildCall2(comp_ctx->builder, llvm_func_type, func,
+ param_values, total_param_count,
+ func_result_count > 0 ? "ret" : ""))) {
+ aot_set_last_error("llvm build call failed.");
+ goto fail;
+ }
+
+ /* Check whether exception was thrown when executing the function */
+ if (comp_ctx->enable_bound_check
+ && !check_exception_thrown(comp_ctx, func_ctx))
+ goto fail;
+
+ if (func_result_count > 0) {
+ block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+
+ /* Push the first result to stack */
+ LLVMAddIncoming(result_phis[0], &value_ret, &block_curr, 1);
+
+ /* Load extra result from its address and push to stack */
+ for (i = 1; i < func_result_count; i++) {
+ ret_type = TO_LLVM_TYPE(func_type->types[func_param_count + i]);
+ snprintf(buf, sizeof(buf), "ext_ret%d", i - 1);
+ if (!(ext_ret = LLVMBuildLoad2(comp_ctx->builder, ret_type,
+ param_values[func_param_count + i],
+ buf))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ LLVMAddIncoming(result_phis[i], &ext_ret, &block_curr, 1);
+ }
+ }
+
+ if (!LLVMBuildBr(comp_ctx->builder, block_return)) {
+ aot_set_last_error("llvm build br failed.");
+ goto fail;
+ }
+
+ /* Translate function return block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, block_return);
+
+ for (i = 0; i < func_result_count; i++) {
+ PUSH(result_phis[i], func_type->types[func_param_count + i]);
+ }
+
+#if (WASM_ENABLE_DUMP_CALL_STACK != 0) || (WASM_ENABLE_PERF_PROFILING != 0)
+ if (comp_ctx->enable_aux_stack_frame) {
+ if (!call_aot_free_frame_func(comp_ctx, func_ctx))
+ goto fail;
+ }
+#endif
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ goto fail;
+ }
+#endif
+
+ ret = true;
+
+fail:
+ if (param_values)
+ wasm_runtime_free(param_values);
+ if (param_types)
+ wasm_runtime_free(param_types);
+ if (value_rets)
+ wasm_runtime_free(value_rets);
+ if (result_phis)
+ wasm_runtime_free(result_phis);
+ return ret;
+}
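+
+/*
+ * Control-flow summary of the function above: after the table and type
+ * guards, the looked-up func_idx selects one of two paths that feed the
+ * same phi nodes in block_return, roughly:
+ *
+ *   if (func_idx < import_func_count)
+ *       results = aot_call_indirect(exec_env, ...);   (runtime fallback)
+ *   else
+ *       results = ((func_type)func_ptrs[func_idx])(exec_env, ...);
+ */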
+
+bool
+aot_compile_op_ref_null(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ PUSH_I32(REF_NULL);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_ref_is_null(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef lhs, res;
+
+ POP_I32(lhs);
+
+ if (!(res = LLVMBuildICmp(comp_ctx->builder, LLVMIntEQ, lhs, REF_NULL,
+ "cmp_w_null"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ goto fail;
+ }
+
+ if (!(res = LLVMBuildZExt(comp_ctx->builder, res, I32_TYPE, "r_i"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+
+ PUSH_I32(res);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_ref_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 func_idx)
+{
+ LLVMValueRef ref_idx;
+
+ if (!(ref_idx = I32_CONST(func_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ PUSH_I32(ref_idx);
+
+ return true;
+fail:
+ return false;
+}
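+
+/*
+ * Note on the three ref-type opcodes above: reference values are
+ * represented as i32 function indexes in this compiler, with REF_NULL as
+ * the null sentinel, so ref.is_null reduces to an i32 comparison and
+ * ref.func to pushing a constant index.
+ */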
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.h
new file mode 100644
index 000000000..26f09c660
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_function.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_FUNCTION_H_
+#define _AOT_EMIT_FUNCTION_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 func_idx, bool tail_call);
+
+bool
+aot_compile_op_call_indirect(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 type_idx, uint32 tbl_idx);
+
+bool
+aot_compile_op_ref_null(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_ref_is_null(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_ref_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+                        uint32 func_idx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_FUNCTION_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.c
new file mode 100644
index 000000000..4da4cc807
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_memory.h"
+#include "aot_emit_exception.h"
+#include "../aot/aot_runtime.h"
+#include "aot_intrinsic.h"
+#include "aot_emit_control.h"
+
+#define BUILD_ICMP(op, left, right, res, name) \
+ do { \
+ if (!(res = \
+ LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
+ aot_set_last_error("llvm build icmp failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define BUILD_OP(Op, left, right, res, name) \
+ do { \
+ if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
+ aot_set_last_error("llvm build " #Op " fail."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define ADD_BASIC_BLOCK(block, name) \
+ do { \
+ if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context, \
+ func_ctx->func, name))) { \
+ aot_set_last_error("llvm add basic block failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
+
+static LLVMValueRef
+get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 bytes)
+{
+ LLVMValueRef mem_check_bound = NULL;
+ switch (bytes) {
+ case 1:
+ mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
+ break;
+ case 2:
+ mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
+ break;
+ case 4:
+ mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
+ break;
+ case 8:
+ mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
+ break;
+ case 16:
+ mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
+ break;
+ default:
+ bh_assert(0);
+ return NULL;
+ }
+
+ if (func_ctx->mem_space_unchanged)
+ return mem_check_bound;
+
+ if (!(mem_check_bound = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
+ mem_check_bound, "mem_check_bound"))) {
+ aot_set_last_error("llvm build load failed.");
+ return NULL;
+ }
+ return mem_check_bound;
+}
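+
+/*
+ * The per-width bounds selected above are precomputed, roughly, as
+ * mem_bound_check_Nbytes = linear_memory_data_size - N, so the check in
+ * aot_check_memory_overflow needs only one unsigned compare per access.
+ */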
+
+static LLVMValueRef
+get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+LLVMValueRef
+aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 offset, uint32 bytes)
+{
+ LLVMValueRef offset_const = I32_CONST(offset);
+ LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
+ LLVMValueRef mem_base_addr, mem_check_bound;
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef check_succ;
+ AOTValue *aot_value_top;
+ uint32 local_idx_of_aot_value = 0;
+ bool is_target_64bit, is_local_of_aot_value = false;
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ bool is_shared_memory =
+ comp_ctx->comp_data->memories[0].memory_flags & 0x02;
+#endif
+
+ is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
+
+ if (comp_ctx->is_indirect_mode
+ && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
+ WASMValue wasm_value;
+ wasm_value.i32 = offset;
+ offset_const = aot_load_const_from_table(
+ comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
+ if (!offset_const) {
+ return NULL;
+ }
+ }
+ else {
+ CHECK_LLVM_CONST(offset_const);
+ }
+
+ /* Get memory base address and memory data size */
+ if (func_ctx->mem_space_unchanged
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ || is_shared_memory
+#endif
+ ) {
+ mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
+ }
+ else {
+ if (!(mem_base_addr = LLVMBuildLoad2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ }
+
+ aot_value_top =
+ func_ctx->block_stack.block_list_end->value_stack.value_list_end;
+ if (aot_value_top) {
+ /* aot_value_top is freed in the following POP_I32(addr),
+ so save its fields here for further use */
+ is_local_of_aot_value = aot_value_top->is_local;
+ local_idx_of_aot_value = aot_value_top->local_idx;
+ }
+
+ POP_I32(addr);
+
+    /*
+     * Note: we don't throw the integer-overflow exception here since it
+     * must have already been thrown earlier, when converting float to
+     * integer
+     */
+    /* return the address directly if the offset is constant and inside the
+       memory space */
+ if (LLVMIsConstant(addr) && !LLVMIsUndef(addr)
+#if LLVM_VERSION_NUMBER >= 12
+ && !LLVMIsPoison(addr)
+#endif
+ ) {
+ uint64 mem_offset =
+ (uint64)LLVMConstIntGetZExtValue(addr) + (uint64)offset;
+ uint32 num_bytes_per_page =
+ comp_ctx->comp_data->memories[0].num_bytes_per_page;
+ uint32 init_page_count =
+ comp_ctx->comp_data->memories[0].mem_init_page_count;
+ uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
+
+ if (mem_offset + bytes <= mem_data_size) {
+ /* inside memory space */
+ offset1 = I32_CONST((uint32)mem_offset);
+ CHECK_LLVM_CONST(offset1);
+ if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ mem_base_addr, &offset1, 1,
+ "maddr"))) {
+ aot_set_last_error("llvm build add failed.");
+ goto fail;
+ }
+ return maddr;
+ }
+ }
+
+ if (is_target_64bit) {
+ if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
+ I64_TYPE, "offset_i64"))
+ || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
+ "addr_i64"))) {
+ aot_set_last_error("llvm build zero extend failed.");
+ goto fail;
+ }
+ }
+
+ /* offset1 = offset + addr; */
+ BUILD_OP(Add, offset_const, addr, offset1, "offset1");
+
+ if (comp_ctx->enable_bound_check
+ && !(is_local_of_aot_value
+ && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
+ offset, bytes))) {
+ uint32 init_page_count =
+ comp_ctx->comp_data->memories[0].mem_init_page_count;
+ if (init_page_count == 0) {
+ LLVMValueRef mem_size;
+
+ if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
+ goto fail;
+ }
+ BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
+ ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
+ LLVMMoveBasicBlockAfter(check_succ, block_curr);
+ if (!aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
+ check_succ)) {
+ goto fail;
+ }
+
+ SET_BUILD_POS(check_succ);
+ block_curr = check_succ;
+ }
+
+ if (!(mem_check_bound =
+ get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
+ goto fail;
+ }
+
+ if (is_target_64bit) {
+ BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
+ }
+ else {
+ /* Check integer overflow */
+ BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
+ BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
+ BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
+ }
+
+ /* Add basic blocks */
+ ADD_BASIC_BLOCK(check_succ, "check_succ");
+ LLVMMoveBasicBlockAfter(check_succ, block_curr);
+
+ if (!aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
+ check_succ)) {
+ goto fail;
+ }
+
+ SET_BUILD_POS(check_succ);
+
+ if (is_local_of_aot_value) {
+ if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
+ offset, bytes))
+ goto fail;
+ }
+ }
+
+ /* maddr = mem_base_addr + offset1 */
+ if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ mem_base_addr, &offset1, 1, "maddr"))) {
+ aot_set_last_error("llvm build add failed.");
+ goto fail;
+ }
+ return maddr;
+fail:
+ return NULL;
+}
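+
+/*
+ * In C terms, the bound check emitted above for a non-constant address is
+ * roughly (on a 64-bit target, where the zero-extended add cannot wrap):
+ *
+ *   uint64 offset1 = (uint64)offset + (uint64)addr;
+ *   if (offset1 > mem_check_bound)    (bound ~ mem_data_size - bytes)
+ *       trap(EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS);
+ *   maddr = mem_base_addr + offset1;
+ *
+ * On 32-bit targets an extra offset1 < addr compare catches the
+ * wrap-around case.
+ */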
+
+#define BUILD_PTR_CAST(ptr_type) \
+ do { \
+ if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
+ "data_ptr"))) { \
+ aot_set_last_error("llvm build bit cast failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define BUILD_LOAD(data_type) \
+ do { \
+ if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
+ "data"))) { \
+ aot_set_last_error("llvm build load failed."); \
+ goto fail; \
+ } \
+ LLVMSetAlignment(value, 1); \
+ } while (0)
+
+#define BUILD_TRUNC(value, data_type) \
+ do { \
+ if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type, \
+ "val_trunc"))) { \
+ aot_set_last_error("llvm build trunc failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define BUILD_STORE() \
+ do { \
+ LLVMValueRef res; \
+ if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
+ aot_set_last_error("llvm build store failed."); \
+ goto fail; \
+ } \
+ LLVMSetAlignment(res, 1); \
+ } while (0)
+
+#define BUILD_SIGN_EXT(dst_type) \
+ do { \
+ if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type, \
+ "data_s_ext"))) { \
+ aot_set_last_error("llvm build sign ext failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#define BUILD_ZERO_EXT(dst_type) \
+ do { \
+ if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type, \
+ "data_z_ext"))) { \
+ aot_set_last_error("llvm build zero ext failed."); \
+ goto fail; \
+ } \
+ } while (0)
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+bool
+check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef addr, uint32 align)
+{
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef check_align_succ;
+ LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
+ LLVMValueRef res;
+
+ CHECK_LLVM_CONST(align_mask);
+
+ /* Convert pointer to int */
+ if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
+ "address"))) {
+ aot_set_last_error("llvm build ptr to int failed.");
+ goto fail;
+ }
+
+ /* The memory address should be aligned */
+ BUILD_OP(And, addr, align_mask, res, "and");
+ BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");
+
+ /* Add basic blocks */
+ ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
+ LLVMMoveBasicBlockAfter(check_align_succ, block_curr);
+
+ if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
+ res, check_align_succ)) {
+ goto fail;
+ }
+
+ SET_BUILD_POS(check_align_succ);
+
+ return true;
+fail:
+ return false;
+}
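+
+/*
+ * In C terms the check above is roughly:
+ *
+ *   if (((uintptr_t)maddr & ((1u << align) - 1)) != 0)
+ *       trap(EXCE_UNALIGNED_ATOMIC);
+ */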
+
+#define BUILD_ATOMIC_LOAD(align, data_type) \
+ do { \
+ if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
+ goto fail; \
+ } \
+ if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
+ "data"))) { \
+ aot_set_last_error("llvm build load failed."); \
+ goto fail; \
+ } \
+ LLVMSetAlignment(value, 1 << align); \
+ LLVMSetVolatile(value, true); \
+ LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent); \
+ } while (0)
+
+#define BUILD_ATOMIC_STORE(align) \
+ do { \
+ LLVMValueRef res; \
+ if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
+ goto fail; \
+ } \
+ if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
+ aot_set_last_error("llvm build store failed."); \
+ goto fail; \
+ } \
+ LLVMSetAlignment(res, 1 << align); \
+ LLVMSetVolatile(res, true); \
+ LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent); \
+ } while (0)
+#endif
+
+bool
+aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool sign,
+ bool atomic)
+{
+ LLVMValueRef maddr, value = NULL;
+ LLVMTypeRef data_type;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ switch (bytes) {
+ case 4:
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic)
+ BUILD_ATOMIC_LOAD(align, I32_TYPE);
+ else
+#endif
+ BUILD_LOAD(I32_TYPE);
+ break;
+ case 2:
+ case 1:
+ if (bytes == 2) {
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ data_type = INT16_TYPE;
+ }
+ else {
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ data_type = INT8_TYPE;
+ }
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic) {
+ BUILD_ATOMIC_LOAD(align, data_type);
+ BUILD_ZERO_EXT(I32_TYPE);
+ }
+ else
+#endif
+ {
+ BUILD_LOAD(data_type);
+ if (sign)
+ BUILD_SIGN_EXT(I32_TYPE);
+ else
+ BUILD_ZERO_EXT(I32_TYPE);
+ }
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+ PUSH_I32(value);
+ (void)data_type;
+ return true;
+fail:
+ return false;
+}
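+
+/*
+ * E.g. `i32.load8_s` lowers, in C terms, to roughly
+ *
+ *   value = (int32)*(int8 *)maddr;   (1-byte-aligned load, sign-extend)
+ *
+ * while `i32.load8_u` zero-extends instead, and the atomic variants add
+ * the alignment check plus sequentially-consistent ordering.
+ */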
+
+bool
+aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool sign,
+ bool atomic)
+{
+ LLVMValueRef maddr, value = NULL;
+ LLVMTypeRef data_type;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ switch (bytes) {
+ case 8:
+ BUILD_PTR_CAST(INT64_PTR_TYPE);
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic)
+ BUILD_ATOMIC_LOAD(align, I64_TYPE);
+ else
+#endif
+ BUILD_LOAD(I64_TYPE);
+ break;
+ case 4:
+ case 2:
+ case 1:
+ if (bytes == 4) {
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+ data_type = I32_TYPE;
+ }
+ else if (bytes == 2) {
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ data_type = INT16_TYPE;
+ }
+ else {
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ data_type = INT8_TYPE;
+ }
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic) {
+ BUILD_ATOMIC_LOAD(align, data_type);
+ BUILD_ZERO_EXT(I64_TYPE);
+ }
+ else
+#endif
+ {
+ BUILD_LOAD(data_type);
+ if (sign)
+ BUILD_SIGN_EXT(I64_TYPE);
+ else
+ BUILD_ZERO_EXT(I64_TYPE);
+ }
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+ PUSH_I64(value);
+ (void)data_type;
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef maddr, value;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
+ return false;
+
+ BUILD_PTR_CAST(F32_PTR_TYPE);
+ BUILD_LOAD(F32_TYPE);
+ PUSH_F32(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef maddr, value;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
+ return false;
+
+ BUILD_PTR_CAST(F64_PTR_TYPE);
+ BUILD_LOAD(F64_TYPE);
+ PUSH_F64(value);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool atomic)
+{
+ LLVMValueRef maddr, value;
+
+ POP_I32(value);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ switch (bytes) {
+ case 4:
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+ break;
+ case 2:
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ BUILD_TRUNC(value, INT16_TYPE);
+ break;
+ case 1:
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ BUILD_TRUNC(value, INT8_TYPE);
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic)
+ BUILD_ATOMIC_STORE(align);
+ else
+#endif
+ BUILD_STORE();
+ return true;
+fail:
+ return false;
+}
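+
+/*
+ * E.g. `i32.store16` lowers, in C terms, to roughly
+ *
+ *   *(int16 *)maddr = (int16)value;   (1-byte-aligned store)
+ *
+ * after the value has been popped and the address bound-checked.
+ */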
+
+bool
+aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool atomic)
+{
+ LLVMValueRef maddr, value;
+
+ POP_I64(value);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ switch (bytes) {
+ case 8:
+ BUILD_PTR_CAST(INT64_PTR_TYPE);
+ break;
+ case 4:
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+ BUILD_TRUNC(value, I32_TYPE);
+ break;
+ case 2:
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ BUILD_TRUNC(value, INT16_TYPE);
+ break;
+ case 1:
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ BUILD_TRUNC(value, INT8_TYPE);
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ if (atomic)
+ BUILD_ATOMIC_STORE(align);
+ else
+#endif
+ BUILD_STORE();
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef maddr, value;
+
+ POP_F32(value);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
+ return false;
+
+ BUILD_PTR_CAST(F32_PTR_TYPE);
+ BUILD_STORE();
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef maddr, value;
+
+ POP_F64(value);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
+ return false;
+
+ BUILD_PTR_CAST(F64_PTR_TYPE);
+ BUILD_STORE();
+ return true;
+fail:
+ return false;
+}
+
+static LLVMValueRef
+get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef mem_size;
+
+ if (func_ctx->mem_space_unchanged) {
+ mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
+ }
+ else {
+ if (!(mem_size = LLVMBuildLoad2(
+ comp_ctx->builder, I32_TYPE,
+ func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ }
+
+ return mem_size;
+fail:
+ return NULL;
+}
+
+bool
+aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
+
+ if (mem_size)
+ PUSH_I32(mem_size);
+ return mem_size ? true : false;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
+ LLVMValueRef delta, param_values[2], ret_value, func, value;
+ LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
+ int32 func_index;
+
+ if (!mem_size)
+ return false;
+
+ POP_I32(delta);
+
+ /* Function type of aot_enlarge_memory() */
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ ret_type = INT8_TYPE;
+
+ if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
+ aot_set_last_error("llvm add function type failed.");
+ return false;
+ }
+
+ if (comp_ctx->is_jit_mode) {
+ /* JIT mode, call the function directly */
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("llvm add pointer type failed.");
+ return false;
+ }
+ if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
+ || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+ func_index =
+ aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ char *func_name = "aot_enlarge_memory";
+        /* AOT mode, declare the function */
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
+ && !(func =
+ LLVMAddFunction(func_ctx->module, func_name, func_type))) {
+ aot_set_last_error("llvm add function failed.");
+ return false;
+ }
+ }
+
+    /* Call wasm_enlarge_memory() (JIT mode) or aot_enlarge_memory()
+       (AOT/indirect mode) */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = delta;
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 2, "call"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
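+    /*
+     * Per the wasm spec, memory.grow leaves the previous page count on
+     * the stack, or -1 if growing failed; the runtime helper returns a
+     * non-zero i8 on success, hence the compare and select below.
+     */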
+ BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
+
+    /* ret_value = ret_value ? prev_page_count : -1 */
+ if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
+ I32_NEG_ONE, "mem_grow_ret"))) {
+ aot_set_last_error("llvm build select failed.");
+ return false;
+ }
+
+ PUSH_I32(ret_value);
+ return true;
+fail:
+ return false;
+}
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+
+static LLVMValueRef
+check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef offset, LLVMValueRef bytes)
+{
+ LLVMValueRef maddr, max_addr, cmp;
+ LLVMValueRef mem_base_addr;
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef check_succ;
+ LLVMValueRef mem_size;
+
+ /* Get memory base address and memory data size */
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ bool is_shared_memory =
+ comp_ctx->comp_data->memories[0].memory_flags & 0x02;
+
+ if (func_ctx->mem_space_unchanged || is_shared_memory) {
+#else
+ if (func_ctx->mem_space_unchanged) {
+#endif
+ mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
+ }
+ else {
+ if (!(mem_base_addr = LLVMBuildLoad2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ }
+
+    /*
+     * Note: no need to throw the integer-overflow exception here, since
+     * it must already have been thrown when converting float to integer
+     * earlier
+     */
+    /* Return the address directly if the offset is constant and falls
+       inside the memory space */
+ if (!LLVMIsUndef(offset) && !LLVMIsUndef(bytes)
+#if LLVM_VERSION_NUMBER >= 12
+ && !LLVMIsPoison(offset) && !LLVMIsPoison(bytes)
+#endif
+ && LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
+ uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
+ uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
+ uint32 num_bytes_per_page =
+ comp_ctx->comp_data->memories[0].num_bytes_per_page;
+ uint32 init_page_count =
+ comp_ctx->comp_data->memories[0].mem_init_page_count;
+ uint32 mem_data_size = num_bytes_per_page * init_page_count;
+ if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
+ /* inside memory space */
+ /* maddr = mem_base_addr + moffset */
+ if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ mem_base_addr, &offset, 1,
+ "maddr"))) {
+ aot_set_last_error("llvm build add failed.");
+ goto fail;
+ }
+ return maddr;
+ }
+ }
+
+ if (func_ctx->mem_space_unchanged) {
+ mem_size = func_ctx->mem_info[0].mem_data_size_addr;
+ }
+ else {
+ if (!(mem_size = LLVMBuildLoad2(
+ comp_ctx->builder, I32_TYPE,
+ func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
+ aot_set_last_error("llvm build load failed.");
+ goto fail;
+ }
+ }
+
+ ADD_BASIC_BLOCK(check_succ, "check_succ");
+ LLVMMoveBasicBlockAfter(check_succ, block_curr);
+
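+    /* Widen offset, length and memory size to i64 before computing
+       (offset + bytes), so the sum cannot wrap around in 32 bits and
+       defeat the bounds check below */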
+ offset =
+ LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
+ bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
+ mem_size =
+ LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");
+
+ BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
+ BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");
+ if (!aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
+ check_succ)) {
+ goto fail;
+ }
+
+ /* maddr = mem_base_addr + offset */
+ if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ mem_base_addr, &offset, 1, "maddr"))) {
+ aot_set_last_error("llvm build add failed.");
+ goto fail;
+ }
+ return maddr;
+fail:
+ return NULL;
+}
+
+bool
+aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 seg_index)
+{
+ LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
+ LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef mem_init_fail, init_success;
+
+ seg = I32_CONST(seg_index);
+
+ POP_I32(len);
+ POP_I32(offset);
+ POP_I32(dst);
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ param_types[3] = I32_TYPE;
+ param_types[4] = I32_TYPE;
+ ret_type = INT8_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
+ else
+ GET_AOT_FUNCTION(aot_memory_init, 5);
+
+    /* Call aot_memory_init() (or llvm_jit_memory_init() in JIT mode) */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = seg;
+ param_values[2] = offset;
+ param_values[3] = len;
+ param_values[4] = dst;
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 5, "call"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");
+
+ ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
+ ADD_BASIC_BLOCK(init_success, "init_success");
+
+ LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
+ LLVMMoveBasicBlockAfter(init_success, block_curr);
+
+ if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
+ mem_init_fail)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+    /* If memory.init failed, return from this function
+       so the runtime can catch the exception */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
+ if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
+ goto fail;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 seg_index)
+{
+ LLVMValueRef seg, param_values[2], ret_value, func, value;
+ LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
+
+ seg = I32_CONST(seg_index);
+ CHECK_LLVM_CONST(seg);
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ ret_type = INT8_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
+ else
+ GET_AOT_FUNCTION(aot_data_drop, 2);
+
+    /* Call aot_data_drop() (or llvm_jit_data_drop() in JIT mode) */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = seg;
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 2, "call"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef src, dst, src_addr, dst_addr, len, res;
+ bool call_aot_memmove = false;
+
+ POP_I32(len);
+ POP_I32(src);
+ POP_I32(dst);
+
+ if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
+ return false;
+
+ if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
+ return false;
+
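+    /* wasm memory.copy allows the source and destination ranges to
+       overlap, so memmove (not memcpy) semantics are required in every
+       code path below */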
+ call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
+ if (call_aot_memmove) {
+ LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
+ LLVMValueRef func, params[3];
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = INT8_PTR_TYPE;
+ param_types[2] = I32_TYPE;
+ ret_type = INT8_PTR_TYPE;
+
+ if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function pointer type failed.");
+ return false;
+ }
+
+ if (comp_ctx->is_jit_mode) {
+ if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
+ || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else {
+ int32 func_index;
+ func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func =
+ aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+
+ params[0] = dst_addr;
+ params[1] = src_addr;
+ params[2] = len;
+ if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
+ 3, "call_memmove"))) {
+ aot_set_last_error("llvm build memmove failed.");
+ return false;
+ }
+ }
+ else {
+ if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
+ 1, len))) {
+ aot_set_last_error("llvm build memmove failed.");
+ return false;
+ }
+ }
+
+ return true;
+fail:
+ return false;
+}
+
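+/* Thin wrapper, presumably so that the JIT has a plain C function
+   address with a known signature to call, rather than taking the
+   address of memset directly (which the host toolchain may lower as a
+   builtin) */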
+static void *
+jit_memset(void *s, int c, size_t n)
+{
+ return memset(s, c, n);
+}
+
+bool
+aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef val, dst, dst_addr, len, res;
+ LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
+ LLVMValueRef func, params[3];
+
+ POP_I32(len);
+ POP_I32(val);
+ POP_I32(dst);
+
+ if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
+ return false;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ ret_type = INT8_PTR_TYPE;
+
+ if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
+ aot_set_last_error("create LLVM function type failed.");
+ return false;
+ }
+
+ if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error("create LLVM function pointer type failed.");
+ return false;
+ }
+
+ if (comp_ctx->is_jit_mode) {
+ if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
+ || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
+ aot_set_last_error("create LLVM value failed.");
+ return false;
+ }
+ }
+ else if (comp_ctx->is_indirect_mode) {
+ int32 func_index;
+ func_index = aot_get_native_symbol_index(comp_ctx, "memset");
+ if (func_index < 0) {
+ return false;
+ }
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_ptr_type, func_index))) {
+ return false;
+ }
+ }
+ else {
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
+ && !(func =
+ LLVMAddFunction(func_ctx->module, "memset", func_type))) {
+ aot_set_last_error("llvm add function failed.");
+ return false;
+ }
+ }
+
+ params[0] = dst_addr;
+ params[1] = val;
+ params[2] = len;
+ if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
+ "call_memset"))) {
+ aot_set_last_error("llvm build memset failed.");
+ return false;
+ }
+
+ return true;
+fail:
+ return false;
+}
+#endif /* end of WASM_ENABLE_BULK_MEMORY */
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+bool
+aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 atomic_op, uint8 op_type, uint32 align,
+ uint32 offset, uint32 bytes)
+{
+ LLVMValueRef maddr, value, result;
+
+ if (op_type == VALUE_TYPE_I32)
+ POP_I32(value);
+ else
+ POP_I64(value);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
+ return false;
+
+ switch (bytes) {
+ case 8:
+ BUILD_PTR_CAST(INT64_PTR_TYPE);
+ break;
+ case 4:
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+ if (op_type == VALUE_TYPE_I64)
+ BUILD_TRUNC(value, I32_TYPE);
+ break;
+ case 2:
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ BUILD_TRUNC(value, INT16_TYPE);
+ break;
+ case 1:
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ BUILD_TRUNC(value, INT8_TYPE);
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+ if (!(result = LLVMBuildAtomicRMW(
+ comp_ctx->builder, atomic_op, maddr, value,
+ LLVMAtomicOrderingSequentiallyConsistent, false))) {
+ goto fail;
+ }
+
+ LLVMSetVolatile(result, true);
+
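+    /* A sub-word atomicrmw yields the old value in the narrow type;
+       wasm requires it zero-extended to the full i32/i64 result */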
+ if (op_type == VALUE_TYPE_I32) {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
+ "result_i32"))) {
+ goto fail;
+ }
+ PUSH_I32(result);
+ }
+ else {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
+ "result_i64"))) {
+ goto fail;
+ }
+ PUSH_I64(result);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 op_type,
+ uint32 align, uint32 offset, uint32 bytes)
+{
+ LLVMValueRef maddr, value, expect, result;
+
+ if (op_type == VALUE_TYPE_I32) {
+ POP_I32(value);
+ POP_I32(expect);
+ }
+ else {
+ POP_I64(value);
+ POP_I64(expect);
+ }
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
+ return false;
+
+ switch (bytes) {
+ case 8:
+ BUILD_PTR_CAST(INT64_PTR_TYPE);
+ break;
+ case 4:
+ BUILD_PTR_CAST(INT32_PTR_TYPE);
+ if (op_type == VALUE_TYPE_I64) {
+ BUILD_TRUNC(value, I32_TYPE);
+ BUILD_TRUNC(expect, I32_TYPE);
+ }
+ break;
+ case 2:
+ BUILD_PTR_CAST(INT16_PTR_TYPE);
+ BUILD_TRUNC(value, INT16_TYPE);
+ BUILD_TRUNC(expect, INT16_TYPE);
+ break;
+ case 1:
+ BUILD_PTR_CAST(INT8_PTR_TYPE);
+ BUILD_TRUNC(value, INT8_TYPE);
+ BUILD_TRUNC(expect, INT8_TYPE);
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+
+ if (!(result = LLVMBuildAtomicCmpXchg(
+ comp_ctx->builder, maddr, expect, value,
+ LLVMAtomicOrderingSequentiallyConsistent,
+ LLVMAtomicOrderingSequentiallyConsistent, false))) {
+ goto fail;
+ }
+
+ LLVMSetVolatile(result, true);
+
+    /* CmpXchg returns an {iN, i1} struct; extract the previous value
+       from the first field */
+ if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
+ "previous_value"))) {
+ goto fail;
+ }
+
+ if (op_type == VALUE_TYPE_I32) {
+ if (LLVMTypeOf(result) != I32_TYPE) {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
+ "result_i32"))) {
+ goto fail;
+ }
+ }
+ PUSH_I32(result);
+ }
+ else {
+ if (LLVMTypeOf(result) != I64_TYPE) {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
+ "result_i64"))) {
+ goto fail;
+ }
+ }
+ PUSH_I64(result);
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 op_type, uint32 align, uint32 offset,
+ uint32 bytes)
+{
+ LLVMValueRef maddr, value, timeout, expect, cmp;
+ LLVMValueRef param_values[5], ret_value, func, is_wait64;
+ LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
+ LLVMBasicBlockRef wait_fail, wait_success;
+ LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+ AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
+
+ POP_I64(timeout);
+ if (op_type == VALUE_TYPE_I32) {
+ POP_I32(expect);
+ is_wait64 = I8_CONST(false);
+ if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
+ "expect_i64"))) {
+ goto fail;
+ }
+ }
+ else {
+ POP_I64(expect);
+ is_wait64 = I8_CONST(true);
+ }
+
+ CHECK_LLVM_CONST(is_wait64);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
+ return false;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = INT8_PTR_TYPE;
+ param_types[2] = I64_TYPE;
+ param_types[3] = I64_TYPE;
+ param_types[4] = INT8_TYPE;
+ ret_type = I32_TYPE;
+
+ GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);
+
+ /* Call function wasm_runtime_atomic_wait() */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = maddr;
+ param_values[2] = expect;
+ param_values[3] = timeout;
+ param_values[4] = is_wait64;
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 5, "call"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
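+    /* wasm_runtime_atomic_wait() is assumed to return -1 on failure and
+       otherwise the wasm result code (0 "ok", 1 "not-equal",
+       2 "timed-out"), which is pushed on the success path below */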
+ BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");
+
+ ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
+ ADD_BASIC_BLOCK(wait_success, "wait_success");
+
+ LLVMMoveBasicBlockAfter(wait_fail, block_curr);
+ LLVMMoveBasicBlockAfter(wait_success, block_curr);
+
+ if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
+ aot_set_last_error("llvm build cond br failed.");
+ goto fail;
+ }
+
+    /* If atomic wait failed, return from this function
+       so the runtime can catch the exception */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
+ if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
+ goto fail;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);
+
+ PUSH_I32(ret_value);
+
+#if WASM_ENABLE_THREAD_MGR != 0
+ /* Insert suspend check point */
+ if (comp_ctx->enable_thread_mgr) {
+ if (!check_suspend_flags(comp_ctx, func_ctx))
+ return false;
+ }
+#endif
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 bytes)
+{
+ LLVMValueRef maddr, value, count;
+ LLVMValueRef param_values[3], ret_value, func;
+ LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
+
+ POP_I32(count);
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
+ return false;
+
+ if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
+ return false;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = INT8_PTR_TYPE;
+ param_types[2] = I32_TYPE;
+ ret_type = I32_TYPE;
+
+ GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);
+
+ /* Call function wasm_runtime_atomic_notify() */
+ param_values[0] = func_ctx->aot_inst;
+ param_values[1] = maddr;
+ param_values[2] = count;
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 3, "call"))) {
+ aot_set_last_error("llvm build call failed.");
+ return false;
+ }
+
+ PUSH_I32(ret_value);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return LLVMBuildFence(comp_ctx->builder,
+ LLVMAtomicOrderingSequentiallyConsistent, false, "")
+ ? true
+ : false;
+}
+
+#endif /* end of WASM_ENABLE_SHARED_MEMORY */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.h
new file mode 100644
index 000000000..e49582e3c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_memory.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_MEMORY_H_
+#define _AOT_EMIT_MEMORY_H_
+
+#include "aot_compiler.h"
+#if WASM_ENABLE_SHARED_MEMORY != 0
+#include "wasm_shared_memory.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool sign,
+ bool atomic);
+
+bool
+aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes, bool sign,
+ bool atomic);
+
+bool
+aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes,
+ bool atomic);
+
+bool
+aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset, uint32 bytes,
+ bool atomic);
+
+bool
+aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+LLVMValueRef
+aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 offset, uint32 bytes);
+
+bool
+aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+#if WASM_ENABLE_BULK_MEMORY != 0
+bool
+aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 seg_index);
+
+bool
+aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 seg_index);
+
+bool
+aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+#endif
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+bool
+aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 atomic_op, uint8 op_type, uint32 align,
+ uint32 offset, uint32 bytes);
+
+bool
+aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 op_type,
+ uint32 align, uint32 offset, uint32 bytes);
+
+bool
+aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 op_type, uint32 align, uint32 offset,
+ uint32 bytes);
+
+bool
+aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 bytes);
+
+bool
+aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+#endif
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_MEMORY_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.c
new file mode 100644
index 000000000..4c63e8a40
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.c
@@ -0,0 +1,1248 @@
+/*
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_numberic.h"
+#include "aot_emit_exception.h"
+#include "aot_emit_control.h"
+#include "../aot/aot_runtime.h"
+#include "../aot/aot_intrinsic.h"
+
+#include <stdarg.h>
+
+#define LLVM_BUILD_ICMP(op, left, right, res, name) \
+ do { \
+ if (!(res = \
+ LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
+ aot_set_last_error("llvm build " name " fail."); \
+ return false; \
+ } \
+ } while (0)
+
+#define LLVM_BUILD_OP(Op, left, right, res, name, err_ret) \
+ do { \
+ if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
+ aot_set_last_error("llvm build " #name " fail."); \
+ return err_ret; \
+ } \
+ } while (0)
+
+#define LLVM_BUILD_OP_OR_INTRINSIC(Op, left, right, res, intrinsic, name, \
+ err_ret) \
+ do { \
+ if (comp_ctx->disable_llvm_intrinsics \
+ && aot_intrinsic_check_capability(comp_ctx, intrinsic)) { \
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, \
+ param_types[0], param_types, 2, \
+ left, right); \
+ } \
+ else { \
+ LLVM_BUILD_OP(Op, left, right, res, name, false); \
+ } \
+ } while (0)
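+
+/*
+ * When LLVM intrinsics are disabled (e.g. for XIP targets) and the
+ * runtime reports the capability, the operation is emitted as a call to
+ * a named AOT intrinsic such as "i32.div_s", presumably resolved against
+ * the runtime's intrinsic table at load time instead of lowered inline.
+ */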
+
+#define ADD_BASIC_BLOCK(block, name) \
+ do { \
+ if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context, \
+ func_ctx->func, name))) { \
+ aot_set_last_error("llvm add basic block failed."); \
+ goto fail; \
+ } \
+ \
+ LLVMMoveBasicBlockAfter(block, LLVMGetInsertBlock(comp_ctx->builder)); \
+ } while (0)
+
+#if LLVM_VERSION_NUMBER >= 12
+#define IS_CONST_ZERO(val) \
+ (!LLVMIsUndef(val) && !LLVMIsPoison(val) && LLVMIsConstant(val) \
+ && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0) \
+ || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
+#else
+#define IS_CONST_ZERO(val) \
+ (!LLVMIsUndef(val) && LLVMIsConstant(val) \
+ && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0) \
+ || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
+#endif
+
+#define CHECK_INT_OVERFLOW(type) \
+ do { \
+ LLVMValueRef cmp_min_int, cmp_neg_one; \
+ LLVM_BUILD_ICMP(LLVMIntEQ, left, type##_MIN, cmp_min_int, \
+ "cmp_min_int"); \
+ LLVM_BUILD_ICMP(LLVMIntEQ, right, type##_NEG_ONE, cmp_neg_one, \
+ "cmp_neg_one"); \
+ LLVM_BUILD_OP(And, cmp_min_int, cmp_neg_one, overflow, "overflow", \
+ false); \
+ } while (0)
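+
+/*
+ * Worked example: INT32_MIN / -1 would be +2^31, which is not
+ * representable in i32, so (left == INT_MIN && right == -1) is the only
+ * overflowing case for signed division.
+ */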
+
+#define PUSH_INT(v) \
+ do { \
+ if (is_i32) \
+ PUSH_I32(v); \
+ else \
+ PUSH_I64(v); \
+ } while (0)
+
+#define POP_INT(v) \
+ do { \
+ if (is_i32) \
+ POP_I32(v); \
+ else \
+ POP_I64(v); \
+ } while (0)
+
+#define PUSH_FLOAT(v) \
+ do { \
+ if (is_f32) \
+ PUSH_F32(v); \
+ else \
+ PUSH_F64(v); \
+ } while (0)
+
+#define POP_FLOAT(v) \
+ do { \
+ if (is_f32) \
+ POP_F32(v); \
+ else \
+ POP_F64(v); \
+ } while (0)
+
+#define DEF_INT_UNARY_OP(op, err) \
+ do { \
+ LLVMValueRef res, operand; \
+ POP_INT(operand); \
+ if (!(res = op)) { \
+ if (err) \
+ aot_set_last_error(err); \
+ return false; \
+ } \
+ PUSH_INT(res); \
+ } while (0)
+
+#define DEF_INT_BINARY_OP(op, err) \
+ do { \
+ LLVMValueRef res, left, right; \
+ POP_INT(right); \
+ POP_INT(left); \
+ if (!(res = op)) { \
+ if (err) \
+ aot_set_last_error(err); \
+ return false; \
+ } \
+ PUSH_INT(res); \
+ } while (0)
+
+#define DEF_FP_UNARY_OP(op, err) \
+ do { \
+ LLVMValueRef res, operand; \
+ POP_FLOAT(operand); \
+ if (!(res = op)) { \
+ if (err) \
+ aot_set_last_error(err); \
+ return false; \
+ } \
+ PUSH_FLOAT(res); \
+ } while (0)
+
+#define DEF_FP_BINARY_OP(op, err) \
+ do { \
+ LLVMValueRef res, left, right; \
+ POP_FLOAT(right); \
+ POP_FLOAT(left); \
+ if (!(res = op)) { \
+ if (err) \
+ aot_set_last_error(err); \
+ return false; \
+ } \
+ PUSH_FLOAT(res); \
+ } while (0)
+
+#define SHIFT_COUNT_MASK \
+ do { \
+        /* LLVM has undefined behavior if the shift count is greater   \
+         * than or equal to the bit width, while the WebAssembly spec  \
+         * requires the shift count to be wrapped.                     \
+         */                                                            \
+ LLVMValueRef shift_count_mask, bits_minus_one; \
+ bits_minus_one = is_i32 ? I32_31 : I64_63; \
+ LLVM_BUILD_OP(And, right, bits_minus_one, shift_count_mask, \
+ "shift_count_mask", NULL); \
+ right = shift_count_mask; \
+ } while (0)
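+
+/*
+ * e.g. for i32 a shift count of 33 is masked to 33 & 31 == 1, so
+ * (x << 33) is compiled as (x << 1), as the wasm spec requires.
+ */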
+
+/* Call llvm constrained floating-point intrinsic */
+static LLVMValueRef
+call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_f32,
+ const char *intrinsic, ...)
+{
+ va_list param_value_list;
+ LLVMValueRef ret;
+ LLVMTypeRef param_types[4], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
+ int param_count = (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, intrinsic))
+ ? 2
+ : 4;
+
+ param_types[0] = param_types[1] = ret_type;
+ param_types[2] = param_types[3] = MD_TYPE;
+
+ va_start(param_value_list, intrinsic);
+
+ ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
+ param_types, param_count, param_value_list);
+
+ va_end(param_value_list);
+
+ return ret;
+}
+
+/* Call llvm constrained libm-equivalent intrinsic */
+static LLVMValueRef
+call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_f32,
+ const char *intrinsic, ...)
+{
+ va_list param_value_list;
+ LLVMValueRef ret;
+ LLVMTypeRef param_types[3], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
+
+ param_types[0] = ret_type;
+ param_types[1] = param_types[2] = MD_TYPE;
+
+ va_start(param_value_list, intrinsic);
+
+ ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
+ param_types, 3, param_value_list);
+
+ va_end(param_value_list);
+
+ return ret;
+}
+
+static LLVMValueRef
+compile_op_float_min_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_f32, LLVMValueRef left, LLVMValueRef right,
+ bool is_min)
+{
+ LLVMTypeRef param_types[2], ret_type = is_f32 ? F32_TYPE : F64_TYPE,
+ int_type = is_f32 ? I32_TYPE : I64_TYPE;
+ LLVMValueRef cmp, is_eq, is_nan, ret, left_int, right_int, tmp,
+ nan = LLVMConstRealOfString(ret_type, "NaN");
+ char *intrinsic = is_min ? (is_f32 ? "llvm.minnum.f32" : "llvm.minnum.f64")
+ : (is_f32 ? "llvm.maxnum.f32" : "llvm.maxnum.f64");
+ CHECK_LLVM_CONST(nan);
+
+ param_types[0] = param_types[1] = ret_type;
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx,
+ is_f32 ? "f32_cmp" : "f64_cmp")) {
+ LLVMTypeRef param_types_intrinsic[3];
+ LLVMValueRef opcond = LLVMConstInt(I32_TYPE, FLOAT_UNO, true);
+ param_types_intrinsic[0] = I32_TYPE;
+ param_types_intrinsic[1] = is_f32 ? F32_TYPE : F64_TYPE;
+ param_types_intrinsic[2] = param_types_intrinsic[1];
+ is_nan = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
+ param_types_intrinsic, 3, opcond, left, right);
+
+ opcond = LLVMConstInt(I32_TYPE, FLOAT_EQ, true);
+ is_eq = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
+ param_types_intrinsic, 3, opcond, left, right);
+
+ if (!is_nan || !is_eq) {
+ return NULL;
+ }
+
+ if (!(is_nan = LLVMBuildIntCast(comp_ctx->builder, is_nan, INT1_TYPE,
+ "bit_cast_is_nan"))) {
+ aot_set_last_error("llvm build is_nan bit cast fail.");
+ return NULL;
+ }
+
+ if (!(is_eq = LLVMBuildIntCast(comp_ctx->builder, is_eq, INT1_TYPE,
+ "bit_cast_is_eq"))) {
+ aot_set_last_error("llvm build is_eq bit cast fail.");
+ return NULL;
+ }
+ }
+ else if (!(is_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, left,
+ right, "is_nan"))
+ || !(is_eq = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOEQ, left,
+ right, "is_eq"))) {
+ aot_set_last_error("llvm build fcmp fail.");
+ return NULL;
+ }
+
+    /* If left and right are equal, they may be zeros with different
+       signs. The WebAssembly spec requires -0 < +0, so do a bitwise
+       operation here to pick the right sign. */
+ if (!(left_int =
+ LLVMBuildBitCast(comp_ctx->builder, left, int_type, "left_int"))
+ || !(right_int = LLVMBuildBitCast(comp_ctx->builder, right, int_type,
+ "right_int"))) {
+ aot_set_last_error("llvm build bitcast fail.");
+ return NULL;
+ }
+
+ if (is_min)
+ LLVM_BUILD_OP_OR_INTRINSIC(Or, left_int, right_int, tmp,
+ is_f32 ? "i32.or" : "i64.or", "tmp_int",
+ false);
+ else
+ LLVM_BUILD_OP_OR_INTRINSIC(And, left_int, right_int, tmp,
+ is_f32 ? "i32.and" : "i64.and", "tmp_int",
+ false);
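+
+    /* Worked example: min(+0.0f, -0.0f): 0x00000000 | 0x80000000 ==
+       0x80000000, i.e. -0.0f; max uses And instead and yields +0.0f */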
+
+ if (!(tmp = LLVMBuildBitCast(comp_ctx->builder, tmp, ret_type, "tmp"))) {
+ aot_set_last_error("llvm build bitcast fail.");
+ return NULL;
+ }
+
+ if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
+ param_types, 2, left, right)))
+ return NULL;
+
+    /* The result of the XIP intrinsic is 0 or 1; return it directly */
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx,
+ is_f32 ? "f32_cmp" : "f64_cmp")) {
+ return cmp;
+ }
+
+ if (!(cmp = LLVMBuildSelect(comp_ctx->builder, is_eq, tmp, cmp, "cmp"))) {
+ aot_set_last_error("llvm build select fail.");
+ return NULL;
+ }
+
+ if (!(ret = LLVMBuildSelect(comp_ctx->builder, is_nan, nan, cmp,
+ is_min ? "min" : "max"))) {
+ aot_set_last_error("llvm build select fail.");
+ return NULL;
+ }
+
+ return ret;
+fail:
+ return NULL;
+}
+
+typedef enum BitCountType {
+ CLZ32 = 0,
+ CLZ64,
+ CTZ32,
+ CTZ64,
+ POP_CNT32,
+ POP_CNT64
+} BitCountType;
+
+/* clang-format off */
+static char *bit_cnt_llvm_intrinsic[] = {
+ "llvm.ctlz.i32",
+ "llvm.ctlz.i64",
+ "llvm.cttz.i32",
+ "llvm.cttz.i64",
+ "llvm.ctpop.i32",
+ "llvm.ctpop.i64",
+};
+/* clang-format on */
+
+static bool
+aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ BitCountType type, bool is_i32)
+{
+ LLVMValueRef zero_undef;
+ LLVMTypeRef ret_type, param_types[2];
+
+ param_types[0] = ret_type = is_i32 ? I32_TYPE : I64_TYPE;
+ param_types[1] = LLVMInt1TypeInContext(comp_ctx->context);
+
+ zero_undef = LLVMConstInt(param_types[1], false, true);
+ CHECK_LLVM_CONST(zero_undef);
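+
+    /* Passing is_zero_undef = false makes llvm.ctlz/llvm.cttz well
+       defined for a zero input, yielding the bit width (32/64) as the
+       wasm clz/ctz opcodes require; llvm.ctpop takes a single operand */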
+
+ /* Call the LLVM intrinsic function */
+ if (type < POP_CNT32)
+ DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
+ ret_type, param_types, 2, operand, zero_undef),
+ NULL);
+ else
+ DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
+ ret_type, param_types, 1, operand),
+ NULL);
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool
+compile_rems(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef left, LLVMValueRef right, LLVMValueRef overflow_cond,
+ bool is_i32)
+{
+ LLVMValueRef phi, no_overflow_value, zero = is_i32 ? I32_ZERO : I64_ZERO;
+ LLVMBasicBlockRef block_curr, no_overflow_block, rems_end_block;
+ LLVMTypeRef param_types[2];
+
+ param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
+
+ block_curr = LLVMGetInsertBlock(comp_ctx->builder);
+
+ /* Add 2 blocks: no_overflow_block and rems_end block */
+ ADD_BASIC_BLOCK(rems_end_block, "rems_end");
+ ADD_BASIC_BLOCK(no_overflow_block, "rems_no_overflow");
+
+ /* Create condition br */
+ if (!LLVMBuildCondBr(comp_ctx->builder, overflow_cond, rems_end_block,
+ no_overflow_block)) {
+ aot_set_last_error("llvm build cond br failed.");
+ return false;
+ }
+
+ /* Translate no_overflow_block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, no_overflow_block);
+
+ LLVM_BUILD_OP_OR_INTRINSIC(SRem, left, right, no_overflow_value,
+ is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s",
+ false);
+
+ /* Jump to rems_end block */
+ if (!LLVMBuildBr(comp_ctx->builder, rems_end_block)) {
+ aot_set_last_error("llvm build br failed.");
+ return false;
+ }
+
+ /* Translate rems_end_block */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, rems_end_block);
+
+ /* Create result phi */
+ if (!(phi = LLVMBuildPhi(comp_ctx->builder, is_i32 ? I32_TYPE : I64_TYPE,
+ "rems_result_phi"))) {
+ aot_set_last_error("llvm build phi failed.");
+ return false;
+ }
+
+ /* Add phi incoming values */
+ LLVMAddIncoming(phi, &no_overflow_value, &no_overflow_block, 1);
+ LLVMAddIncoming(phi, &zero, &block_curr, 1);
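+
+    /* The zero incoming from block_curr covers INT_MIN rem -1, which
+       the wasm spec defines to be 0 rather than a trap */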
+
+ if (is_i32)
+ PUSH_I32(phi);
+ else
+ PUSH_I64(phi);
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool
+compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntArithmetic arith_op, bool is_i32, uint8 **p_frame_ip)
+{
+ LLVMValueRef left, right, cmp_div_zero, overflow, res;
+ LLVMBasicBlockRef check_div_zero_succ, check_overflow_succ;
+ LLVMTypeRef param_types[2];
+ const char *intrinsic = NULL;
+
+ param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
+
+ bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
+ || arith_op == INT_REM_S || arith_op == INT_REM_U);
+
+ POP_INT(right);
+ POP_INT(left);
+
+ if (LLVMIsUndef(right) || LLVMIsUndef(left)
+#if LLVM_VERSION_NUMBER >= 12
+ || LLVMIsPoison(right) || LLVMIsPoison(left)
+#endif
+ ) {
+ if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
+ false, NULL, NULL))) {
+ goto fail;
+ }
+ return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
+ }
+
+ if (LLVMIsConstant(right)) {
+ int64 right_val = (int64)LLVMConstIntGetSExtValue(right);
+ switch (right_val) {
+ case 0:
+                /* Directly throw the exception when the divisor is
+                   constant zero */
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INTEGER_DIVIDE_BY_ZERO, false,
+ NULL, NULL)))
+ goto fail;
+
+ return aot_handle_next_reachable_block(comp_ctx, func_ctx,
+ p_frame_ip);
+ case 1:
+ if (arith_op == INT_DIV_S || arith_op == INT_DIV_U)
+ PUSH_INT(left);
+ else
+ PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
+ return true;
+ case -1:
+ if (arith_op == INT_DIV_S) {
+ LLVM_BUILD_ICMP(LLVMIntEQ, left, is_i32 ? I32_MIN : I64_MIN,
+ overflow, "overflow");
+ ADD_BASIC_BLOCK(check_overflow_succ,
+ "check_overflow_success");
+
+ /* Throw conditional exception if overflow */
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INTEGER_OVERFLOW, true,
+ overflow, check_overflow_succ)))
+ goto fail;
+
+ /* Push -(left) to stack */
+ if (!(res = LLVMBuildNeg(comp_ctx->builder, left, "neg"))) {
+ aot_set_last_error("llvm build neg fail.");
+ return false;
+ }
+ PUSH_INT(res);
+ return true;
+ }
+ else if (arith_op == INT_REM_S) {
+ PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
+ return true;
+ }
+ else {
+ /* fall to default */
+ goto handle_default;
+ }
+ handle_default:
+ default:
+ /* Build div */
+ switch (arith_op) {
+ case INT_DIV_S:
+ LLVM_BUILD_OP_OR_INTRINSIC(
+ SDiv, left, right, res,
+ is_i32 ? "i32.div_s" : "i64.div_s", "div_s", false);
+ break;
+ case INT_DIV_U:
+ LLVM_BUILD_OP_OR_INTRINSIC(
+ UDiv, left, right, res,
+ is_i32 ? "i32.div_u" : "i64.div_u", "div_u", false);
+ break;
+ case INT_REM_S:
+ LLVM_BUILD_OP_OR_INTRINSIC(
+ SRem, left, right, res,
+ is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s", false);
+ break;
+ case INT_REM_U:
+ LLVM_BUILD_OP_OR_INTRINSIC(
+ URem, left, right, res,
+ is_i32 ? "i32.rem_u" : "i64.rem_u", "rem_u", false);
+ break;
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+ PUSH_INT(res);
+ return true;
+ }
+ }
+ else {
+        /* Check division by zero */
+ LLVM_BUILD_ICMP(LLVMIntEQ, right, is_i32 ? I32_ZERO : I64_ZERO,
+ cmp_div_zero, "cmp_div_zero");
+ ADD_BASIC_BLOCK(check_div_zero_succ, "check_div_zero_success");
+
+ /* Throw conditional exception if divided by zero */
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INTEGER_DIVIDE_BY_ZERO, true,
+ cmp_div_zero, check_div_zero_succ)))
+ goto fail;
+
+ switch (arith_op) {
+ case INT_DIV_S:
+ /* Check integer overflow */
+ if (is_i32)
+ CHECK_INT_OVERFLOW(I32);
+ else
+ CHECK_INT_OVERFLOW(I64);
+
+ ADD_BASIC_BLOCK(check_overflow_succ, "check_overflow_success");
+
+ /* Throw conditional exception if integer overflow */
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_INTEGER_OVERFLOW, true, overflow,
+ check_overflow_succ)))
+ goto fail;
+
+ LLVM_BUILD_OP_OR_INTRINSIC(SDiv, left, right, res,
+ is_i32 ? "i32.div_s" : "i64.div_s",
+ "div_s", false);
+ PUSH_INT(res);
+ return true;
+ case INT_DIV_U:
+ intrinsic = is_i32 ? "i32.div_u" : "i64.div_u";
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {
+ res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ param_types[0], param_types,
+ 2, left, right);
+ }
+ else {
+ LLVM_BUILD_OP(UDiv, left, right, res, "div_u", false);
+ }
+ PUSH_INT(res);
+ return true;
+ case INT_REM_S:
+                /* The WebAssembly spec requires INT_MIN rem -1 to
+                   return 0 rather than trap */
+ if (is_i32)
+ CHECK_INT_OVERFLOW(I32);
+ else
+ CHECK_INT_OVERFLOW(I64);
+ return compile_rems(comp_ctx, func_ctx, left, right, overflow,
+ is_i32);
+ case INT_REM_U:
+ LLVM_BUILD_OP_OR_INTRINSIC(URem, left, right, res,
+ is_i32 ? "i32.rem_u" : "i64.rem_u",
+ "rem_u", false);
+ PUSH_INT(res);
+ return true;
+ default:
+ bh_assert(0);
+ return false;
+ }
+ }
+
+fail:
+ return false;
+}
+
+static LLVMValueRef
+compile_int_add(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
+ bool is_i32)
+{
+ /* If one of the operands is 0, just return the other */
+ if (IS_CONST_ZERO(left))
+ return right;
+ if (IS_CONST_ZERO(right))
+ return left;
+
+ /* Build add */
+ return LLVMBuildAdd(comp_ctx->builder, left, right, "add");
+}
+
+static LLVMValueRef
+compile_int_sub(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
+ bool is_i32)
+{
+ /* If the right operand is 0, just return the left */
+ if (IS_CONST_ZERO(right))
+ return left;
+
+ /* Build sub */
+ return LLVMBuildSub(comp_ctx->builder, left, right, "sub");
+}
+
+static LLVMValueRef
+compile_int_mul(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
+ bool is_i32)
+{
+ /* If one of the operands is 0, just return constant 0 */
+ if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right))
+ return is_i32 ? I32_ZERO : I64_ZERO;
+
+ /* Build mul */
+ return LLVMBuildMul(comp_ctx->builder, left, right, "mul");
+}
+
+static bool
+compile_op_int_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntArithmetic arith_op, bool is_i32,
+ uint8 **p_frame_ip)
+{
+ switch (arith_op) {
+ case INT_ADD:
+ DEF_INT_BINARY_OP(compile_int_add(comp_ctx, left, right, is_i32),
+ "compile int add fail.");
+ return true;
+ case INT_SUB:
+ DEF_INT_BINARY_OP(compile_int_sub(comp_ctx, left, right, is_i32),
+ "compile int sub fail.");
+ return true;
+ case INT_MUL:
+ DEF_INT_BINARY_OP(compile_int_mul(comp_ctx, left, right, is_i32),
+ "compile int mul fail.");
+ return true;
+ case INT_DIV_S:
+ case INT_DIV_U:
+ case INT_REM_S:
+ case INT_REM_U:
+ return compile_int_div(comp_ctx, func_ctx, arith_op, is_i32,
+ p_frame_ip);
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+fail:
+ return false;
+}
+
+static bool
+compile_op_int_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntBitwise bitwise_op, bool is_i32)
+{
+ switch (bitwise_op) {
+ case INT_AND:
+ DEF_INT_BINARY_OP(
+ LLVMBuildAnd(comp_ctx->builder, left, right, "and"),
+ "llvm build and fail.");
+ return true;
+ case INT_OR:
+ DEF_INT_BINARY_OP(LLVMBuildOr(comp_ctx->builder, left, right, "or"),
+ "llvm build or fail.");
+ return true;
+ case INT_XOR:
+ DEF_INT_BINARY_OP(
+ LLVMBuildXor(comp_ctx->builder, left, right, "xor"),
+ "llvm build xor fail.");
+ return true;
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+fail:
+ return false;
+}
+
+static LLVMValueRef
+compile_int_shl(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
+ bool is_i32)
+{
+ LLVMValueRef res;
+
+ if (strcmp(comp_ctx->target_arch, "x86_64") != 0
+ && strcmp(comp_ctx->target_arch, "i386") != 0)
+ SHIFT_COUNT_MASK;
+
+ /* Build shl */
+ LLVM_BUILD_OP(Shl, left, right, res, "shl", NULL);
+
+ return res;
+}
+
+static LLVMValueRef
+compile_int_shr_s(AOTCompContext *comp_ctx, LLVMValueRef left,
+ LLVMValueRef right, bool is_i32)
+{
+ LLVMValueRef res;
+
+ if (strcmp(comp_ctx->target_arch, "x86_64") != 0
+ && strcmp(comp_ctx->target_arch, "i386") != 0)
+ SHIFT_COUNT_MASK;
+
+    /* Build shr_s */
+ LLVM_BUILD_OP(AShr, left, right, res, "shr_s", NULL);
+
+ return res;
+}
+
+static LLVMValueRef
+compile_int_shr_u(AOTCompContext *comp_ctx, LLVMValueRef left,
+ LLVMValueRef right, bool is_i32)
+{
+ LLVMValueRef res;
+
+ if (strcmp(comp_ctx->target_arch, "x86_64") != 0
+ && strcmp(comp_ctx->target_arch, "i386") != 0)
+ SHIFT_COUNT_MASK;
+
+    /* Build shr_u */
+ LLVM_BUILD_OP(LShr, left, right, res, "shr_u", NULL);
+
+ return res;
+}
+
+static LLVMValueRef
+compile_int_rot(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
+ bool is_rotl, bool is_i32)
+{
+ LLVMValueRef bits_minus_shift_count, res, tmp_l, tmp_r;
+ char *name = is_rotl ? "rotl" : "rotr";
+
+ SHIFT_COUNT_MASK;
+
+ /* rotl/rotr with 0 */
+ if (IS_CONST_ZERO(right))
+ return left;
+
+    /* Calculate (bits - shift_count) */
+ LLVM_BUILD_OP(Sub, is_i32 ? I32_32 : I64_64, right, bits_minus_shift_count,
+ "bits_minus_shift_count", NULL);
+
+ if (is_rotl) {
+ /* left<<count | left>>(BITS-count) */
+ LLVM_BUILD_OP(Shl, left, right, tmp_l, "tmp_l", NULL);
+ LLVM_BUILD_OP(LShr, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
+ }
+ else {
+ /* left>>count | left<<(BITS-count) */
+ LLVM_BUILD_OP(LShr, left, right, tmp_l, "tmp_l", NULL);
+ LLVM_BUILD_OP(Shl, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
+ }
+
+ LLVM_BUILD_OP(Or, tmp_l, tmp_r, res, name, NULL);
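+
+    /* e.g. for i32, rotl(0x80000001, 1): (x << 1) == 0x00000002,
+       (x >> 31) == 0x00000001, or'ed together == 0x00000003 */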
+
+ return res;
+}
+
+static bool
+compile_op_int_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op, bool is_i32)
+{
+ switch (shift_op) {
+ case INT_SHL:
+ DEF_INT_BINARY_OP(compile_int_shl(comp_ctx, left, right, is_i32),
+ NULL);
+ return true;
+ case INT_SHR_S:
+ DEF_INT_BINARY_OP(compile_int_shr_s(comp_ctx, left, right, is_i32),
+ NULL);
+ return true;
+ case INT_SHR_U:
+ DEF_INT_BINARY_OP(compile_int_shr_u(comp_ctx, left, right, is_i32),
+ NULL);
+ return true;
+ case INT_ROTL:
+ DEF_INT_BINARY_OP(
+ compile_int_rot(comp_ctx, left, right, true, is_i32), NULL);
+ return true;
+ case INT_ROTR:
+ DEF_INT_BINARY_OP(
+ compile_int_rot(comp_ctx, left, right, false, is_i32), NULL);
+ return true;
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+fail:
+ return false;
+}
+
+static bool
+is_target_arm(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "arm", 3)
+ || !strncmp(comp_ctx->target_arch, "aarch64", 7)
+ || !strncmp(comp_ctx->target_arch, "thumb", 5);
+}
+
+static bool
+is_target_x86(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "x86_64", 6)
+ || !strncmp(comp_ctx->target_arch, "i386", 4);
+}
+
+static bool
+is_target_xtensa(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "xtensa", 6);
+}
+
+static bool
+is_target_mips(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "mips", 4);
+}
+
+static bool
+is_target_riscv(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "riscv", 5);
+}
+
+static bool
+is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
+{
+ bool ret = false;
+ char *feature_string;
+
+ if (!(feature_string =
+ LLVMGetTargetMachineFeatureString(comp_ctx->target_machine))) {
+ aot_set_last_error("llvm get target machine feature string fail.");
+ return false;
+ }
+
+    /* Note:
+     * LLVM CodeGen uses FPU Coprocessor registers by default, so the
+     * user must specify '--cpu-features=+soft-float' to wamrc if the
+     * target doesn't have or enable an FPU on arm, x86 or mips. */
+ if (is_target_arm(comp_ctx) || is_target_x86(comp_ctx)
+ || is_target_mips(comp_ctx)) {
+ ret = strstr(feature_string, "+soft-float") ? true : false;
+ }
+ else if (is_target_xtensa(comp_ctx)) {
+        /* Note:
+         * 1. The Floating-Point Coprocessor Option of xtensa only
+         * supports single-precision floating-point operations, so
+         * soft-float must be used for f64 (i.e. double).
+         * 2. LLVM CodeGen uses Floating-Point Coprocessor registers by
+         * default, so the user must specify '--cpu-features=-fp' to
+         * wamrc if the target doesn't have or enable the Floating-Point
+         * Coprocessor Option on xtensa.
+         */
+ if (comp_ctx->disable_llvm_intrinsics)
+ ret = false;
+ else
+ ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
+ }
+ else if (is_target_riscv(comp_ctx)) {
+ /*
+ * Note: Use builtin intrinsics since hardware float operation
+ * will cause rodata relocation, this will try to use hardware
+ * float unit (by return false) but handled by software finally
+ */
+ if (comp_ctx->disable_llvm_intrinsics)
+ ret = false;
+ else
+ ret = !strstr(feature_string, "+d") ? true : false;
+ }
+ else {
+ ret = true;
+ }
+
+ LLVMDisposeMessage(feature_string);
+ return ret;
+}
+
+static bool
+compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op, bool is_f32)
+{
+ switch (arith_op) {
+ case FLOAT_ADD:
+ if (is_targeting_soft_float(comp_ctx, is_f32))
+ DEF_FP_BINARY_OP(
+ LLVMBuildFAdd(comp_ctx->builder, left, right, "fadd"),
+ "llvm build fadd fail.");
+ else
+ DEF_FP_BINARY_OP(
+ call_llvm_float_experimental_constrained_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ (is_f32 ? "llvm.experimental.constrained.fadd.f32"
+ : "llvm.experimental.constrained.fadd.f64"),
+ left, right, comp_ctx->fp_rounding_mode,
+ comp_ctx->fp_exception_behavior),
+ NULL);
+ return true;
+ case FLOAT_SUB:
+ if (is_targeting_soft_float(comp_ctx, is_f32))
+ DEF_FP_BINARY_OP(
+ LLVMBuildFSub(comp_ctx->builder, left, right, "fsub"),
+ "llvm build fsub fail.");
+ else
+ DEF_FP_BINARY_OP(
+ call_llvm_float_experimental_constrained_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ (is_f32 ? "llvm.experimental.constrained.fsub.f32"
+ : "llvm.experimental.constrained.fsub.f64"),
+ left, right, comp_ctx->fp_rounding_mode,
+ comp_ctx->fp_exception_behavior),
+ NULL);
+ return true;
+ case FLOAT_MUL:
+ if (is_targeting_soft_float(comp_ctx, is_f32))
+ DEF_FP_BINARY_OP(
+ LLVMBuildFMul(comp_ctx->builder, left, right, "fmul"),
+ "llvm build fmul fail.");
+ else
+ DEF_FP_BINARY_OP(
+ call_llvm_float_experimental_constrained_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ (is_f32 ? "llvm.experimental.constrained.fmul.f32"
+ : "llvm.experimental.constrained.fmul.f64"),
+ left, right, comp_ctx->fp_rounding_mode,
+ comp_ctx->fp_exception_behavior),
+ NULL);
+ return true;
+ case FLOAT_DIV:
+ if (is_targeting_soft_float(comp_ctx, is_f32))
+ DEF_FP_BINARY_OP(
+ LLVMBuildFDiv(comp_ctx->builder, left, right, "fdiv"),
+ "llvm build fdiv fail.");
+ else
+ DEF_FP_BINARY_OP(
+ call_llvm_float_experimental_constrained_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ (is_f32 ? "llvm.experimental.constrained.fdiv.f32"
+ : "llvm.experimental.constrained.fdiv.f64"),
+ left, right, comp_ctx->fp_rounding_mode,
+ comp_ctx->fp_exception_behavior),
+ NULL);
+ return true;
+ case FLOAT_MIN:
+ DEF_FP_BINARY_OP(compile_op_float_min_max(
+ comp_ctx, func_ctx, is_f32, left, right, true),
+ NULL);
+ return true;
+ case FLOAT_MAX:
+ DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx, func_ctx,
+ is_f32, left, right,
+ false),
+ NULL);
+
+ return true;
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+fail:
+ return false;
+}
+
+static LLVMValueRef
+call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_f32,
+ const char *intrinsic, ...)
+{
+ va_list param_value_list;
+ LLVMValueRef ret;
+ LLVMTypeRef param_type, ret_type = is_f32 ? F32_TYPE : F64_TYPE;
+
+ param_type = ret_type;
+
+ va_start(param_value_list, intrinsic);
+
+ ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
+ &param_type, 1, param_value_list);
+
+ va_end(param_value_list);
+
+ return ret;
+}
+
+static bool
+compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatMath math_op, bool is_f32)
+{
+ switch (math_op) {
+ case FLOAT_ABS:
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.fabs.f32" : "llvm.fabs.f64",
+ operand),
+ NULL);
+ return true;
+ case FLOAT_NEG:
+ DEF_FP_UNARY_OP(LLVMBuildFNeg(comp_ctx->builder, operand, "fneg"),
+ "llvm build fneg fail.");
+ return true;
+
+ case FLOAT_CEIL:
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.ceil.f32" : "llvm.ceil.f64",
+ operand),
+ NULL);
+ return true;
+ case FLOAT_FLOOR:
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.floor.f32" : "llvm.floor.f64",
+ operand),
+ NULL);
+ return true;
+ case FLOAT_TRUNC:
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.trunc.f32" : "llvm.trunc.f64",
+ operand),
+ NULL);
+ return true;
+ case FLOAT_NEAREST:
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.rint.f32" : "llvm.rint.f64",
+ operand),
+ NULL);
+ return true;
+ case FLOAT_SQRT:
+ if (is_targeting_soft_float(comp_ctx, is_f32)
+ || comp_ctx->disable_llvm_intrinsics)
+ DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ is_f32 ? "llvm.sqrt.f32" : "llvm.sqrt.f64",
+ operand),
+ NULL);
+ else
+ DEF_FP_UNARY_OP(
+ call_llvm_libm_experimental_constrained_intrinsic(
+ comp_ctx, func_ctx, is_f32,
+ (is_f32 ? "llvm.experimental.constrained.sqrt.f32"
+ : "llvm.experimental.constrained.sqrt.f64"),
+ operand, comp_ctx->fp_rounding_mode,
+ comp_ctx->fp_exception_behavior),
+ NULL);
+ return true;
+ default:
+ bh_assert(0);
+ return false;
+ }
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool
+compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_f32)
+{
+ LLVMTypeRef ret_type, param_types[2];
+
+ param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;
+
+ DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx,
+ is_f32 ? "llvm.copysign.f32" : "llvm.copysign.f64",
+ ret_type, param_types, 2, left, right),
+ NULL);
+ return true;
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_i32_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ32, true);
+}
+
+bool
+aot_compile_op_i32_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ32, true);
+}
+
+bool
+aot_compile_op_i32_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT32, true);
+}
+
+bool
+aot_compile_op_i64_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ64, false);
+}
+
+bool
+aot_compile_op_i64_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ64, false);
+}
+
+bool
+aot_compile_op_i64_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT64, false);
+}
+
+bool
+aot_compile_op_i32_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntArithmetic arith_op,
+ uint8 **p_frame_ip)
+{
+ return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, true,
+ p_frame_ip);
+}
+
+bool
+aot_compile_op_i64_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntArithmetic arith_op,
+ uint8 **p_frame_ip)
+{
+ return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, false,
+ p_frame_ip);
+}
+
+bool
+aot_compile_op_i32_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntBitwise bitwise_op)
+{
+ return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, true);
+}
+
+bool
+aot_compile_op_i64_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntBitwise bitwise_op)
+{
+ return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, false);
+}
+
+bool
+aot_compile_op_i32_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return compile_op_int_shift(comp_ctx, func_ctx, shift_op, true);
+}
+
+bool
+aot_compile_op_i64_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return compile_op_int_shift(comp_ctx, func_ctx, shift_op, false);
+}
+
+bool
+aot_compile_op_f32_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatMath math_op)
+{
+ return compile_op_float_math(comp_ctx, func_ctx, math_op, true);
+}
+
+bool
+aot_compile_op_f64_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatMath math_op)
+{
+ return compile_op_float_math(comp_ctx, func_ctx, math_op, false);
+}
+
+bool
+aot_compile_op_f32_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, true);
+}
+
+bool
+aot_compile_op_f64_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, false);
+}
+
+bool
+aot_compile_op_f32_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return compile_float_copysign(comp_ctx, func_ctx, true);
+}
+
+bool
+aot_compile_op_f64_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return compile_float_copysign(comp_ctx, func_ctx, false);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.h
new file mode 100644
index 000000000..7206315df
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_numberic.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_NUMBERIC_H_
+#define _AOT_EMIT_NUMBERIC_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_i32_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i32_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i32_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i64_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i64_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i64_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_i32_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntArithmetic arith_op,
+ uint8 **p_frame_ip);
+
+bool
+aot_compile_op_i64_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntArithmetic arith_op,
+ uint8 **p_frame_ip);
+
+bool
+aot_compile_op_i32_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntBitwise bitwise_op);
+
+bool
+aot_compile_op_i64_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntBitwise bitwise_op);
+
+bool
+aot_compile_op_i32_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_op_i64_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_op_f32_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatMath math_op);
+
+bool
+aot_compile_op_f64_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatMath math_op);
+
+bool
+aot_compile_op_f32_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_op_f64_arithmetic(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_op_f32_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_op_f64_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_NUMBERIC_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.c
new file mode 100644
index 000000000..8b1a9e6da
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_parametric.h"
+
+static bool
+pop_value_from_wasm_stack(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMValueRef *p_value, bool is_32, uint8 *p_type)
+{
+ AOTValue *aot_value;
+ uint8 type;
+
+ if (!func_ctx->block_stack.block_list_end) {
+ aot_set_last_error("WASM block stack underflow.");
+ return false;
+ }
+ if (!func_ctx->block_stack.block_list_end->value_stack.value_list_end) {
+ aot_set_last_error("WASM data stack underflow.");
+ return false;
+ }
+
+ aot_value =
+ aot_value_stack_pop(&func_ctx->block_stack.block_list_end->value_stack);
+ type = aot_value->type;
+
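+ /* LLVM comparisons produce i1 values and wasm has no 1-bit type, so
+ a VALUE_TYPE_I1 on the stack is widened to i32 (zero-extension is
+ enough, as an i1 only holds 0 or 1) before being dropped/selected. */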
+ if (aot_value->type == VALUE_TYPE_I1) {
+ if (!(aot_value->value =
+ LLVMBuildZExt(comp_ctx->builder, aot_value->value, I32_TYPE,
+ "val_z_ext"))) {
+ aot_set_last_error("llvm build zero ext failed.");
+ return false;
+ }
+ type = aot_value->type = VALUE_TYPE_I32;
+ }
+
+ if (p_type != NULL) {
+ *p_type = aot_value->type;
+ }
+ if (p_value != NULL) {
+ *p_value = aot_value->value;
+ }
+
+ wasm_runtime_free(aot_value);
+
+ /* is_32: i32, f32, ref.func, ref.extern, v128 */
+ if (is_32
+ && !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32
+ || type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF
+ || type == VALUE_TYPE_V128)) {
+ aot_set_last_error("invalid WASM stack data type.");
+ return false;
+ }
+
+ /* !is_32: i64, f64 */
+ if (!is_32 && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) {
+ aot_set_last_error("invalid WASM stack data type.");
+ return false;
+ }
+
+ return true;
+}
+
+bool
+aot_compile_op_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_drop_32)
+{
+ if (!pop_value_from_wasm_stack(comp_ctx, func_ctx, NULL, is_drop_32, NULL))
+ return false;
+
+ return true;
+}
+
+bool
+aot_compile_op_select(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_select_32)
+{
+ LLVMValueRef val1, val2, cond, selected;
+ uint8 val1_type, val2_type;
+
+ POP_COND(cond);
+
+ if (!pop_value_from_wasm_stack(comp_ctx, func_ctx, &val2, is_select_32,
+ &val2_type)
+ || !pop_value_from_wasm_stack(comp_ctx, func_ctx, &val1, is_select_32,
+ &val1_type))
+ return false;
+
+ if (val1_type != val2_type) {
+ aot_set_last_error("invalid stack values with different types.");
+ return false;
+ }
+
+ if (!(selected =
+ LLVMBuildSelect(comp_ctx->builder, cond, val1, val2, "select"))) {
+ aot_set_last_error("llvm build select failed.");
+ return false;
+ }
+
+ PUSH(selected, val1_type);
+
+ return true;
+
+fail:
+ return false;
+}
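+
+/* Sketch of the lowering above (illustrative IR, not emitted verbatim):
+ * a wasm select with i32 operands and condition %cond becomes a single
+ * LLVM instruction:
+ *
+ *   %select = select i1 %cond, i32 %val1, i32 %val2
+ */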
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.h
new file mode 100644
index 000000000..68fe8f11d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_parametric.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_PARAMETRIC_H_
+#define _AOT_EMIT_PARAMETRIC_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_drop_32);
+
+bool
+aot_compile_op_select(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_select_32);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_PARAMETRIC_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.c
new file mode 100644
index 000000000..d8a5efd91
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_table.h"
+#include "aot_emit_exception.h"
+#include "../aot/aot_runtime.h"
+
+uint64
+get_tbl_inst_offset(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, uint32 tbl_idx)
+{
+ uint64 offset = 0, i = 0;
+ AOTImportTable *imp_tbls = comp_ctx->comp_data->import_tables;
+ AOTTable *tbls = comp_ctx->comp_data->tables;
+
+ offset =
+ offsetof(AOTModuleInstance, global_table_data.bytes)
+ + (uint64)comp_ctx->comp_data->memory_count * sizeof(AOTMemoryInstance)
+ + comp_ctx->comp_data->global_data_size;
+
+ while (i < tbl_idx && i < comp_ctx->comp_data->import_table_count) {
+ offset += offsetof(AOTTableInstance, elems);
+ /* avoid loading from current AOTTableInstance */
+ offset +=
+ sizeof(uint32)
+ * aot_get_imp_tbl_data_slots(imp_tbls + i, comp_ctx->is_jit_mode);
+ ++i;
+ }
+
+ if (i == tbl_idx) {
+ return offset;
+ }
+
+ tbl_idx -= comp_ctx->comp_data->import_table_count;
+ i -= comp_ctx->comp_data->import_table_count;
+ while (i < tbl_idx && i < comp_ctx->comp_data->table_count) {
+ offset += offsetof(AOTTableInstance, elems);
+ /* avoid loading from current AOTTableInstance */
+ offset += sizeof(uint32)
+ * aot_get_tbl_data_slots(tbls + i, comp_ctx->is_jit_mode);
+ ++i;
+ }
+
+ return offset;
+}
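+
+/* Layout walked by the offset computation above (a sketch inferred from
+ * this code, not an authoritative definition): the module instance
+ * packs, starting at global_table_data.bytes,
+ *
+ *   AOTMemoryInstance[memory_count]
+ *   global data (global_data_size bytes)
+ *   AOTTableInstance #0: header + uint32 elems[slots]
+ *   AOTTableInstance #1: ...
+ *
+ * so a table's instance offset is the sum of everything before it. */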
+
+#if WASM_ENABLE_REF_TYPES != 0
+
+LLVMValueRef
+aot_compile_get_tbl_inst(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMValueRef offset, tbl_inst;
+
+ if (!(offset =
+ I64_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(tbl_inst = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "tbl_inst"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ return tbl_inst;
+fail:
+ return NULL;
+}
+
+bool
+aot_compile_op_elem_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_seg_idx)
+{
+ LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
+ LLVMValueRef param_values[2], ret_value, func, value;
+
+ /* void aot_drop_table_seg(AOTModuleInstance *, uint32) */
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ ret_type = VOID_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_drop_table_seg, 2);
+ else
+ GET_AOT_FUNCTION(aot_drop_table_seg, 2);
+
+ param_values[0] = func_ctx->aot_inst;
+ if (!(param_values[1] = I32_CONST(tbl_seg_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* "" means return void */
+ if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
+ param_values, 2, ""))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+aot_check_table_access(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx, LLVMValueRef elem_idx)
+{
+ LLVMValueRef offset, tbl_sz, cmp_elem_idx;
+ LLVMBasicBlockRef check_elem_idx_succ;
+
+ /* get the current size of the table instance */
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, cur_size)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(tbl_sz = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "cur_size_i8p"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(tbl_sz = LLVMBuildBitCast(comp_ctx->builder, tbl_sz, INT32_PTR_TYPE,
+ "cur_size_i32p"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ if (!(tbl_sz = LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, tbl_sz,
+ "cur_size"))) {
+ HANDLE_FAILURE("LLVMBuildLoad");
+ goto fail;
+ }
+
+ /* Check if (uint32)elem index >= table size */
+ if (!(cmp_elem_idx = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, elem_idx,
+ tbl_sz, "cmp_elem_idx"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ goto fail;
+ }
+
+ /* Throw exception if elem index >= table size */
+ if (!(check_elem_idx_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "check_elem_idx_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ goto fail;
+ }
+
+ LLVMMoveBasicBlockAfter(check_elem_idx_succ,
+ LLVMGetInsertBlock(comp_ctx->builder));
+
+ if (!(aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_OUT_OF_BOUNDS_TABLE_ACCESS, true,
+ cmp_elem_idx, check_elem_idx_succ)))
+ goto fail;
+
+ return true;
+fail:
+ return false;
+}
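+
+/* Rough shape of the control flow emitted above (sketch):
+ *
+ *   %cmp_elem_idx = icmp uge i32 %elem_idx, %cur_size
+ *   br i1 %cmp_elem_idx, label %out_of_bounds_exception,
+ *                        label %check_elem_idx_succ
+ *
+ * where aot_emit_exception() provides the exception-raising target and
+ * compilation continues in check_elem_idx_succ. */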
+
+bool
+aot_compile_op_table_get(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMValueRef elem_idx, offset, table_elem, func_idx;
+
+ POP_I32(elem_idx);
+
+ if (!aot_check_table_access(comp_ctx, func_ctx, tbl_idx, elem_idx)) {
+ goto fail;
+ }
+
+ /* load data as i32* */
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, elems)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "table_elem_i8p"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildBitCast(comp_ctx->builder, table_elem,
+ INT32_PTR_TYPE, "table_elem_i32p"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ /* Compute the element address, then load the function index */
+ if (!(table_elem =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE, table_elem,
+ &elem_idx, 1, "table_elem"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(func_idx = LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, table_elem,
+ "func_idx"))) {
+ HANDLE_FAILURE("LLVMBuildLoad");
+ goto fail;
+ }
+
+ PUSH_I32(func_idx);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_table_set(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMValueRef val, elem_idx, offset, table_elem;
+
+ POP_I32(val);
+ POP_I32(elem_idx);
+
+ if (!aot_check_table_access(comp_ctx, func_ctx, tbl_idx, elem_idx)) {
+ goto fail;
+ }
+
+ /* load data as i32* */
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, elems)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "table_elem_i8p"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(table_elem = LLVMBuildBitCast(comp_ctx->builder, table_elem,
+ INT32_PTR_TYPE, "table_elem_i32p"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ /* Compute the address of the element to store into */
+ if (!(table_elem =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, I32_TYPE, table_elem,
+ &elem_idx, 1, "table_elem"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(LLVMBuildStore(comp_ctx->builder, val, table_elem))) {
+ HANDLE_FAILURE("LLVMBuildStore");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_table_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx, uint32 tbl_seg_idx)
+{
+ LLVMValueRef func, param_values[6], value;
+ LLVMTypeRef param_types[6], ret_type, func_type, func_ptr_type;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ param_types[3] = I32_TYPE;
+ param_types[4] = I32_TYPE;
+ param_types[5] = I32_TYPE;
+ ret_type = VOID_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_table_init, 6);
+ else
+ GET_AOT_FUNCTION(aot_table_init, 6);
+
+ param_values[0] = func_ctx->aot_inst;
+
+ if (!(param_values[1] = I32_CONST(tbl_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(param_values[2] = I32_CONST(tbl_seg_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* n */
+ POP_I32(param_values[3]);
+ /* s */
+ POP_I32(param_values[4]);
+ /* d */
+ POP_I32(param_values[5]);
+
+ /* "" means return void */
+ if (!(LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values, 6,
+ ""))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
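+
+/* Note on the POP order above: wasm table.init pushes d, s, n in that
+ * order, so popping yields n first, then s, then d; the same reversed
+ * pattern applies to table.copy and table.fill below. */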
+
+bool
+aot_compile_op_table_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 src_tbl_idx, uint32 dst_tbl_idx)
+{
+ LLVMTypeRef param_types[6], ret_type, func_type, func_ptr_type;
+ LLVMValueRef func, param_values[6], value;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ param_types[3] = I32_TYPE;
+ param_types[4] = I32_TYPE;
+ param_types[5] = I32_TYPE;
+ ret_type = VOID_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_table_copy, 6);
+ else
+ GET_AOT_FUNCTION(aot_table_copy, 6);
+
+ param_values[0] = func_ctx->aot_inst;
+
+ if (!(param_values[1] = I32_CONST(src_tbl_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(param_values[2] = I32_CONST(dst_tbl_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* n */
+ POP_I32(param_values[3]);
+ /* s */
+ POP_I32(param_values[4]);
+ /* d */
+ POP_I32(param_values[5]);
+
+ /* "" means return void */
+ if (!(LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values, 6,
+ ""))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_table_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMValueRef offset, tbl_sz;
+
+ if (!(offset = I32_CONST(get_tbl_inst_offset(comp_ctx, func_ctx, tbl_idx)
+ + offsetof(AOTTableInstance, cur_size)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(tbl_sz = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "tbl_sz_ptr_i8"))) {
+ HANDLE_FAILURE("LLVMBuildInBoundsGEP");
+ goto fail;
+ }
+
+ if (!(tbl_sz = LLVMBuildBitCast(comp_ctx->builder, tbl_sz, INT32_PTR_TYPE,
+ "tbl_sz_ptr"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ if (!(tbl_sz =
+ LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, tbl_sz, "tbl_sz"))) {
+ HANDLE_FAILURE("LLVMBuildLoad");
+ goto fail;
+ }
+
+ PUSH_I32(tbl_sz);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_table_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMTypeRef param_types[4], ret_type, func_type, func_ptr_type;
+ LLVMValueRef func, param_values[4], ret, value;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ param_types[3] = I32_TYPE;
+ ret_type = I32_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_table_grow, 4);
+ else
+ GET_AOT_FUNCTION(aot_table_grow, 4);
+
+ param_values[0] = func_ctx->aot_inst;
+
+ if (!(param_values[1] = I32_CONST(tbl_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* n */
+ POP_I32(param_values[2]);
+ /* v */
+ POP_I32(param_values[3]);
+
+ if (!(ret = LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values,
+ 4, "table_grow"))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ PUSH_I32(ret);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_table_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx)
+{
+ LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
+ LLVMValueRef func, param_values[5], value;
+
+ param_types[0] = INT8_PTR_TYPE;
+ param_types[1] = I32_TYPE;
+ param_types[2] = I32_TYPE;
+ param_types[3] = I32_TYPE;
+ param_types[4] = I32_TYPE;
+ ret_type = VOID_TYPE;
+
+ if (comp_ctx->is_jit_mode)
+ GET_AOT_FUNCTION(llvm_jit_table_fill, 5);
+ else
+ GET_AOT_FUNCTION(aot_table_fill, 5);
+
+ param_values[0] = func_ctx->aot_inst;
+
+ if (!(param_values[1] = I32_CONST(tbl_idx))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* n */
+ POP_I32(param_values[2]);
+ /* v */
+ POP_I32(param_values[3]);
+ /* i */
+ POP_I32(param_values[4]);
+
+ /* "" means return void */
+ if (!(LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values, 5,
+ ""))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+#endif /* WASM_ENABLE_REF_TYPES != 0 */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.h
new file mode 100644
index 000000000..e5ab0ed48
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_table.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_TABLE_H_
+#define _AOT_EMIT_TABLE_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_elem_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_seg_idx);
+
+bool
+aot_compile_op_table_get(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+bool
+aot_compile_op_table_set(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+bool
+aot_compile_op_table_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx, uint32 tbl_seg_idx);
+
+bool
+aot_compile_op_table_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 src_tbl_idx, uint32 dst_tbl_idx);
+
+bool
+aot_compile_op_table_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+bool
+aot_compile_op_table_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+bool
+aot_compile_op_table_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+uint64
+get_tbl_inst_offset(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, uint32 tbl_idx);
+
+LLVMValueRef
+aot_compile_get_tbl_inst(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 tbl_idx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+#endif
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.c
new file mode 100644
index 000000000..70487d4de
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_emit_variable.h"
+#include "aot_emit_exception.h"
+#include "../aot/aot_runtime.h"
+
+#define CHECK_LOCAL(idx) \
+ do { \
+ if (idx >= func_ctx->aot_func->func_type->param_count \
+ + func_ctx->aot_func->local_count) { \
+ aot_set_last_error("local index out of range"); \
+ return false; \
+ } \
+ } while (0)
+
+static uint8
+get_local_type(AOTFuncContext *func_ctx, uint32 local_idx)
+{
+ AOTFunc *aot_func = func_ctx->aot_func;
+ uint32 param_count = aot_func->func_type->param_count;
+ return local_idx < param_count
+ ? aot_func->func_type->types[local_idx]
+ : aot_func->local_types[local_idx - param_count];
+}
+
+bool
+aot_compile_op_get_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx)
+{
+ char name[32];
+ LLVMValueRef value;
+ AOTValue *aot_value_top;
+ uint8 local_type;
+
+ CHECK_LOCAL(local_idx);
+
+ local_type = get_local_type(func_ctx, local_idx);
+
+ snprintf(name, sizeof(name), "%s%d%s", "local", local_idx, "#");
+ if (!(value = LLVMBuildLoad2(comp_ctx->builder, TO_LLVM_TYPE(local_type),
+ func_ctx->locals[local_idx], name))) {
+ aot_set_last_error("llvm build load fail");
+ return false;
+ }
+
+ PUSH(value, local_type);
+
+ aot_value_top =
+ func_ctx->block_stack.block_list_end->value_stack.value_list_end;
+ aot_value_top->is_local = true;
+ aot_value_top->local_idx = local_idx;
+ return true;
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_set_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx)
+{
+ LLVMValueRef value;
+
+ CHECK_LOCAL(local_idx);
+
+ POP(value, get_local_type(func_ctx, local_idx));
+
+ if (!LLVMBuildStore(comp_ctx->builder, value,
+ func_ctx->locals[local_idx])) {
+ aot_set_last_error("llvm build store fail");
+ return false;
+ }
+
+ aot_checked_addr_list_del(func_ctx, local_idx);
+ return true;
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_op_tee_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx)
+{
+ LLVMValueRef value;
+ uint8 type;
+
+ CHECK_LOCAL(local_idx);
+
+ type = get_local_type(func_ctx, local_idx);
+
+ POP(value, type);
+
+ if (!LLVMBuildStore(comp_ctx->builder, value,
+ func_ctx->locals[local_idx])) {
+ aot_set_last_error("llvm build store fail");
+ return false;
+ }
+
+ PUSH(value, type);
+ aot_checked_addr_list_del(func_ctx, local_idx);
+ return true;
+
+fail:
+ return false;
+}
+
+static bool
+compile_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 global_idx, bool is_set, bool is_aux_stack)
+{
+ AOTCompData *comp_data = comp_ctx->comp_data;
+ uint32 import_global_count = comp_data->import_global_count;
+ uint32 global_base_offset;
+ uint32 global_offset;
+ uint8 global_type;
+ LLVMValueRef offset, global_ptr, global, res;
+ LLVMTypeRef ptr_type = NULL;
+
+ global_base_offset =
+ offsetof(AOTModuleInstance, global_table_data.bytes)
+ + sizeof(AOTMemoryInstance) * comp_ctx->comp_data->memory_count;
+
+ bh_assert(global_idx < import_global_count + comp_data->global_count);
+
+ if (global_idx < import_global_count) {
+ global_offset = global_base_offset
+ + comp_data->import_globals[global_idx].data_offset;
+ global_type = comp_data->import_globals[global_idx].type;
+ }
+ else {
+ global_offset =
+ global_base_offset
+ + comp_data->globals[global_idx - import_global_count].data_offset;
+ global_type = comp_data->globals[global_idx - import_global_count].type;
+ }
+
+ offset = I32_CONST(global_offset);
+ if (!(global_ptr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "global_ptr_tmp"))) {
+ aot_set_last_error("llvm build in bounds gep failed.");
+ return false;
+ }
+
+ switch (global_type) {
+ case VALUE_TYPE_I32:
+ case VALUE_TYPE_EXTERNREF:
+ case VALUE_TYPE_FUNCREF:
+ ptr_type = comp_ctx->basic_types.int32_ptr_type;
+ break;
+ case VALUE_TYPE_I64:
+ ptr_type = comp_ctx->basic_types.int64_ptr_type;
+ break;
+ case VALUE_TYPE_F32:
+ ptr_type = comp_ctx->basic_types.float32_ptr_type;
+ break;
+ case VALUE_TYPE_F64:
+ ptr_type = comp_ctx->basic_types.float64_ptr_type;
+ break;
+ case VALUE_TYPE_V128:
+ ptr_type = comp_ctx->basic_types.v128_ptr_type;
+ break;
+ default:
+ bh_assert(!"unknown type");
+ break;
+ }
+
+ if (!(global_ptr = LLVMBuildBitCast(comp_ctx->builder, global_ptr, ptr_type,
+ "global_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ if (!is_set) {
+ if (!(global =
+ LLVMBuildLoad2(comp_ctx->builder, TO_LLVM_TYPE(global_type),
+ global_ptr, "global"))) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ /* All globals' data is 4-byte aligned */
+ LLVMSetAlignment(global, 4);
+ PUSH(global, global_type);
+ }
+ else {
+ POP(global, global_type);
+
+ if (is_aux_stack && comp_ctx->enable_aux_stack_check) {
+ LLVMBasicBlockRef block_curr =
+ LLVMGetInsertBlock(comp_ctx->builder);
+ LLVMBasicBlockRef check_overflow_succ, check_underflow_succ;
+ LLVMValueRef cmp;
+
+ /* Add basic blocks */
+ if (!(check_overflow_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func,
+ "check_overflow_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+ LLVMMoveBasicBlockAfter(check_overflow_succ, block_curr);
+
+ if (!(check_underflow_succ = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func,
+ "check_underflow_succ"))) {
+ aot_set_last_error("llvm add basic block failed.");
+ return false;
+ }
+ LLVMMoveBasicBlockAfter(check_underflow_succ, check_overflow_succ);
+
+ /* Check aux stack overflow */
+ if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntULE, global,
+ func_ctx->aux_stack_bound, "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+ if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_AUX_STACK_OVERFLOW,
+ true, cmp, check_overflow_succ)) {
+ return false;
+ }
+
+ /* Check aux stack underflow */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_overflow_succ);
+ if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGT, global,
+ func_ctx->aux_stack_bottom, "cmp"))) {
+ aot_set_last_error("llvm build icmp failed.");
+ return false;
+ }
+ if (!aot_emit_exception(comp_ctx, func_ctx,
+ EXCE_AUX_STACK_UNDERFLOW, true, cmp,
+ check_underflow_succ)) {
+ return false;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, check_underflow_succ);
+ }
+
+ if (!(res = LLVMBuildStore(comp_ctx->builder, global, global_ptr))) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ /* All globals' data is 4-byte aligned */
+ LLVMSetAlignment(res, 4);
+ }
+
+ return true;
+fail:
+ return false;
+}
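+
+/* The aux stack grows downward, hence the checks above: a new stack-top
+ * value <= aux_stack_bound means overflow, and a value greater than
+ * aux_stack_bottom means underflow. */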
+
+bool
+aot_compile_op_get_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 global_idx)
+{
+ return compile_global(comp_ctx, func_ctx, global_idx, false, false);
+}
+
+bool
+aot_compile_op_set_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 global_idx, bool is_aux_stack)
+{
+ return compile_global(comp_ctx, func_ctx, global_idx, true, is_aux_stack);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.h
new file mode 100644
index 000000000..28c0bd093
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_emit_variable.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_EMIT_VARIABLE_H_
+#define _AOT_EMIT_VARIABLE_H_
+
+#include "aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_op_get_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx);
+
+bool
+aot_compile_op_set_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx);
+
+bool
+aot_compile_op_tee_local(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 local_idx);
+
+bool
+aot_compile_op_get_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 global_idx);
+
+bool
+aot_compile_op_set_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 global_idx, bool is_aux_stack);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_EMIT_VARIABLE_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.c
new file mode 100644
index 000000000..dc3fe7f59
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.c
@@ -0,0 +1,2770 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "aot_llvm.h"
+#include "aot_llvm_extra2.h"
+#include "aot_compiler.h"
+#include "aot_emit_exception.h"
+#include "../aot/aot_runtime.h"
+#include "../aot/aot_intrinsic.h"
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+#include "debug/dwarf_extractor.h"
+#endif
+
+LLVMTypeRef
+wasm_type_to_llvm_type(AOTLLVMTypes *llvm_types, uint8 wasm_type)
+{
+ switch (wasm_type) {
+ case VALUE_TYPE_I32:
+ case VALUE_TYPE_FUNCREF:
+ case VALUE_TYPE_EXTERNREF:
+ return llvm_types->int32_type;
+ case VALUE_TYPE_I64:
+ return llvm_types->int64_type;
+ case VALUE_TYPE_F32:
+ return llvm_types->float32_type;
+ case VALUE_TYPE_F64:
+ return llvm_types->float64_type;
+ case VALUE_TYPE_V128:
+ return llvm_types->i64x2_vec_type;
+ case VALUE_TYPE_VOID:
+ return llvm_types->void_type;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * Add LLVM function
+ */
+static LLVMValueRef
+aot_add_llvm_func(AOTCompContext *comp_ctx, LLVMModuleRef module,
+ AOTFuncType *aot_func_type, uint32 func_index,
+ LLVMTypeRef *p_func_type)
+{
+ LLVMValueRef func = NULL;
+ LLVMTypeRef *param_types, ret_type, func_type;
+ LLVMValueRef local_value;
+ LLVMTypeRef func_type_wrapper;
+ LLVMValueRef func_wrapper;
+ LLVMBasicBlockRef func_begin;
+ char func_name[48];
+ uint64 size;
+ uint32 i, j = 0, param_count = aot_func_type->param_count;
+ uint32 backend_thread_num, compile_thread_num;
+
+ /* exec env as first parameter */
+ param_count++;
+
+ /* The addresses of any extra wasm results (all but the first)
+ * are appended to the aot function's parameters. */
+ if (aot_func_type->result_count > 1)
+ param_count += aot_func_type->result_count - 1;
+
+ /* Initialize parameter types of the LLVM function */
+ size = sizeof(LLVMTypeRef) * ((uint64)param_count);
+ if (size >= UINT32_MAX
+ || !(param_types = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ /* exec env as first parameter */
+ param_types[j++] = comp_ctx->exec_env_type;
+ for (i = 0; i < aot_func_type->param_count; i++)
+ param_types[j++] = TO_LLVM_TYPE(aot_func_type->types[i]);
+ /* Extra results' address */
+ for (i = 1; i < aot_func_type->result_count; i++, j++) {
+ param_types[j] =
+ TO_LLVM_TYPE(aot_func_type->types[aot_func_type->param_count + i]);
+ if (!(param_types[j] = LLVMPointerType(param_types[j], 0))) {
+ aot_set_last_error("llvm get pointer type failed.");
+ goto fail;
+ }
+ }
+
+ /* Resolve return type of the LLVM function */
+ if (aot_func_type->result_count)
+ ret_type =
+ TO_LLVM_TYPE(aot_func_type->types[aot_func_type->param_count]);
+ else
+ ret_type = VOID_TYPE;
+
+ /* Resolve function prototype */
+ if (!(func_type =
+ LLVMFunctionType(ret_type, param_types, param_count, false))) {
+ aot_set_last_error("create LLVM function type failed.");
+ goto fail;
+ }
+
+ /* Add LLVM function */
+ snprintf(func_name, sizeof(func_name), "%s%d", AOT_FUNC_PREFIX, func_index);
+ if (!(func = LLVMAddFunction(module, func_name, func_type))) {
+ aot_set_last_error("add LLVM function failed.");
+ goto fail;
+ }
+
+ j = 0;
+ local_value = LLVMGetParam(func, j++);
+ LLVMSetValueName(local_value, "exec_env");
+
+ /* Set parameter names */
+ for (i = 0; i < aot_func_type->param_count; i++) {
+ local_value = LLVMGetParam(func, j++);
+ LLVMSetValueName(local_value, "");
+ }
+
+ if (p_func_type)
+ *p_func_type = func_type;
+
+ backend_thread_num = WASM_ORC_JIT_BACKEND_THREAD_NUM;
+ compile_thread_num = WASM_ORC_JIT_COMPILE_THREAD_NUM;
+
+ /* Add a jit wrapper function with a trivial prototype so that we
+ can call it to trigger its compilation; LLVM JIT then compiles
+ the actual jit functions added to the function list in the
+ PartitionFunction callback */
+ if (comp_ctx->is_jit_mode
+ && (func_index % (backend_thread_num * compile_thread_num)
+ < backend_thread_num)) {
+ func_type_wrapper = LLVMFunctionType(VOID_TYPE, NULL, 0, false);
+ if (!func_type_wrapper) {
+ aot_set_last_error("create LLVM function type failed.");
+ goto fail;
+ }
+
+ snprintf(func_name, sizeof(func_name), "%s%d%s", AOT_FUNC_PREFIX,
+ func_index, "_wrapper");
+ if (!(func_wrapper =
+ LLVMAddFunction(module, func_name, func_type_wrapper))) {
+ aot_set_last_error("add LLVM function failed.");
+ goto fail;
+ }
+
+ if (!(func_begin = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_wrapper, "func_begin"))) {
+ aot_set_last_error("add LLVM basic block failed.");
+ goto fail;
+ }
+
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, func_begin);
+ if (!LLVMBuildRetVoid(comp_ctx->builder)) {
+ aot_set_last_error("llvm build ret failed.");
+ goto fail;
+ }
+ }
+
+fail:
+ wasm_runtime_free(param_types);
+ return func;
+}
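+
+/* Shape of the functions added above (illustrative): for a wasm function
+ * of type (param i32 i64) (result f32 f64), the generated prototype is
+ * roughly
+ *
+ *   float aot_funcN(exec_env, i32 p0, i64 p1, double *extra_result1);
+ *
+ * exec_env first, then the wasm params, then one out-pointer per result
+ * after the first; the first result is returned directly. The real name
+ * is AOT_FUNC_PREFIX followed by the function index. */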
+
+static void
+free_block_memory(AOTBlock *block)
+{
+ if (block->param_types)
+ wasm_runtime_free(block->param_types);
+ if (block->result_types)
+ wasm_runtime_free(block->result_types);
+ wasm_runtime_free(block);
+}
+
+/**
+ * Create the first AOTBlock, i.e. the function-level block, for the function
+ */
+static AOTBlock *
+aot_create_func_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ AOTFunc *func, AOTFuncType *aot_func_type)
+{
+ AOTBlock *aot_block;
+ uint32 param_count = aot_func_type->param_count,
+ result_count = aot_func_type->result_count;
+
+ /* Allocate memory */
+ if (!(aot_block = wasm_runtime_malloc(sizeof(AOTBlock)))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+ memset(aot_block, 0, sizeof(AOTBlock));
+ if (param_count
+ && !(aot_block->param_types = wasm_runtime_malloc(param_count))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ if (result_count) {
+ if (!(aot_block->result_types = wasm_runtime_malloc(result_count))) {
+ aot_set_last_error("allocate memory failed.");
+ goto fail;
+ }
+ }
+
+ /* Set block data */
+ aot_block->label_type = LABEL_TYPE_FUNCTION;
+ aot_block->param_count = param_count;
+ if (param_count) {
+ bh_memcpy_s(aot_block->param_types, param_count, aot_func_type->types,
+ param_count);
+ }
+ aot_block->result_count = result_count;
+ if (result_count) {
+ bh_memcpy_s(aot_block->result_types, result_count,
+ aot_func_type->types + param_count, result_count);
+ }
+ aot_block->wasm_code_end = func->code + func->code_size;
+
+ /* Add function entry block */
+ if (!(aot_block->llvm_entry_block = LLVMAppendBasicBlockInContext(
+ comp_ctx->context, func_ctx->func, "func_begin"))) {
+ aot_set_last_error("add LLVM basic block failed.");
+ goto fail;
+ }
+
+ return aot_block;
+
+fail:
+ free_block_memory(aot_block);
+ return NULL;
+}
+
+static bool
+create_argv_buf(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef argv_buf_offset = I32_THREE, argv_buf_addr;
+ LLVMTypeRef int32_ptr_type;
+
+ /* Get argv buffer address */
+ if (!(argv_buf_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &argv_buf_offset, 1, "argv_buf_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ if (!(int32_ptr_type = LLVMPointerType(INT32_PTR_TYPE, 0))) {
+ aot_set_last_error("llvm add pointer type failed");
+ return false;
+ }
+
+ /* Convert to int32 pointer type */
+ if (!(argv_buf_addr = LLVMBuildBitCast(comp_ctx->builder, argv_buf_addr,
+ int32_ptr_type, "argv_buf_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+
+ if (!(func_ctx->argv_buf = LLVMBuildLoad2(comp_ctx->builder, INT32_PTR_TYPE,
+ argv_buf_addr, "argv_buf"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+create_native_stack_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef stack_bound_offset = I32_FOUR, stack_bound_addr;
+
+ if (!(stack_bound_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &stack_bound_offset, 1, "stack_bound_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ if (!(func_ctx->native_stack_bound =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE, stack_bound_addr,
+ "native_stack_bound"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+create_native_stack_top_min(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef offset = I32_NINE;
+
+ if (!(func_ctx->native_stack_top_min_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env, &offset, 1,
+ "native_stack_top_min_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+create_aux_stack_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef aux_stack_bound_offset = I32_SIX, aux_stack_bound_addr;
+ LLVMValueRef aux_stack_bottom_offset = I32_SEVEN, aux_stack_bottom_addr;
+
+ /* Get aux stack boundary address */
+ if (!(aux_stack_bound_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &aux_stack_bound_offset, 1, "aux_stack_bound_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ if (!(aux_stack_bound_addr =
+ LLVMBuildBitCast(comp_ctx->builder, aux_stack_bound_addr,
+ INT32_PTR_TYPE, "aux_stack_bound_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+
+ if (!(func_ctx->aux_stack_bound =
+ LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, aux_stack_bound_addr,
+ "aux_stack_bound"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+
+ /* Get aux stack bottom address */
+ if (!(aux_stack_bottom_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &aux_stack_bottom_offset, 1, "aux_stack_bottom_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ if (!(aux_stack_bottom_addr =
+ LLVMBuildBitCast(comp_ctx->builder, aux_stack_bottom_addr,
+ INT32_PTR_TYPE, "aux_stack_bottom_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (!(func_ctx->aux_stack_bottom =
+ LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, aux_stack_bottom_addr,
+ "aux_stack_bottom"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+create_native_symbol(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef native_symbol_offset = I32_EIGHT, native_symbol_addr;
+
+ if (!(native_symbol_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &native_symbol_offset, 1, "native_symbol_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+
+ if (!(func_ctx->native_symbol =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ native_symbol_addr, "native_symbol_tmp"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+
+ if (!(func_ctx->native_symbol =
+ LLVMBuildBitCast(comp_ctx->builder, func_ctx->native_symbol,
+ comp_ctx->exec_env_type, "native_symbol"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+
+ return true;
+}
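+
+/* The I32_TWO/I32_THREE/... offsets used by the helpers above index
+ * pointer-sized slots of the embedder's exec_env structure (a sketch
+ * inferred from this file, not an authoritative layout):
+ *   2: module_inst        3: argv_buf           4: native stack bound
+ *   6: aux stack bound    7: aux stack bottom   8: native symbol list
+ *   9: native_stack_top_min */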
+
+static bool
+create_local_variables(AOTCompData *comp_data, AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, AOTFunc *func)
+{
+ AOTFuncType *aot_func_type = comp_data->func_types[func->func_type_index];
+ char local_name[32];
+ uint32 i, j = 1;
+
+ for (i = 0; i < aot_func_type->param_count; i++, j++) {
+ snprintf(local_name, sizeof(local_name), "l%d", i);
+ func_ctx->locals[i] =
+ LLVMBuildAlloca(comp_ctx->builder,
+ TO_LLVM_TYPE(aot_func_type->types[i]), local_name);
+ if (!func_ctx->locals[i]) {
+ aot_set_last_error("llvm build alloca failed.");
+ return false;
+ }
+ if (!LLVMBuildStore(comp_ctx->builder, LLVMGetParam(func_ctx->func, j),
+ func_ctx->locals[i])) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ }
+
+ for (i = 0; i < func->local_count; i++) {
+ LLVMTypeRef local_type;
+ LLVMValueRef local_value = NULL;
+ snprintf(local_name, sizeof(local_name), "l%d",
+ aot_func_type->param_count + i);
+ local_type = TO_LLVM_TYPE(func->local_types[i]);
+ func_ctx->locals[aot_func_type->param_count + i] =
+ LLVMBuildAlloca(comp_ctx->builder, local_type, local_name);
+ if (!func_ctx->locals[aot_func_type->param_count + i]) {
+ aot_set_last_error("llvm build alloca failed.");
+ return false;
+ }
+ switch (func->local_types[i]) {
+ case VALUE_TYPE_I32:
+ local_value = I32_ZERO;
+ break;
+ case VALUE_TYPE_I64:
+ local_value = I64_ZERO;
+ break;
+ case VALUE_TYPE_F32:
+ local_value = F32_ZERO;
+ break;
+ case VALUE_TYPE_F64:
+ local_value = F64_ZERO;
+ break;
+ case VALUE_TYPE_V128:
+ local_value = V128_i64x2_ZERO;
+ break;
+ case VALUE_TYPE_FUNCREF:
+ case VALUE_TYPE_EXTERNREF:
+ local_value = REF_NULL;
+ break;
+ default:
+ bh_assert(0);
+ break;
+ }
+ if (!LLVMBuildStore(comp_ctx->builder, local_value,
+ func_ctx->locals[aot_func_type->param_count + i])) {
+ aot_set_last_error("llvm build store failed.");
+ return false;
+ }
+ }
+
+ if (comp_ctx->enable_stack_bound_check
+ || comp_ctx->enable_stack_estimation) {
+ if (aot_func_type->param_count + func->local_count > 0) {
+ func_ctx->last_alloca = func_ctx->locals[aot_func_type->param_count
+ + func->local_count - 1];
+ if (!(func_ctx->last_alloca =
+ LLVMBuildBitCast(comp_ctx->builder, func_ctx->last_alloca,
+ INT8_PTR_TYPE, "stack_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+ }
+ else {
+ if (!(func_ctx->last_alloca = LLVMBuildAlloca(
+ comp_ctx->builder, INT8_TYPE, "stack_ptr"))) {
+ aot_set_last_error("llvm build alloca failed.");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
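+
+/* When stack bound checking or estimation is enabled, the address of the
+ * last local alloca (or a dummy 1-byte alloca when there are no locals)
+ * is kept as "stack_ptr", serving as a cheap proxy for the native stack
+ * top in later stack-depth checks. */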
+
+static bool
+create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef int8_ptr_type, uint32 func_index)
+{
+ LLVMValueRef offset, mem_info_base;
+ uint32 memory_count;
+ WASMModule *module = comp_ctx->comp_data->wasm_module;
+ WASMFunction *func = module->functions[func_index];
+ LLVMTypeRef bound_check_type;
+ bool mem_space_unchanged =
+ (!func->has_op_memory_grow && !func->has_op_func_call)
+ || (!module->possible_memory_grow);
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ bool is_shared_memory;
+#endif
+
+ func_ctx->mem_space_unchanged = mem_space_unchanged;
+
+ memory_count = module->memory_count + module->import_memory_count;
+ /* If the module doesn't have a memory, reserve
+ one mem_info space with empty content */
+ if (memory_count == 0)
+ memory_count = 1;
+
+ if (!(func_ctx->mem_info =
+ wasm_runtime_malloc(sizeof(AOTMemInfo) * memory_count))) {
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+ memset(func_ctx->mem_info, 0, sizeof(AOTMemInfo) * memory_count);
+
+ /* Currently we only create memory info for memory 0 */
+ /* Load memory base address */
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ is_shared_memory =
+ comp_ctx->comp_data->memories[0].memory_flags & 0x02 ? true : false;
+ if (is_shared_memory) {
+ LLVMValueRef shared_mem_addr;
+ offset = I32_CONST(offsetof(AOTModuleInstance, memories));
+ if (!offset) {
+ aot_set_last_error("create llvm const failed.");
+ return false;
+ }
+
+ /* aot_inst->memories */
+ if (!(shared_mem_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst, &offset, 1,
+ "shared_mem_addr_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(shared_mem_addr =
+ LLVMBuildBitCast(comp_ctx->builder, shared_mem_addr,
+ int8_ptr_type, "shared_mem_addr_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ /* aot_inst->memories[0] */
+ if (!(shared_mem_addr =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ shared_mem_addr, "shared_mem_addr"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ if (!(shared_mem_addr =
+ LLVMBuildBitCast(comp_ctx->builder, shared_mem_addr,
+ int8_ptr_type, "shared_mem_addr_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (!(shared_mem_addr =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ shared_mem_addr, "shared_mem_addr"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ /* memories[0]->memory_data */
+ offset = I32_CONST(offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_base_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, shared_mem_addr, &offset, 1,
+ "mem_base_addr_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ /* memories[0]->cur_page_count */
+ offset = I32_CONST(offsetof(AOTMemoryInstance, cur_page_count));
+ if (!(func_ctx->mem_info[0].mem_cur_page_count_addr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ shared_mem_addr, &offset, 1,
+ "mem_cur_page_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ /* memories[0]->memory_data_size */
+ offset = I32_CONST(offsetof(AOTMemoryInstance, memory_data_size));
+ if (!(func_ctx->mem_info[0].mem_data_size_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, shared_mem_addr, &offset, 1,
+ "mem_data_size_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ }
+ else
+#endif
+ {
+ uint32 offset_of_global_table_data;
+
+ if (comp_ctx->is_jit_mode)
+ offset_of_global_table_data =
+ offsetof(WASMModuleInstance, global_table_data);
+ else
+ offset_of_global_table_data =
+ offsetof(AOTModuleInstance, global_table_data);
+
+ offset = I32_CONST(offset_of_global_table_data
+ + offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_base_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst, &offset, 1,
+ "mem_base_addr_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ offset = I32_CONST(offset_of_global_table_data
+ + offsetof(AOTMemoryInstance, cur_page_count));
+ if (!(func_ctx->mem_info[0].mem_cur_page_count_addr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
+ func_ctx->aot_inst, &offset, 1,
+ "mem_cur_page_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ offset = I32_CONST(offset_of_global_table_data
+ + offsetof(AOTMemoryInstance, memory_data_size));
+ if (!(func_ctx->mem_info[0].mem_data_size_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst, &offset, 1,
+ "mem_data_size_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ }
+ /* Store mem info base address before cast */
+ mem_info_base = func_ctx->mem_info[0].mem_base_addr;
+
+ if (!(func_ctx->mem_info[0].mem_base_addr = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_base_addr,
+ int8_ptr_type, "mem_base_addr_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_cur_page_count_addr = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_cur_page_count_addr,
+ INT32_PTR_TYPE, "mem_cur_page_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_data_size_addr = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_data_size_addr,
+ INT32_PTR_TYPE, "mem_data_size_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_base_addr = LLVMBuildLoad2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->mem_info[0].mem_base_addr, "mem_base_addr"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_cur_page_count_addr =
+ LLVMBuildLoad2(comp_ctx->builder, I32_TYPE,
+ func_ctx->mem_info[0].mem_cur_page_count_addr,
+ "mem_cur_page_count"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_data_size_addr = LLVMBuildLoad2(
+ comp_ctx->builder, I32_TYPE,
+ func_ctx->mem_info[0].mem_data_size_addr, "mem_data_size"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+#if WASM_ENABLE_SHARED_MEMORY != 0
+ else if (is_shared_memory) {
+ /* The base address of a shared memory never changes,
+ so we can load the value here */
+ if (!(func_ctx->mem_info[0].mem_base_addr = LLVMBuildLoad2(
+ comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->mem_info[0].mem_base_addr, "mem_base_addr"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+#endif
+
+ bound_check_type = (comp_ctx->pointer_size == sizeof(uint64))
+ ? INT64_PTR_TYPE
+ : INT32_PTR_TYPE;
+
+ /* Load memory bound check constants */
+ offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_1byte)
+ - offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_bound_check_1byte =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, mem_info_base,
+ &offset, 1, "bound_check_1byte_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_bound_check_1byte = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_bound_check_1byte,
+ bound_check_type, "bound_check_1byte_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_bound_check_1byte = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE
+ : I32_TYPE,
+ func_ctx->mem_info[0].mem_bound_check_1byte,
+ "bound_check_1byte"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+
+ offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_2bytes)
+ - offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_bound_check_2bytes =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, mem_info_base,
+ &offset, 1, "bound_check_2bytes_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_bound_check_2bytes = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_bound_check_2bytes,
+ bound_check_type, "bound_check_2bytes_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_bound_check_2bytes = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE
+ : I32_TYPE,
+ func_ctx->mem_info[0].mem_bound_check_2bytes,
+ "bound_check_2bytes"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+
+ offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_4bytes)
+ - offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_bound_check_4bytes =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, mem_info_base,
+ &offset, 1, "bound_check_4bytes_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_bound_check_4bytes = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_bound_check_4bytes,
+ bound_check_type, "bound_check_4bytes_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_bound_check_4bytes = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE
+ : I32_TYPE,
+ func_ctx->mem_info[0].mem_bound_check_4bytes,
+ "bound_check_4bytes"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+
+ offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_8bytes)
+ - offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_bound_check_8bytes =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, mem_info_base,
+ &offset, 1, "bound_check_8bytes_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_bound_check_8bytes = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_bound_check_8bytes,
+ bound_check_type, "bound_check_8bytes_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_bound_check_8bytes = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE
+ : I32_TYPE,
+ func_ctx->mem_info[0].mem_bound_check_8bytes,
+ "bound_check_8bytes"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+
+ offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_16bytes)
+ - offsetof(AOTMemoryInstance, memory_data));
+ if (!(func_ctx->mem_info[0].mem_bound_check_16bytes = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, INT8_TYPE, mem_info_base, &offset, 1,
+ "bound_check_16bytes_offset"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ return false;
+ }
+ if (!(func_ctx->mem_info[0].mem_bound_check_16bytes = LLVMBuildBitCast(
+ comp_ctx->builder, func_ctx->mem_info[0].mem_bound_check_16bytes,
+ bound_check_type, "bound_check_16bytes_ptr"))) {
+ aot_set_last_error("llvm build bit cast failed");
+ return false;
+ }
+ if (mem_space_unchanged) {
+ if (!(func_ctx->mem_info[0].mem_bound_check_16bytes = LLVMBuildLoad2(
+ comp_ctx->builder,
+ (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE
+ : I32_TYPE,
+ func_ctx->mem_info[0].mem_bound_check_16bytes,
+ "bound_check_16bytes"))) {
+ aot_set_last_error("llvm build load failed");
+ return false;
+ }
+ }
+
+ return true;
+}
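+
+/* Caching strategy above: when mem_space_unchanged holds (the function
+ * neither grows memory nor calls anything that might), the memory base
+ * address, current page count, data size and the per-width bound-check
+ * limits are loaded once here in the entry block; otherwise only their
+ * addresses are kept and each memory access reloads the values. */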
+
+static bool
+create_cur_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef offset;
+
+ offset = I32_CONST(offsetof(AOTModuleInstance, cur_exception));
+ func_ctx->cur_exception =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst,
+ &offset, 1, "cur_exception");
+ if (!func_ctx->cur_exception) {
+ aot_set_last_error("llvm build in bounds gep failed.");
+ return false;
+ }
+ return true;
+}
+
+static bool
+create_func_type_indexes(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef offset, func_type_indexes_ptr;
+ LLVMTypeRef int32_ptr_type;
+
+ offset = I32_CONST(offsetof(AOTModuleInstance, func_type_indexes));
+ func_type_indexes_ptr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst,
+ &offset, 1, "func_type_indexes_ptr");
+ if (!func_type_indexes_ptr) {
+ aot_set_last_error("llvm build in bounds gep failed.");
+ return false;
+ }
+
+ if (!(int32_ptr_type = LLVMPointerType(INT32_PTR_TYPE, 0))) {
+ aot_set_last_error("llvm get pointer type failed.");
+ return false;
+ }
+
+ func_ctx->func_type_indexes =
+ LLVMBuildBitCast(comp_ctx->builder, func_type_indexes_ptr,
+ int32_ptr_type, "func_type_indexes_tmp");
+ if (!func_ctx->func_type_indexes) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ func_ctx->func_type_indexes =
+ LLVMBuildLoad2(comp_ctx->builder, INT32_PTR_TYPE,
+ func_ctx->func_type_indexes, "func_type_indexes");
+ if (!func_ctx->func_type_indexes) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+ return true;
+}
+
+static bool
+create_func_ptrs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef offset;
+
+ offset = I32_CONST(offsetof(AOTModuleInstance, func_ptrs));
+ func_ctx->func_ptrs =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, func_ctx->aot_inst,
+ &offset, 1, "func_ptrs_offset");
+ if (!func_ctx->func_ptrs) {
+ aot_set_last_error("llvm build in bounds gep failed.");
+ return false;
+ }
+ func_ctx->func_ptrs =
+ LLVMBuildBitCast(comp_ctx->builder, func_ctx->func_ptrs,
+ comp_ctx->exec_env_type, "func_ptrs_tmp");
+ if (!func_ctx->func_ptrs) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ func_ctx->func_ptrs = LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ func_ctx->func_ptrs, "func_ptrs_ptr");
+ if (!func_ctx->func_ptrs) {
+ aot_set_last_error("llvm build load failed.");
+ return false;
+ }
+
+ func_ctx->func_ptrs =
+ LLVMBuildBitCast(comp_ctx->builder, func_ctx->func_ptrs,
+ comp_ctx->exec_env_type, "func_ptrs");
+ if (!func_ctx->func_ptrs) {
+ aot_set_last_error("llvm build bit cast failed.");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Create function compiler context
+ */
+static AOTFuncContext *
+aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
+ AOTFunc *func, uint32 func_index)
+{
+ AOTFuncContext *func_ctx;
+ AOTFuncType *aot_func_type = comp_data->func_types[func->func_type_index];
+ WASMModule *module = comp_ctx->comp_data->wasm_module;
+ WASMFunction *wasm_func = module->functions[func_index];
+ AOTBlock *aot_block;
+ LLVMTypeRef int8_ptr_type;
+ LLVMValueRef aot_inst_offset = I32_TWO, aot_inst_addr;
+ uint64 size;
+
+ /* Allocate memory for the function context */
+ size = offsetof(AOTFuncContext, locals)
+ + sizeof(LLVMValueRef)
+ * ((uint64)aot_func_type->param_count + func->local_count);
+ if (size >= UINT32_MAX || !(func_ctx = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(func_ctx, 0, (uint32)size);
+ func_ctx->aot_func = func;
+
+ func_ctx->module = comp_ctx->module;
+
+ /* Add LLVM function */
+ if (!(func_ctx->func =
+ aot_add_llvm_func(comp_ctx, func_ctx->module, aot_func_type,
+ func_index, &func_ctx->func_type))) {
+ goto fail;
+ }
+
+ /* Create function's first AOTBlock */
+ if (!(aot_block =
+ aot_create_func_block(comp_ctx, func_ctx, func, aot_func_type))) {
+ goto fail;
+ }
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ func_ctx->debug_func = dwarf_gen_func_info(comp_ctx, func_ctx);
+#endif
+
+ aot_block_stack_push(&func_ctx->block_stack, aot_block);
+
+ /* Add local variables */
+ LLVMPositionBuilderAtEnd(comp_ctx->builder, aot_block->llvm_entry_block);
+
+    /* Save the parameters for fast access */
+ func_ctx->exec_env = LLVMGetParam(func_ctx->func, 0);
+
+ /* Get aot inst address, the layout of exec_env is:
+ exec_env->next, exec_env->prev, exec_env->module_inst, and argv_buf */
+ if (!(aot_inst_addr = LLVMBuildInBoundsGEP2(
+ comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env,
+ &aot_inst_offset, 1, "aot_inst_addr"))) {
+ aot_set_last_error("llvm build in bounds gep failed");
+ goto fail;
+ }
+
+ /* Load aot inst */
+ if (!(func_ctx->aot_inst = LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE,
+ aot_inst_addr, "aot_inst"))) {
+ aot_set_last_error("llvm build load failed");
+ goto fail;
+ }
+
+ /* Get argv buffer address */
+ if (wasm_func->has_op_func_call && !create_argv_buf(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Get native stack boundary address */
+ if (comp_ctx->enable_stack_bound_check
+ && !create_native_stack_bound(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+ if (comp_ctx->enable_stack_estimation
+ && !create_native_stack_top_min(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Get auxiliary stack info */
+ if (wasm_func->has_op_set_global_aux_stack
+ && !create_aux_stack_info(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Get native symbol list */
+ if (comp_ctx->is_indirect_mode
+ && !create_native_symbol(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Create local variables */
+ if (!create_local_variables(comp_data, comp_ctx, func_ctx, func)) {
+ goto fail;
+ }
+
+ if (!(int8_ptr_type = LLVMPointerType(INT8_PTR_TYPE, 0))) {
+ aot_set_last_error("llvm add pointer type failed.");
+ goto fail;
+ }
+
+ /* Create base addr, end addr, data size of mem, heap */
+ if (wasm_func->has_memory_operations
+ && !create_memory_info(comp_ctx, func_ctx, int8_ptr_type, func_index)) {
+ goto fail;
+ }
+
+ /* Load current exception */
+ if (!create_cur_exception(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Load function type indexes */
+ if (wasm_func->has_op_call_indirect
+ && !create_func_type_indexes(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ /* Load function pointers */
+ if (!create_func_ptrs(comp_ctx, func_ctx)) {
+ goto fail;
+ }
+
+ return func_ctx;
+
+fail:
+ if (func_ctx->mem_info)
+ wasm_runtime_free(func_ctx->mem_info);
+ aot_block_stack_destroy(&func_ctx->block_stack);
+ wasm_runtime_free(func_ctx);
+ return NULL;
+}
+
+static void
+aot_destroy_func_contexts(AOTFuncContext **func_ctxes, uint32 count)
+{
+ uint32 i;
+
+ for (i = 0; i < count; i++)
+ if (func_ctxes[i]) {
+ if (func_ctxes[i]->mem_info)
+ wasm_runtime_free(func_ctxes[i]->mem_info);
+ aot_block_stack_destroy(&func_ctxes[i]->block_stack);
+ aot_checked_addr_list_destroy(func_ctxes[i]);
+ wasm_runtime_free(func_ctxes[i]);
+ }
+ wasm_runtime_free(func_ctxes);
+}
+
+/**
+ * Create function compiler contexts
+ */
+static AOTFuncContext **
+aot_create_func_contexts(AOTCompData *comp_data, AOTCompContext *comp_ctx)
+{
+ AOTFuncContext **func_ctxes;
+ uint64 size;
+ uint32 i;
+
+ /* Allocate memory */
+ size = sizeof(AOTFuncContext *) * (uint64)comp_data->func_count;
+ if (size >= UINT32_MAX
+ || !(func_ctxes = wasm_runtime_malloc((uint32)size))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(func_ctxes, 0, size);
+
+ /* Create each function context */
+ for (i = 0; i < comp_data->func_count; i++) {
+ AOTFunc *func = comp_data->funcs[i];
+ if (!(func_ctxes[i] =
+ aot_create_func_context(comp_data, comp_ctx, func, i))) {
+ aot_destroy_func_contexts(func_ctxes, comp_data->func_count);
+ return NULL;
+ }
+ }
+
+ return func_ctxes;
+}
+
+static bool
+aot_set_llvm_basic_types(AOTLLVMTypes *basic_types, LLVMContextRef context)
+{
+ basic_types->int1_type = LLVMInt1TypeInContext(context);
+ basic_types->int8_type = LLVMInt8TypeInContext(context);
+ basic_types->int16_type = LLVMInt16TypeInContext(context);
+ basic_types->int32_type = LLVMInt32TypeInContext(context);
+ basic_types->int64_type = LLVMInt64TypeInContext(context);
+ basic_types->float32_type = LLVMFloatTypeInContext(context);
+ basic_types->float64_type = LLVMDoubleTypeInContext(context);
+ basic_types->void_type = LLVMVoidTypeInContext(context);
+
+ basic_types->meta_data_type = LLVMMetadataTypeInContext(context);
+
+ basic_types->int8_ptr_type = LLVMPointerType(basic_types->int8_type, 0);
+
+ if (basic_types->int8_ptr_type) {
+ basic_types->int8_pptr_type =
+ LLVMPointerType(basic_types->int8_ptr_type, 0);
+ }
+
+ basic_types->int16_ptr_type = LLVMPointerType(basic_types->int16_type, 0);
+ basic_types->int32_ptr_type = LLVMPointerType(basic_types->int32_type, 0);
+ basic_types->int64_ptr_type = LLVMPointerType(basic_types->int64_type, 0);
+ basic_types->float32_ptr_type =
+ LLVMPointerType(basic_types->float32_type, 0);
+ basic_types->float64_ptr_type =
+ LLVMPointerType(basic_types->float64_type, 0);
+
+ basic_types->i8x16_vec_type = LLVMVectorType(basic_types->int8_type, 16);
+ basic_types->i16x8_vec_type = LLVMVectorType(basic_types->int16_type, 8);
+ basic_types->i32x4_vec_type = LLVMVectorType(basic_types->int32_type, 4);
+ basic_types->i64x2_vec_type = LLVMVectorType(basic_types->int64_type, 2);
+ basic_types->f32x4_vec_type = LLVMVectorType(basic_types->float32_type, 4);
+ basic_types->f64x2_vec_type = LLVMVectorType(basic_types->float64_type, 2);
+
+ basic_types->v128_type = basic_types->i64x2_vec_type;
+ basic_types->v128_ptr_type = LLVMPointerType(basic_types->v128_type, 0);
+
+ basic_types->i1x2_vec_type = LLVMVectorType(basic_types->int1_type, 2);
+
+ basic_types->funcref_type = LLVMInt32TypeInContext(context);
+ basic_types->externref_type = LLVMInt32TypeInContext(context);
+
+ return (basic_types->int8_ptr_type && basic_types->int8_pptr_type
+ && basic_types->int16_ptr_type && basic_types->int32_ptr_type
+ && basic_types->int64_ptr_type && basic_types->float32_ptr_type
+ && basic_types->float64_ptr_type && basic_types->i8x16_vec_type
+ && basic_types->i16x8_vec_type && basic_types->i32x4_vec_type
+ && basic_types->i64x2_vec_type && basic_types->f32x4_vec_type
+ && basic_types->f64x2_vec_type && basic_types->i1x2_vec_type
+ && basic_types->meta_data_type && basic_types->funcref_type
+ && basic_types->externref_type)
+ ? true
+ : false;
+}
+
+static bool
+aot_create_llvm_consts(AOTLLVMConsts *consts, AOTCompContext *comp_ctx)
+{
+#define CREATE_I1_CONST(name, value) \
+ if (!(consts->i1_##name = \
+ LLVMConstInt(comp_ctx->basic_types.int1_type, value, true))) \
+ return false;
+
+ CREATE_I1_CONST(zero, 0)
+ CREATE_I1_CONST(one, 1)
+#undef CREATE_I1_CONST
+
+ if (!(consts->i8_zero = I8_CONST(0)))
+ return false;
+
+ if (!(consts->f32_zero = F32_CONST(0)))
+ return false;
+
+ if (!(consts->f64_zero = F64_CONST(0)))
+ return false;
+
+#define CREATE_I32_CONST(name, value) \
+ if (!(consts->i32_##name = LLVMConstInt(I32_TYPE, value, true))) \
+ return false;
+
+ CREATE_I32_CONST(min, (uint32)INT32_MIN)
+ CREATE_I32_CONST(neg_one, (uint32)-1)
+ CREATE_I32_CONST(zero, 0)
+ CREATE_I32_CONST(one, 1)
+ CREATE_I32_CONST(two, 2)
+ CREATE_I32_CONST(three, 3)
+ CREATE_I32_CONST(four, 4)
+ CREATE_I32_CONST(five, 5)
+ CREATE_I32_CONST(six, 6)
+ CREATE_I32_CONST(seven, 7)
+ CREATE_I32_CONST(eight, 8)
+ CREATE_I32_CONST(nine, 9)
+ CREATE_I32_CONST(ten, 10)
+ CREATE_I32_CONST(eleven, 11)
+ CREATE_I32_CONST(twelve, 12)
+ CREATE_I32_CONST(thirteen, 13)
+ CREATE_I32_CONST(fourteen, 14)
+ CREATE_I32_CONST(fifteen, 15)
+ CREATE_I32_CONST(31, 31)
+ CREATE_I32_CONST(32, 32)
+#undef CREATE_I32_CONST
+
+#define CREATE_I64_CONST(name, value) \
+ if (!(consts->i64_##name = LLVMConstInt(I64_TYPE, value, true))) \
+ return false;
+
+ CREATE_I64_CONST(min, (uint64)INT64_MIN)
+ CREATE_I64_CONST(neg_one, (uint64)-1)
+ CREATE_I64_CONST(zero, 0)
+ CREATE_I64_CONST(63, 63)
+ CREATE_I64_CONST(64, 64)
+#undef CREATE_I64_CONST
+
+#define CREATE_V128_CONST(name, type) \
+ if (!(consts->name##_vec_zero = LLVMConstNull(type))) \
+ return false; \
+ if (!(consts->name##_undef = LLVMGetUndef(type))) \
+ return false;
+
+ CREATE_V128_CONST(i8x16, V128_i8x16_TYPE)
+ CREATE_V128_CONST(i16x8, V128_i16x8_TYPE)
+ CREATE_V128_CONST(i32x4, V128_i32x4_TYPE)
+ CREATE_V128_CONST(i64x2, V128_i64x2_TYPE)
+ CREATE_V128_CONST(f32x4, V128_f32x4_TYPE)
+ CREATE_V128_CONST(f64x2, V128_f64x2_TYPE)
+#undef CREATE_V128_CONST
+
+#define CREATE_VEC_ZERO_MASK(slot) \
+ { \
+ LLVMTypeRef type = LLVMVectorType(I32_TYPE, slot); \
+ if (!type || !(consts->i32x##slot##_zero = LLVMConstNull(type))) \
+ return false; \
+ }
+
+ CREATE_VEC_ZERO_MASK(16)
+ CREATE_VEC_ZERO_MASK(8)
+ CREATE_VEC_ZERO_MASK(4)
+ CREATE_VEC_ZERO_MASK(2)
+#undef CREATE_VEC_ZERO_MASK
+
+ return true;
+}
+
+typedef struct ArchItem {
+ char *arch;
+ bool support_eb;
+} ArchItem;
+
+/* clang-format off */
+static ArchItem valid_archs[] = {
+ { "x86_64", false },
+ { "i386", false },
+ { "xtensa", false },
+ { "mips", true },
+ { "mipsel", false },
+ { "aarch64v8", false },
+ { "aarch64v8.1", false },
+ { "aarch64v8.2", false },
+ { "aarch64v8.3", false },
+ { "aarch64v8.4", false },
+ { "aarch64v8.5", false },
+ { "aarch64_bev8", false }, /* big endian */
+ { "aarch64_bev8.1", false },
+ { "aarch64_bev8.2", false },
+ { "aarch64_bev8.3", false },
+ { "aarch64_bev8.4", false },
+ { "aarch64_bev8.5", false },
+ { "armv4", true },
+ { "armv4t", true },
+ { "armv5t", true },
+ { "armv5te", true },
+ { "armv5tej", true },
+ { "armv6", true },
+ { "armv6kz", true },
+ { "armv6t2", true },
+ { "armv6k", true },
+ { "armv7", true },
+ { "armv6m", true },
+ { "armv6sm", true },
+ { "armv7em", true },
+ { "armv8a", true },
+ { "armv8r", true },
+ { "armv8m.base", true },
+ { "armv8m.main", true },
+ { "armv8.1m.main", true },
+ { "thumbv4", true },
+ { "thumbv4t", true },
+ { "thumbv5t", true },
+ { "thumbv5te", true },
+ { "thumbv5tej", true },
+ { "thumbv6", true },
+ { "thumbv6kz", true },
+ { "thumbv6t2", true },
+ { "thumbv6k", true },
+ { "thumbv7", true },
+ { "thumbv6m", true },
+ { "thumbv6sm", true },
+ { "thumbv7em", true },
+ { "thumbv8a", true },
+ { "thumbv8r", true },
+ { "thumbv8m.base", true },
+ { "thumbv8m.main", true },
+ { "thumbv8.1m.main", true },
+ { "riscv32", true },
+ { "riscv64", true },
+ { "arc", true }
+};
+
+static const char *valid_abis[] = {
+ "gnu",
+ "eabi",
+ "gnueabihf",
+ "msvc",
+ "ilp32",
+ "ilp32f",
+ "ilp32d",
+ "lp64",
+ "lp64f",
+ "lp64d"
+};
+/* clang-format on */
+
+static void
+print_supported_targets()
+{
+ uint32 i;
+ os_printf("Supported targets:\n");
+ for (i = 0; i < sizeof(valid_archs) / sizeof(ArchItem); i++) {
+ os_printf("%s ", valid_archs[i].arch);
+ if (valid_archs[i].support_eb)
+ os_printf("%seb ", valid_archs[i].arch);
+ }
+ os_printf("\n");
+}
+
+static void
+print_supported_abis()
+{
+ uint32 i;
+ os_printf("Supported ABI: ");
+ for (i = 0; i < sizeof(valid_abis) / sizeof(const char *); i++)
+ os_printf("%s ", valid_abis[i]);
+ os_printf("\n");
+}
+
+static bool
+check_target_arch(const char *target_arch)
+{
+ uint32 i;
+ char *arch;
+ bool support_eb;
+
+ for (i = 0; i < sizeof(valid_archs) / sizeof(ArchItem); i++) {
+ arch = valid_archs[i].arch;
+ support_eb = valid_archs[i].support_eb;
+
+ if (!strncmp(target_arch, arch, strlen(arch))
+ && ((support_eb
+ && (!strcmp(target_arch + strlen(arch), "eb")
+ || !strcmp(target_arch + strlen(arch), "")))
+ || (!support_eb && !strcmp(target_arch + strlen(arch), "")))) {
+ return true;
+ }
+ }
+ return false;
+}
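+/**
+ * Illustrative examples: "x86_64" and "riscv32" are accepted as-is;
+ * "mipseb" is accepted because the "mips" entry sets support_eb, while
+ * "x86_64eb" is rejected since x86_64 does not.
+ */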
+
+static bool
+check_target_abi(const char *target_abi)
+{
+ uint32 i;
+ for (i = 0; i < sizeof(valid_abis) / sizeof(char *); i++) {
+ if (!strcmp(target_abi, valid_abis[i]))
+ return true;
+ }
+ return false;
+}
+
+static void
+get_target_arch_from_triple(const char *triple, char *arch_buf, uint32 buf_size)
+{
+ uint32 i = 0;
+ while (*triple != '-' && *triple != '\0' && i < buf_size - 1)
+ arch_buf[i++] = *triple++;
+ /* Make sure buffer is long enough */
+ bh_assert(*triple == '-' || *triple == '\0');
+}
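+/**
+ * e.g., the triple "riscv64-pc-linux-gnu" yields "riscv64" in arch_buf:
+ * copying stops at the first '-', at the end of the triple, or when the
+ * buffer is full.
+ */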
+
+void
+aot_handle_llvm_errmsg(const char *string, LLVMErrorRef err)
+{
+ char *err_msg = LLVMGetErrorMessage(err);
+ aot_set_last_error_v("%s: %s", string, err_msg);
+ LLVMDisposeErrorMessage(err_msg);
+}
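+/**
+ * Typical usage, as in orc_jit_create() below: route an LLVMErrorRef
+ * returned by an ORC API into the AOT error buffer, e.g.:
+ *   err = LLVMOrcJITTargetMachineBuilderDetectHost(&jtmb);
+ *   if (err != LLVMErrorSuccess)
+ *       aot_handle_llvm_errmsg("failed to detect host", err);
+ */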
+
+static bool
+create_target_machine_detect_host(AOTCompContext *comp_ctx)
+{
+ char *triple = NULL;
+ LLVMTargetRef target = NULL;
+ char *err_msg = NULL;
+ char *cpu = NULL;
+ char *features = NULL;
+ LLVMTargetMachineRef target_machine = NULL;
+ bool ret = false;
+
+ triple = LLVMGetDefaultTargetTriple();
+ if (triple == NULL) {
+ aot_set_last_error("failed to get default target triple.");
+ goto fail;
+ }
+
+ if (LLVMGetTargetFromTriple(triple, &target, &err_msg) != 0) {
+ aot_set_last_error_v("failed to get llvm target from triple %s.",
+ err_msg);
+ LLVMDisposeMessage(err_msg);
+ goto fail;
+ }
+
+ if (!LLVMTargetHasJIT(target)) {
+ aot_set_last_error("unspported JIT on this platform.");
+ goto fail;
+ }
+
+ cpu = LLVMGetHostCPUName();
+ if (cpu == NULL) {
+ aot_set_last_error("failed to get host cpu information.");
+ goto fail;
+ }
+
+ features = LLVMGetHostCPUFeatures();
+ if (features == NULL) {
+ aot_set_last_error("failed to get host cpu features.");
+ goto fail;
+ }
+
+ LOG_VERBOSE("LLVM ORCJIT detected CPU \"%s\", with features \"%s\"\n", cpu,
+ features);
+
+ /* create TargetMachine */
+ target_machine = LLVMCreateTargetMachine(
+ target, triple, cpu, features, LLVMCodeGenLevelDefault,
+ LLVMRelocDefault, LLVMCodeModelJITDefault);
+ if (!target_machine) {
+ aot_set_last_error("failed to create target machine.");
+ goto fail;
+ }
+ comp_ctx->target_machine = target_machine;
+
+ /* Save target arch */
+ get_target_arch_from_triple(triple, comp_ctx->target_arch,
+ sizeof(comp_ctx->target_arch));
+ ret = true;
+
+fail:
+ if (triple)
+ LLVMDisposeMessage(triple);
+ if (features)
+ LLVMDisposeMessage(features);
+ if (cpu)
+ LLVMDisposeMessage(cpu);
+
+ return ret;
+}
+
+static bool
+orc_jit_create(AOTCompContext *comp_ctx)
+{
+ LLVMErrorRef err;
+ LLVMOrcLLLazyJITRef orc_jit = NULL;
+ LLVMOrcLLLazyJITBuilderRef builder = NULL;
+ LLVMOrcJITTargetMachineBuilderRef jtmb = NULL;
+ bool ret = false;
+
+ builder = LLVMOrcCreateLLLazyJITBuilder();
+ if (builder == NULL) {
+ aot_set_last_error("failed to create jit builder.");
+ goto fail;
+ }
+
+ err = LLVMOrcJITTargetMachineBuilderDetectHost(&jtmb);
+ if (err != LLVMErrorSuccess) {
+        aot_handle_llvm_errmsg(
+            "failed to create LLVMOrcJITTargetMachineBuilderRef", err);
+ goto fail;
+ }
+
+ LLVMOrcLLLazyJITBuilderSetNumCompileThreads(
+ builder, WASM_ORC_JIT_COMPILE_THREAD_NUM);
+
+    /* Ownership transfer:
+       LLVMOrcJITTargetMachineBuilderRef -> LLVMOrcLLLazyJITBuilderRef */
+ LLVMOrcLLLazyJITBuilderSetJITTargetMachineBuilder(builder, jtmb);
+ err = LLVMOrcCreateLLLazyJIT(&orc_jit, builder);
+ if (err != LLVMErrorSuccess) {
+ aot_handle_llvm_errmsg("quited to create llvm lazy orcjit instance",
+ err);
+ goto fail;
+ }
+    /* Ownership transfer: LLVMOrcLLLazyJITBuilderRef -> LLVMOrcLLLazyJITRef */
+ builder = NULL;
+
+ /* Ownership transfer: local -> AOTCompContext */
+ comp_ctx->orc_jit = orc_jit;
+ orc_jit = NULL;
+ ret = true;
+
+fail:
+ if (builder)
+ LLVMOrcDisposeLLLazyJITBuilder(builder);
+
+ if (orc_jit)
+ LLVMOrcDisposeLLLazyJIT(orc_jit);
+ return ret;
+}
+
+bool
+aot_compiler_init(void)
+{
+ /* Initialize LLVM environment */
+
+ LLVMInitializeCore(LLVMGetGlobalPassRegistry());
+#if WASM_ENABLE_WAMR_COMPILER != 0
+ /* Init environment of all targets for AOT compiler */
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargets();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllAsmPrinters();
+#else
+ /* Init environment of native for JIT compiler */
+ LLVMInitializeNativeTarget();
+ LLVMInitializeNativeAsmPrinter();
+#endif
+
+ return true;
+}
+
+void
+aot_compiler_destroy(void)
+{
+ LLVMShutdown();
+}
+
+AOTCompContext *
+aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option)
+{
+ AOTCompContext *comp_ctx, *ret = NULL;
+ LLVMTargetRef target;
+ char *triple = NULL, *triple_norm, *arch, *abi;
+ char *cpu = NULL, *features, buf[128];
+ char *triple_norm_new = NULL, *cpu_new = NULL;
+ char *err = NULL, *fp_round = "round.tonearest",
+ *fp_exce = "fpexcept.strict";
+ char triple_buf[32] = { 0 }, features_buf[128] = { 0 };
+ uint32 opt_level, size_level, i;
+ LLVMCodeModel code_model;
+ LLVMTargetDataRef target_data_ref;
+
+ /* Allocate memory */
+ if (!(comp_ctx = wasm_runtime_malloc(sizeof(AOTCompContext)))) {
+ aot_set_last_error("allocate memory failed.");
+ return NULL;
+ }
+
+ memset(comp_ctx, 0, sizeof(AOTCompContext));
+ comp_ctx->comp_data = comp_data;
+
+ /* Create LLVM context, module and builder */
+ comp_ctx->orc_thread_safe_context = LLVMOrcCreateNewThreadSafeContext();
+ if (!comp_ctx->orc_thread_safe_context) {
+ aot_set_last_error("create LLVM ThreadSafeContext failed.");
+ goto fail;
+ }
+
+    /* Get a reference to the underlying LLVMContext. Note: unlike
+       non-lazy JIT mode, there is no need to dispose this context;
+       it will be disposed when the thread safe context is disposed */
+ if (!(comp_ctx->context = LLVMOrcThreadSafeContextGetContext(
+ comp_ctx->orc_thread_safe_context))) {
+ aot_set_last_error("get context from LLVM ThreadSafeContext failed.");
+ goto fail;
+ }
+
+ if (!(comp_ctx->builder = LLVMCreateBuilderInContext(comp_ctx->context))) {
+ aot_set_last_error("create LLVM builder failed.");
+ goto fail;
+ }
+
+    /* Create the LLVM module for each jit function. Note: unlike
+       non-ORC JIT mode, there is no need to dispose it;
+       it will be disposed when the thread safe context is disposed */
+ if (!(comp_ctx->module = LLVMModuleCreateWithNameInContext(
+ "WASM Module", comp_ctx->context))) {
+ aot_set_last_error("create LLVM module failed.");
+ goto fail;
+ }
+
+ if (BH_LIST_ERROR == bh_list_init(&comp_ctx->native_symbols)) {
+ goto fail;
+ }
+
+#if WASM_ENABLE_DEBUG_AOT != 0
+ if (!(comp_ctx->debug_builder = LLVMCreateDIBuilder(comp_ctx->module))) {
+ aot_set_last_error("create LLVM Debug Infor builder failed.");
+ goto fail;
+ }
+
+ LLVMAddModuleFlag(
+ comp_ctx->module, LLVMModuleFlagBehaviorWarning, "Debug Info Version",
+ strlen("Debug Info Version"),
+ LLVMValueAsMetadata(LLVMConstInt(LLVMInt32Type(), 3, false)));
+
+ comp_ctx->debug_file = dwarf_gen_file_info(comp_ctx);
+ if (!comp_ctx->debug_file) {
+ aot_set_last_error("dwarf generate file info failed");
+ goto fail;
+ }
+ comp_ctx->debug_comp_unit = dwarf_gen_comp_unit_info(comp_ctx);
+ if (!comp_ctx->debug_comp_unit) {
+ aot_set_last_error("dwarf generate compile unit info failed");
+ goto fail;
+ }
+#endif
+
+ if (option->enable_bulk_memory)
+ comp_ctx->enable_bulk_memory = true;
+
+ if (option->enable_thread_mgr)
+ comp_ctx->enable_thread_mgr = true;
+
+ if (option->enable_tail_call)
+ comp_ctx->enable_tail_call = true;
+
+ if (option->enable_ref_types)
+ comp_ctx->enable_ref_types = true;
+
+ if (option->enable_aux_stack_frame)
+ comp_ctx->enable_aux_stack_frame = true;
+
+ if (option->enable_aux_stack_check)
+ comp_ctx->enable_aux_stack_check = true;
+
+ if (option->is_indirect_mode)
+ comp_ctx->is_indirect_mode = true;
+
+ if (option->disable_llvm_intrinsics)
+ comp_ctx->disable_llvm_intrinsics = true;
+
+ if (option->disable_llvm_lto)
+ comp_ctx->disable_llvm_lto = true;
+
+ if (option->enable_stack_estimation)
+ comp_ctx->enable_stack_estimation = true;
+
+ comp_ctx->opt_level = option->opt_level;
+ comp_ctx->size_level = option->size_level;
+
+ comp_ctx->custom_sections_wp = option->custom_sections;
+ comp_ctx->custom_sections_count = option->custom_sections_count;
+
+ if (option->is_jit_mode) {
+ comp_ctx->is_jit_mode = true;
+
+ /* Create TargetMachine */
+ if (!create_target_machine_detect_host(comp_ctx))
+ goto fail;
+
+ /* Create LLJIT Instance */
+ if (!orc_jit_create(comp_ctx))
+ goto fail;
+
+#ifndef OS_ENABLE_HW_BOUND_CHECK
+ comp_ctx->enable_bound_check = true;
+ /* Always enable stack boundary check if `bounds-checks`
+ is enabled */
+ comp_ctx->enable_stack_bound_check = true;
+#else
+ comp_ctx->enable_bound_check = false;
+ /* When `bounds-checks` is disabled, we set stack boundary
+ check status according to the compilation option */
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK != 0
+ /* Native stack overflow check with hardware trap is disabled,
+ we need to enable the check by LLVM JITed/AOTed code */
+ comp_ctx->enable_stack_bound_check = true;
+#else
+ /* Native stack overflow check with hardware trap is enabled,
+ no need to enable the check by LLVM JITed/AOTed code */
+ comp_ctx->enable_stack_bound_check = false;
+#endif
+#endif
+ }
+ else {
+ /* Create LLVM target machine */
+ arch = option->target_arch;
+ abi = option->target_abi;
+ cpu = option->target_cpu;
+ features = option->cpu_features;
+ opt_level = option->opt_level;
+ size_level = option->size_level;
+
+ /* verify external llc compiler */
+ comp_ctx->external_llc_compiler = getenv("WAMRC_LLC_COMPILER");
+ if (comp_ctx->external_llc_compiler) {
+#if defined(_WIN32) || defined(_WIN32_)
+ comp_ctx->external_llc_compiler = NULL;
+ LOG_WARNING("External LLC compiler not supported on Windows.");
+#else
+ if (access(comp_ctx->external_llc_compiler, X_OK) != 0) {
+ LOG_WARNING("WAMRC_LLC_COMPILER [%s] not found, fallback to "
+ "default pipeline",
+ comp_ctx->external_llc_compiler);
+ comp_ctx->external_llc_compiler = NULL;
+ }
+ else {
+ comp_ctx->llc_compiler_flags = getenv("WAMRC_LLC_FLAGS");
+ LOG_VERBOSE("Using external LLC compiler [%s]",
+ comp_ctx->external_llc_compiler);
+ }
+#endif
+ }
+
+ /* verify external asm compiler */
+ if (!comp_ctx->external_llc_compiler) {
+ comp_ctx->external_asm_compiler = getenv("WAMRC_ASM_COMPILER");
+ if (comp_ctx->external_asm_compiler) {
+#if defined(_WIN32) || defined(_WIN32_)
+ comp_ctx->external_asm_compiler = NULL;
+ LOG_WARNING("External ASM compiler not supported on Windows.");
+#else
+ if (access(comp_ctx->external_asm_compiler, X_OK) != 0) {
+ LOG_WARNING(
+ "WAMRC_ASM_COMPILER [%s] not found, fallback to "
+ "default pipeline",
+ comp_ctx->external_asm_compiler);
+ comp_ctx->external_asm_compiler = NULL;
+ }
+ else {
+ comp_ctx->asm_compiler_flags = getenv("WAMRC_ASM_FLAGS");
+ LOG_VERBOSE("Using external ASM compiler [%s]",
+ comp_ctx->external_asm_compiler);
+ }
+#endif
+ }
+ }
+
+ if (arch) {
+ /* Add default sub-arch if not specified */
+ if (!strcmp(arch, "arm"))
+ arch = "armv4";
+ else if (!strcmp(arch, "armeb"))
+ arch = "armv4eb";
+ else if (!strcmp(arch, "thumb"))
+ arch = "thumbv4t";
+ else if (!strcmp(arch, "thumbeb"))
+ arch = "thumbv4teb";
+ else if (!strcmp(arch, "aarch64"))
+ arch = "aarch64v8";
+ else if (!strcmp(arch, "aarch64_be"))
+ arch = "aarch64_bev8";
+ }
+
+ /* Check target arch */
+ if (arch && !check_target_arch(arch)) {
+ if (!strcmp(arch, "help"))
+ print_supported_targets();
+ else
+ aot_set_last_error(
+ "Invalid target. "
+ "Use --target=help to list all supported targets");
+ goto fail;
+ }
+
+ /* Check target ABI */
+ if (abi && !check_target_abi(abi)) {
+ if (!strcmp(abi, "help"))
+ print_supported_abis();
+ else
+ aot_set_last_error(
+ "Invalid target ABI. "
+ "Use --target-abi=help to list all supported ABI");
+ goto fail;
+ }
+
+ /* Set default abi for riscv target */
+ if (arch && !strncmp(arch, "riscv", 5) && !abi) {
+ if (!strcmp(arch, "riscv64"))
+ abi = "lp64d";
+ else
+ abi = "ilp32d";
+ }
+
+#if defined(__APPLE__) || defined(__MACH__)
+ if (!abi) {
+            /* On macOS, set the abi to "gnu" to avoid generating an object
+               file in the Mach-O binary format, which is unsupported */
+ abi = "gnu";
+ if (!arch && !cpu && !features) {
+                /* Get the CPU name of the host machine so that the SIMD
+                   capability check doesn't fail */
+ if (!(cpu = cpu_new = LLVMGetHostCPUName())) {
+ aot_set_last_error("llvm get host cpu name failed.");
+ goto fail;
+ }
+ }
+ }
+#endif
+
+ if (abi) {
+ /* Construct target triple: <arch>-<vendor>-<sys>-<abi> */
+ const char *vendor_sys;
+ char *arch1 = arch, default_arch[32] = { 0 };
+
+ if (!arch1) {
+ char *default_triple = LLVMGetDefaultTargetTriple();
+
+ if (!default_triple) {
+ aot_set_last_error(
+ "llvm get default target triple failed.");
+ goto fail;
+ }
+
+ vendor_sys = strstr(default_triple, "-");
+ bh_assert(vendor_sys);
+ bh_memcpy_s(default_arch, sizeof(default_arch), default_triple,
+ (uint32)(vendor_sys - default_triple));
+ arch1 = default_arch;
+
+ LLVMDisposeMessage(default_triple);
+ }
+
+ /**
+ * Set <vendor>-<sys> according to abi to generate the object file
+ * with the correct file format which might be different from the
+ * default object file format of the host, e.g., generating AOT file
+ * for Windows/MacOS under Linux host, or generating AOT file for
+ * Linux/MacOS under Windows host.
+ */
+ if (!strcmp(abi, "msvc")) {
+ if (!strcmp(arch1, "i386"))
+ vendor_sys = "-pc-win32-";
+ else
+ vendor_sys = "-pc-windows-";
+ }
+ else {
+ vendor_sys = "-pc-linux-";
+ }
+
+ bh_assert(strlen(arch1) + strlen(vendor_sys) + strlen(abi)
+ < sizeof(triple_buf));
+ bh_memcpy_s(triple_buf, (uint32)sizeof(triple_buf), arch1,
+ (uint32)strlen(arch1));
+ bh_memcpy_s(triple_buf + strlen(arch1),
+ (uint32)(sizeof(triple_buf) - strlen(arch1)),
+ vendor_sys, (uint32)strlen(vendor_sys));
+ bh_memcpy_s(triple_buf + strlen(arch1) + strlen(vendor_sys),
+ (uint32)(sizeof(triple_buf) - strlen(arch1)
+ - strlen(vendor_sys)),
+ abi, (uint32)strlen(abi));
+ triple = triple_buf;
+ }
+ else if (arch) {
+ /* Construct target triple: <arch>-<vendor>-<sys>-<abi> */
+ const char *vendor_sys;
+ char *default_triple = LLVMGetDefaultTargetTriple();
+
+ if (!default_triple) {
+ aot_set_last_error("llvm get default target triple failed.");
+ goto fail;
+ }
+
+ if (strstr(default_triple, "windows")) {
+ vendor_sys = "-pc-windows-";
+ if (!abi)
+ abi = "msvc";
+ }
+ else if (strstr(default_triple, "win32")) {
+ vendor_sys = "-pc-win32-";
+ if (!abi)
+ abi = "msvc";
+ }
+ else {
+ vendor_sys = "-pc-linux-";
+ if (!abi)
+ abi = "gnu";
+ }
+
+ LLVMDisposeMessage(default_triple);
+
+ bh_assert(strlen(arch) + strlen(vendor_sys) + strlen(abi)
+ < sizeof(triple_buf));
+ bh_memcpy_s(triple_buf, (uint32)sizeof(triple_buf), arch,
+ (uint32)strlen(arch));
+ bh_memcpy_s(triple_buf + strlen(arch),
+ (uint32)(sizeof(triple_buf) - strlen(arch)), vendor_sys,
+ (uint32)strlen(vendor_sys));
+ bh_memcpy_s(triple_buf + strlen(arch) + strlen(vendor_sys),
+ (uint32)(sizeof(triple_buf) - strlen(arch)
+ - strlen(vendor_sys)),
+ abi, (uint32)strlen(abi));
+ triple = triple_buf;
+ }
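+        /* For example, arch "x86_64" with abi "msvc" yields the triple
+           "x86_64-pc-windows-msvc", while arch "riscv64" with its default
+           abi "lp64d" yields "riscv64-pc-linux-lp64d" (illustrative; the
+           vendor/sys part depends on the branches above). */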
+
+ if (!cpu && features) {
+ aot_set_last_error("cpu isn't specified for cpu features.");
+ goto fail;
+ }
+
+ if (!triple && !cpu) {
+ /* Get a triple for the host machine */
+ if (!(triple_norm = triple_norm_new =
+ LLVMGetDefaultTargetTriple())) {
+ aot_set_last_error("llvm get default target triple failed.");
+ goto fail;
+ }
+ /* Get CPU name of the host machine */
+ if (!(cpu = cpu_new = LLVMGetHostCPUName())) {
+ aot_set_last_error("llvm get host cpu name failed.");
+ goto fail;
+ }
+ }
+ else if (triple) {
+ /* Normalize a target triple */
+ if (!(triple_norm = triple_norm_new =
+ LLVMNormalizeTargetTriple(triple))) {
+ snprintf(buf, sizeof(buf),
+ "llvm normlalize target triple (%s) failed.", triple);
+ aot_set_last_error(buf);
+ goto fail;
+ }
+ if (!cpu)
+ cpu = "";
+ }
+ else {
+ /* triple is NULL, cpu isn't NULL */
+ snprintf(buf, sizeof(buf), "target isn't specified for cpu %s.",
+ cpu);
+ aot_set_last_error(buf);
+ goto fail;
+ }
+
+ /* Add module flag and cpu feature for riscv target */
+ if (arch && !strncmp(arch, "riscv", 5)) {
+ LLVMMetadataRef meta_target_abi;
+
+ if (!(meta_target_abi = LLVMMDStringInContext2(comp_ctx->context,
+ abi, strlen(abi)))) {
+ aot_set_last_error("create metadata string failed.");
+ goto fail;
+ }
+ LLVMAddModuleFlag(comp_ctx->module, LLVMModuleFlagBehaviorError,
+ "target-abi", strlen("target-abi"),
+ meta_target_abi);
+
+ if (!strcmp(abi, "lp64d") || !strcmp(abi, "ilp32d")) {
+ if (features) {
+ snprintf(features_buf, sizeof(features_buf), "%s%s",
+ features, ",+d");
+ features = features_buf;
+ }
+ else
+ features = "+d";
+ }
+ }
+
+ if (!features)
+ features = "";
+
+        /* Get target with triple; note that LLVMGetTargetFromTriple()
+           returns 0 on success rather than true. */
+ if (LLVMGetTargetFromTriple(triple_norm, &target, &err) != 0) {
+ if (err) {
+ LLVMDisposeMessage(err);
+ err = NULL;
+ }
+ snprintf(buf, sizeof(buf),
+ "llvm get target from triple (%s) failed", triple_norm);
+ aot_set_last_error(buf);
+ goto fail;
+ }
+
+ /* Save target arch */
+ get_target_arch_from_triple(triple_norm, comp_ctx->target_arch,
+ sizeof(comp_ctx->target_arch));
+
+ if (option->bounds_checks == 1 || option->bounds_checks == 0) {
+ /* Set by user */
+ comp_ctx->enable_bound_check =
+ (option->bounds_checks == 1) ? true : false;
+ }
+ else {
+ /* Unset by user, use default value */
+ if (strstr(comp_ctx->target_arch, "64")
+ && !option->is_sgx_platform) {
+ comp_ctx->enable_bound_check = false;
+ }
+ else {
+ comp_ctx->enable_bound_check = true;
+ }
+ }
+
+ if (comp_ctx->enable_bound_check) {
+ /* Always enable stack boundary check if `bounds-checks`
+ is enabled */
+ comp_ctx->enable_stack_bound_check = true;
+ }
+ else {
+ /* When `bounds-checks` is disabled, we set stack boundary
+ check status according to the input option */
+ comp_ctx->enable_stack_bound_check =
+ (option->stack_bounds_checks == 1) ? true : false;
+ }
+
+ os_printf("Create AoT compiler with:\n");
+ os_printf(" target: %s\n", comp_ctx->target_arch);
+ os_printf(" target cpu: %s\n", cpu);
+ os_printf(" cpu features: %s\n", features);
+ os_printf(" opt level: %d\n", opt_level);
+ os_printf(" size level: %d\n", size_level);
+ switch (option->output_format) {
+ case AOT_LLVMIR_UNOPT_FILE:
+ os_printf(" output format: unoptimized LLVM IR\n");
+ break;
+ case AOT_LLVMIR_OPT_FILE:
+ os_printf(" output format: optimized LLVM IR\n");
+ break;
+ case AOT_FORMAT_FILE:
+ os_printf(" output format: AoT file\n");
+ break;
+ case AOT_OBJECT_FILE:
+ os_printf(" output format: native object file\n");
+ break;
+ }
+
+ if (!LLVMTargetHasTargetMachine(target)) {
+ snprintf(buf, sizeof(buf),
+ "no target machine for this target (%s).", triple_norm);
+ aot_set_last_error(buf);
+ goto fail;
+ }
+
+        /* Report an error if the target isn't arc and has no asm backend.
+           For the arc target, since LLVM currently cannot emit an elf file
+           to a memory buffer, we let it emit an assembly file instead, then
+           invoke arc-gcc to compile the asm file to an elf file and read
+           that elf file into a memory buffer. */
+ if (strncmp(comp_ctx->target_arch, "arc", 3)
+ && !LLVMTargetHasAsmBackend(target)) {
+ snprintf(buf, sizeof(buf), "no asm backend for this target (%s).",
+ LLVMGetTargetName(target));
+ aot_set_last_error(buf);
+ goto fail;
+ }
+
+ /* Set code model */
+ if (size_level == 0)
+ code_model = LLVMCodeModelLarge;
+ else if (size_level == 1)
+ code_model = LLVMCodeModelMedium;
+ else if (size_level == 2)
+ code_model = LLVMCodeModelKernel;
+ else
+ code_model = LLVMCodeModelSmall;
+
+ /* Create the target machine */
+ if (!(comp_ctx->target_machine = LLVMCreateTargetMachineWithOpts(
+ target, triple_norm, cpu, features, opt_level,
+ LLVMRelocStatic, code_model, false,
+ option->stack_usage_file))) {
+ aot_set_last_error("create LLVM target machine failed.");
+ goto fail;
+ }
+ }
+
+ if (option->enable_simd && strcmp(comp_ctx->target_arch, "x86_64") != 0
+ && strncmp(comp_ctx->target_arch, "aarch64", 7) != 0) {
+ /* Disable simd if it isn't supported by target arch */
+ option->enable_simd = false;
+ }
+
+ if (option->enable_simd) {
+ char *tmp;
+ bool check_simd_ret;
+
+ comp_ctx->enable_simd = true;
+
+ if (!(tmp = LLVMGetTargetMachineCPU(comp_ctx->target_machine))) {
+ aot_set_last_error("get CPU from Target Machine fail");
+ goto fail;
+ }
+
+ check_simd_ret =
+ aot_check_simd_compatibility(comp_ctx->target_arch, tmp);
+ LLVMDisposeMessage(tmp);
+ if (!check_simd_ret) {
+ aot_set_last_error("SIMD compatibility check failed, "
+ "try adding --cpu=<cpu> to specify a cpu "
+ "or adding --disable-simd to disable SIMD");
+ goto fail;
+ }
+ }
+
+ if (!(target_data_ref =
+ LLVMCreateTargetDataLayout(comp_ctx->target_machine))) {
+ aot_set_last_error("create LLVM target data layout failed.");
+ goto fail;
+ }
+ comp_ctx->pointer_size = LLVMPointerSize(target_data_ref);
+ LLVMDisposeTargetData(target_data_ref);
+
+ comp_ctx->optimize = true;
+ if (option->output_format == AOT_LLVMIR_UNOPT_FILE)
+ comp_ctx->optimize = false;
+
+ /* Create metadata for llvm float experimental constrained intrinsics */
+ if (!(comp_ctx->fp_rounding_mode = LLVMMDStringInContext(
+ comp_ctx->context, fp_round, (uint32)strlen(fp_round)))
+ || !(comp_ctx->fp_exception_behavior = LLVMMDStringInContext(
+ comp_ctx->context, fp_exce, (uint32)strlen(fp_exce)))) {
+ aot_set_last_error("create float llvm metadata failed.");
+ goto fail;
+ }
+
+ if (!aot_set_llvm_basic_types(&comp_ctx->basic_types, comp_ctx->context)) {
+ aot_set_last_error("create LLVM basic types failed.");
+ goto fail;
+ }
+
+ if (!aot_create_llvm_consts(&comp_ctx->llvm_consts, comp_ctx)) {
+ aot_set_last_error("create LLVM const values failed.");
+ goto fail;
+ }
+
+ /* set exec_env data type to int8** */
+ comp_ctx->exec_env_type = comp_ctx->basic_types.int8_pptr_type;
+
+ /* set aot_inst data type to int8* */
+ comp_ctx->aot_inst_type = INT8_PTR_TYPE;
+
+ /* Create function context for each function */
+ comp_ctx->func_ctx_count = comp_data->func_count;
+ if (comp_data->func_count > 0
+ && !(comp_ctx->func_ctxes =
+ aot_create_func_contexts(comp_data, comp_ctx)))
+ goto fail;
+
+ if (cpu) {
+ uint32 len = (uint32)strlen(cpu) + 1;
+ if (!(comp_ctx->target_cpu = wasm_runtime_malloc(len))) {
+ aot_set_last_error("allocate memory failed");
+ goto fail;
+ }
+ bh_memcpy_s(comp_ctx->target_cpu, len, cpu, len);
+ }
+
+ if (comp_ctx->disable_llvm_intrinsics)
+ aot_intrinsic_fill_capability_flags(comp_ctx);
+
+ ret = comp_ctx;
+
+fail:
+ if (triple_norm_new)
+ LLVMDisposeMessage(triple_norm_new);
+
+ if (cpu_new)
+ LLVMDisposeMessage(cpu_new);
+
+ if (!ret)
+ aot_destroy_comp_context(comp_ctx);
+
+ (void)i;
+ return ret;
+}
+
+void
+aot_destroy_comp_context(AOTCompContext *comp_ctx)
+{
+ if (!comp_ctx)
+ return;
+
+ if (comp_ctx->target_machine)
+ LLVMDisposeTargetMachine(comp_ctx->target_machine);
+
+ if (comp_ctx->builder)
+ LLVMDisposeBuilder(comp_ctx->builder);
+
+ if (comp_ctx->orc_thread_safe_context)
+ LLVMOrcDisposeThreadSafeContext(comp_ctx->orc_thread_safe_context);
+
+ /* Note: don't dispose comp_ctx->context and comp_ctx->module as
+ they are disposed when disposing the thread safe context */
+
+ /* Has to be the last one */
+ if (comp_ctx->orc_jit)
+ LLVMOrcDisposeLLLazyJIT(comp_ctx->orc_jit);
+
+ if (comp_ctx->func_ctxes)
+ aot_destroy_func_contexts(comp_ctx->func_ctxes,
+ comp_ctx->func_ctx_count);
+
+ if (bh_list_length(&comp_ctx->native_symbols) > 0) {
+ AOTNativeSymbol *sym = bh_list_first_elem(&comp_ctx->native_symbols);
+ while (sym) {
+ AOTNativeSymbol *t = bh_list_elem_next(sym);
+ bh_list_remove(&comp_ctx->native_symbols, sym);
+ wasm_runtime_free(sym);
+ sym = t;
+ }
+ }
+
+ if (comp_ctx->target_cpu) {
+ wasm_runtime_free(comp_ctx->target_cpu);
+ }
+
+ wasm_runtime_free(comp_ctx);
+}
+
+static bool
+insert_native_symbol(AOTCompContext *comp_ctx, const char *symbol, int32 idx)
+{
+ AOTNativeSymbol *sym = wasm_runtime_malloc(sizeof(AOTNativeSymbol));
+
+ if (!sym) {
+ aot_set_last_error("alloc native symbol failed.");
+ return false;
+ }
+
+ memset(sym, 0, sizeof(AOTNativeSymbol));
+    /* use '<' so that snprintf below cannot truncate the symbol */
+    bh_assert(strlen(symbol) < sizeof(sym->symbol));
+ snprintf(sym->symbol, sizeof(sym->symbol), "%s", symbol);
+ sym->index = idx;
+
+ if (BH_LIST_ERROR == bh_list_insert(&comp_ctx->native_symbols, sym)) {
+ wasm_runtime_free(sym);
+ aot_set_last_error("insert native symbol to list failed.");
+ return false;
+ }
+
+ return true;
+}
+
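+/**
+ * Worked example for aot_get_native_symbol_index on a 32-bit target:
+ * requesting "f64#..." while the list holds 3 entries first appends
+ * "__ignore" at index 3 so the constant starts at the 8-byte aligned
+ * index 4, then appends the symbol at index 4 and a trailing "__ignore"
+ * at 5, since the f64 spans two pointer slots.
+ */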
+int32
+aot_get_native_symbol_index(AOTCompContext *comp_ctx, const char *symbol)
+{
+ int32 idx = -1;
+ AOTNativeSymbol *sym = NULL;
+
+ sym = bh_list_first_elem(&comp_ctx->native_symbols);
+
+    /* Look up an existing symbol record */
+
+ while (sym) {
+ if (strcmp(sym->symbol, symbol) == 0) {
+ idx = sym->index;
+ break;
+ }
+ sym = bh_list_elem_next(sym);
+ }
+
+    /* The given symbol doesn't exist in the list, so allocate a new index
+       for it */
+
+ if (idx < 0) {
+ if (comp_ctx->pointer_size == sizeof(uint32)
+ && (!strncmp(symbol, "f64#", 4) || !strncmp(symbol, "i64#", 4))) {
+ idx = bh_list_length(&comp_ctx->native_symbols);
+            /* Add 4 bytes of padding on a 32-bit target to make sure the
+               64-bit const is stored at an 8-byte aligned address */
+ if (idx & 1) {
+ if (!insert_native_symbol(comp_ctx, "__ignore", idx)) {
+ return -1;
+ }
+ }
+ }
+
+ idx = bh_list_length(&comp_ctx->native_symbols);
+ if (!insert_native_symbol(comp_ctx, symbol, idx)) {
+ return -1;
+ }
+
+ if (comp_ctx->pointer_size == sizeof(uint32)
+ && (!strncmp(symbol, "f64#", 4) || !strncmp(symbol, "i64#", 4))) {
+            /* a 64-bit const occupies 2 pointer slots on a 32-bit target */
+ if (!insert_native_symbol(comp_ctx, "__ignore", idx + 1)) {
+ return -1;
+ }
+ }
+ }
+
+ return idx;
+}
+
+void
+aot_value_stack_push(AOTValueStack *stack, AOTValue *value)
+{
+ if (!stack->value_list_head)
+ stack->value_list_head = stack->value_list_end = value;
+ else {
+ stack->value_list_end->next = value;
+ value->prev = stack->value_list_end;
+ stack->value_list_end = value;
+ }
+}
+
+AOTValue *
+aot_value_stack_pop(AOTValueStack *stack)
+{
+ AOTValue *value = stack->value_list_end;
+
+ bh_assert(stack->value_list_end);
+
+ if (stack->value_list_head == stack->value_list_end)
+ stack->value_list_head = stack->value_list_end = NULL;
+ else {
+ stack->value_list_end = stack->value_list_end->prev;
+ stack->value_list_end->next = NULL;
+ value->prev = NULL;
+ }
+
+ return value;
+}
+
+void
+aot_value_stack_destroy(AOTValueStack *stack)
+{
+ AOTValue *value = stack->value_list_head, *p;
+
+ while (value) {
+ p = value->next;
+ wasm_runtime_free(value);
+ value = p;
+ }
+
+ stack->value_list_head = NULL;
+ stack->value_list_end = NULL;
+}
+
+void
+aot_block_stack_push(AOTBlockStack *stack, AOTBlock *block)
+{
+ if (!stack->block_list_head)
+ stack->block_list_head = stack->block_list_end = block;
+ else {
+ stack->block_list_end->next = block;
+ block->prev = stack->block_list_end;
+ stack->block_list_end = block;
+ }
+}
+
+AOTBlock *
+aot_block_stack_pop(AOTBlockStack *stack)
+{
+ AOTBlock *block = stack->block_list_end;
+
+ bh_assert(stack->block_list_end);
+
+ if (stack->block_list_head == stack->block_list_end)
+ stack->block_list_head = stack->block_list_end = NULL;
+ else {
+ stack->block_list_end = stack->block_list_end->prev;
+ stack->block_list_end->next = NULL;
+ block->prev = NULL;
+ }
+
+ return block;
+}
+
+void
+aot_block_stack_destroy(AOTBlockStack *stack)
+{
+ AOTBlock *block = stack->block_list_head, *p;
+
+ while (block) {
+ p = block->next;
+ aot_value_stack_destroy(&block->value_stack);
+ aot_block_destroy(block);
+ block = p;
+ }
+
+ stack->block_list_head = NULL;
+ stack->block_list_end = NULL;
+}
+
+void
+aot_block_destroy(AOTBlock *block)
+{
+ aot_value_stack_destroy(&block->value_stack);
+ if (block->param_types)
+ wasm_runtime_free(block->param_types);
+ if (block->param_phis)
+ wasm_runtime_free(block->param_phis);
+ if (block->else_param_phis)
+ wasm_runtime_free(block->else_param_phis);
+ if (block->result_types)
+ wasm_runtime_free(block->result_types);
+ if (block->result_phis)
+ wasm_runtime_free(block->result_phis);
+ wasm_runtime_free(block);
+}
+
+bool
+aot_checked_addr_list_add(AOTFuncContext *func_ctx, uint32 local_idx,
+ uint32 offset, uint32 bytes)
+{
+ AOTCheckedAddr *node = func_ctx->checked_addr_list;
+
+ if (!(node = wasm_runtime_malloc(sizeof(AOTCheckedAddr)))) {
+ aot_set_last_error("allocate memory failed.");
+ return false;
+ }
+
+ node->local_idx = local_idx;
+ node->offset = offset;
+ node->bytes = bytes;
+
+ node->next = func_ctx->checked_addr_list;
+ func_ctx->checked_addr_list = node;
+ return true;
+}
+
+void
+aot_checked_addr_list_del(AOTFuncContext *func_ctx, uint32 local_idx)
+{
+ AOTCheckedAddr *node = func_ctx->checked_addr_list;
+ AOTCheckedAddr *node_prev = NULL, *node_next;
+
+ while (node) {
+ node_next = node->next;
+
+ if (node->local_idx == local_idx) {
+ if (!node_prev)
+ func_ctx->checked_addr_list = node_next;
+ else
+ node_prev->next = node_next;
+ wasm_runtime_free(node);
+ }
+ else {
+ node_prev = node;
+ }
+
+ node = node_next;
+ }
+}
+
+bool
+aot_checked_addr_list_find(AOTFuncContext *func_ctx, uint32 local_idx,
+ uint32 offset, uint32 bytes)
+{
+ AOTCheckedAddr *node = func_ctx->checked_addr_list;
+
+ while (node) {
+ if (node->local_idx == local_idx && node->offset == offset
+ && node->bytes >= bytes) {
+ return true;
+ }
+ node = node->next;
+ }
+
+ return false;
+}
+
+void
+aot_checked_addr_list_destroy(AOTFuncContext *func_ctx)
+{
+ AOTCheckedAddr *node = func_ctx->checked_addr_list, *node_next;
+
+ while (node) {
+ node_next = node->next;
+ wasm_runtime_free(node);
+ node = node_next;
+ }
+
+ func_ctx->checked_addr_list = NULL;
+}
+
+bool
+aot_build_zero_function_ret(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ AOTFuncType *func_type)
+{
+ LLVMValueRef ret = NULL;
+
+ if (func_type->result_count) {
+ switch (func_type->types[func_type->param_count]) {
+ case VALUE_TYPE_I32:
+ ret = LLVMBuildRet(comp_ctx->builder, I32_ZERO);
+ break;
+ case VALUE_TYPE_I64:
+ ret = LLVMBuildRet(comp_ctx->builder, I64_ZERO);
+ break;
+ case VALUE_TYPE_F32:
+ ret = LLVMBuildRet(comp_ctx->builder, F32_ZERO);
+ break;
+ case VALUE_TYPE_F64:
+ ret = LLVMBuildRet(comp_ctx->builder, F64_ZERO);
+ break;
+ case VALUE_TYPE_V128:
+ ret =
+ LLVMBuildRet(comp_ctx->builder, LLVM_CONST(i64x2_vec_zero));
+ break;
+ case VALUE_TYPE_FUNCREF:
+ case VALUE_TYPE_EXTERNREF:
+ ret = LLVMBuildRet(comp_ctx->builder, REF_NULL);
+ break;
+ default:
+ bh_assert(0);
+ }
+ }
+ else {
+ ret = LLVMBuildRetVoid(comp_ctx->builder);
+ }
+
+ if (!ret) {
+ aot_set_last_error("llvm build ret failed.");
+ return false;
+ }
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMMetadataRef return_location =
+ dwarf_gen_func_ret_location(comp_ctx, func_ctx);
+ LLVMInstructionSetDebugLoc(ret, return_location);
+#endif
+ return true;
+}
+
+static LLVMValueRef
+__call_llvm_intrinsic(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, const char *name,
+ LLVMTypeRef ret_type, LLVMTypeRef *param_types,
+ int param_count, LLVMValueRef *param_values)
+{
+ LLVMValueRef func, ret;
+ LLVMTypeRef func_type;
+ const char *symname;
+ int32 func_idx;
+
+ if (comp_ctx->disable_llvm_intrinsics
+ && aot_intrinsic_check_capability(comp_ctx, name)) {
+ if (func_ctx == NULL) {
+ aot_set_last_error_v("invalid func_ctx for intrinsic: %s", name);
+ return NULL;
+ }
+
+ if (!(func_type = LLVMFunctionType(ret_type, param_types,
+ (uint32)param_count, false))) {
+ aot_set_last_error("create LLVM intrinsic function type failed.");
+ return NULL;
+ }
+ if (!(func_type = LLVMPointerType(func_type, 0))) {
+ aot_set_last_error(
+ "create LLVM intrinsic function pointer type failed.");
+ return NULL;
+ }
+
+ if (!(symname = aot_intrinsic_get_symbol(name))) {
+ aot_set_last_error_v("runtime intrinsic not implemented: %s\n",
+ name);
+ return NULL;
+ }
+
+ func_idx =
+ aot_get_native_symbol_index((AOTCompContext *)comp_ctx, symname);
+ if (func_idx < 0) {
+ aot_set_last_error_v("get runtime intrinsc index failed: %s\n",
+ name);
+ return NULL;
+ }
+
+ if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
+ func_type, func_idx))) {
+ aot_set_last_error_v("get runtime intrinsc failed: %s\n", name);
+ return NULL;
+ }
+ }
+ else {
+ /* Declare llvm intrinsic function if necessary */
+ if (!(func = LLVMGetNamedFunction(func_ctx->module, name))) {
+ if (!(func_type = LLVMFunctionType(ret_type, param_types,
+ (uint32)param_count, false))) {
+ aot_set_last_error(
+ "create LLVM intrinsic function type failed.");
+ return NULL;
+ }
+
+ if (!(func = LLVMAddFunction(func_ctx->module, name, func_type))) {
+ aot_set_last_error("add LLVM intrinsic function failed.");
+ return NULL;
+ }
+ }
+ }
+
+#if LLVM_VERSION_MAJOR >= 14
+ func_type =
+ LLVMFunctionType(ret_type, param_types, (uint32)param_count, false);
+#endif
+
+ /* Call the LLVM intrinsic function */
+ if (!(ret = LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values,
+ (uint32)param_count, "call"))) {
+ aot_set_last_error("llvm build intrinsic call failed.");
+ return NULL;
+ }
+
+ return ret;
+}
+
+LLVMValueRef
+aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, const char *intrinsic,
+ LLVMTypeRef ret_type, LLVMTypeRef *param_types,
+ int param_count, ...)
+{
+ LLVMValueRef *param_values, ret;
+ va_list argptr;
+ uint64 total_size;
+ int i = 0;
+
+ /* Create param values */
+ total_size = sizeof(LLVMValueRef) * (uint64)param_count;
+ if (total_size >= UINT32_MAX
+ || !(param_values = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory for param values failed.");
+        return NULL;
+ }
+
+ /* Load each param value */
+ va_start(argptr, param_count);
+ while (i < param_count)
+ param_values[i++] = va_arg(argptr, LLVMValueRef);
+ va_end(argptr);
+
+ ret = __call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
+ param_types, param_count, param_values);
+
+ wasm_runtime_free(param_values);
+
+ return ret;
+}
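+/**
+ * A minimal usage sketch (illustrative; assumes `x` is an f32-typed
+ * LLVMValueRef in scope):
+ *   LLVMTypeRef param_types[1] = { F32_TYPE };
+ *   LLVMValueRef res = aot_call_llvm_intrinsic(
+ *       comp_ctx, func_ctx, "llvm.fabs.f32", F32_TYPE, param_types, 1, x);
+ * "llvm.fabs.f32" is a standard LLVM intrinsic taking a single float.
+ */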
+
+LLVMValueRef
+aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, const char *intrinsic,
+ LLVMTypeRef ret_type, LLVMTypeRef *param_types,
+ int param_count, va_list param_value_list)
+{
+ LLVMValueRef *param_values, ret;
+ uint64 total_size;
+ int i = 0;
+
+ /* Create param values */
+ total_size = sizeof(LLVMValueRef) * (uint64)param_count;
+ if (total_size >= UINT32_MAX
+ || !(param_values = wasm_runtime_malloc((uint32)total_size))) {
+ aot_set_last_error("allocate memory for param values failed.");
+        return NULL;
+ }
+
+ /* Load each param value */
+ while (i < param_count)
+ param_values[i++] = va_arg(param_value_list, LLVMValueRef);
+
+ ret = __call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
+ param_types, param_count, param_values);
+
+ wasm_runtime_free(param_values);
+
+ return ret;
+}
+
+LLVMValueRef
+aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
+ LLVMTypeRef func_type, int32 index)
+{
+ LLVMValueRef func;
+ LLVMValueRef func_addr;
+
+ if (!(func_addr = I32_CONST(index))) {
+ aot_set_last_error("construct function index failed.");
+ goto fail;
+ }
+
+ if (!(func_addr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, OPQ_PTR_TYPE, base,
+ &func_addr, 1, "func_addr"))) {
+ aot_set_last_error("get function addr by index failed.");
+ goto fail;
+ }
+
+ func =
+ LLVMBuildLoad2(comp_ctx->builder, OPQ_PTR_TYPE, func_addr, "func_tmp");
+
+ if (func == NULL) {
+ aot_set_last_error("get function pointer failed.");
+ goto fail;
+ }
+
+ if (!(func =
+ LLVMBuildBitCast(comp_ctx->builder, func, func_type, "func"))) {
+ aot_set_last_error("cast function fialed.");
+ goto fail;
+ }
+
+ return func;
+fail:
+ return NULL;
+}
+
+LLVMValueRef
+aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
+ const WASMValue *value, uint8 value_type)
+{
+ LLVMValueRef const_index, const_addr, const_value;
+ LLVMTypeRef const_ptr_type, const_type;
+ char buf[128] = { 0 };
+ int32 index;
+
+ switch (value_type) {
+ case VALUE_TYPE_I32:
+ /* Store the raw int bits of i32 const as a hex string */
+ snprintf(buf, sizeof(buf), "i32#%08" PRIX32, value->i32);
+ const_ptr_type = INT32_PTR_TYPE;
+ const_type = I32_TYPE;
+ break;
+ case VALUE_TYPE_I64:
+ /* Store the raw int bits of i64 const as a hex string */
+ snprintf(buf, sizeof(buf), "i64#%016" PRIX64, value->i64);
+ const_ptr_type = INT64_PTR_TYPE;
+ const_type = I64_TYPE;
+ break;
+ case VALUE_TYPE_F32:
+ /* Store the raw int bits of f32 const as a hex string */
+ snprintf(buf, sizeof(buf), "f32#%08" PRIX32, value->i32);
+ const_ptr_type = F32_PTR_TYPE;
+ const_type = F32_TYPE;
+ break;
+ case VALUE_TYPE_F64:
+ /* Store the raw int bits of f64 const as a hex string */
+ snprintf(buf, sizeof(buf), "f64#%016" PRIX64, value->i64);
+ const_ptr_type = F64_PTR_TYPE;
+ const_type = F64_TYPE;
+ break;
+ default:
+ bh_assert(0);
+ return NULL;
+ }
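+    /* e.g., an i32 const 42 is keyed as "i32#0000002A"; keying by the raw
+       bit pattern lets identical constants share one native-symbol slot. */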
+
+    /* Load the const from exec_env->native_symbol[index] */
+
+ index = aot_get_native_symbol_index(comp_ctx, buf);
+ if (index < 0) {
+ return NULL;
+ }
+
+ if (!(const_index = I32_CONST(index))) {
+ aot_set_last_error("construct const index failed.");
+ return NULL;
+ }
+
+ if (!(const_addr =
+ LLVMBuildInBoundsGEP2(comp_ctx->builder, OPQ_PTR_TYPE, base,
+ &const_index, 1, "const_addr_tmp"))) {
+ aot_set_last_error("get const addr by index failed.");
+ return NULL;
+ }
+
+ if (!(const_addr = LLVMBuildBitCast(comp_ctx->builder, const_addr,
+ const_ptr_type, "const_addr"))) {
+ aot_set_last_error("cast const fialed.");
+ return NULL;
+ }
+
+ if (!(const_value = LLVMBuildLoad2(comp_ctx->builder, const_type,
+ const_addr, "const_value"))) {
+ aot_set_last_error("load const failed.");
+ return NULL;
+ }
+
+ (void)const_type;
+ return const_value;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.h
new file mode 100644
index 000000000..2a1564019
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_LLVM_H_
+#define _AOT_LLVM_H_
+
+#include "aot.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm-c/Types.h"
+#include "llvm-c/Target.h"
+#include "llvm-c/Core.h"
+#include "llvm-c/Object.h"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm-c/Analysis.h"
+#include "llvm-c/BitWriter.h"
+#include "llvm-c/Transforms/Utils.h"
+#include "llvm-c/Transforms/Scalar.h"
+#include "llvm-c/Transforms/Vectorize.h"
+#include "llvm-c/Transforms/PassManagerBuilder.h"
+
+#include "llvm-c/Orc.h"
+#include "llvm-c/Error.h"
+#include "llvm-c/Support.h"
+#include "llvm-c/Initialization.h"
+#include "llvm-c/TargetMachine.h"
+#include "llvm-c/LLJIT.h"
+#if WASM_ENABLE_DEBUG_AOT != 0
+#include "llvm-c/DebugInfo.h"
+#endif
+
+#include "aot_orc_extra.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if LLVM_VERSION_MAJOR < 14
+#define LLVMBuildLoad2(builder, type, value, name) \
+ LLVMBuildLoad(builder, value, name)
+
+#define LLVMBuildCall2(builder, type, func, args, num_args, name) \
+ LLVMBuildCall(builder, func, args, num_args, name)
+
+#define LLVMBuildInBoundsGEP2(builder, type, ptr, indices, num_indices, name) \
+ LLVMBuildInBoundsGEP(builder, ptr, indices, num_indices, name)
+#else
+/* Opaque pointer type */
+#define OPQ_PTR_TYPE INT8_PTR_TYPE
+#endif
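+/* These wrappers let call sites use the typed LLVMBuild*2 APIs uniformly;
+ * on LLVM < 14 they fall back to the legacy untyped builders, which infer
+ * the type from the pointer operand. */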
+
+#ifndef NDEBUG
+#undef DEBUG_PASS
+#undef DUMP_MODULE
+// #define DEBUG_PASS
+// #define DUMP_MODULE
+#else
+#undef DEBUG_PASS
+#undef DUMP_MODULE
+#endif
+
+/**
+ * Value in the WASM operation stack, each stack element
+ * is an LLVM value
+ */
+typedef struct AOTValue {
+ struct AOTValue *next;
+ struct AOTValue *prev;
+ LLVMValueRef value;
+ /* VALUE_TYPE_I32/I64/F32/F64/VOID */
+ uint8 type;
+ bool is_local;
+ uint32 local_idx;
+} AOTValue;
+
+/**
+ * Value stack, represents stack elements in a WASM block
+ */
+typedef struct AOTValueStack {
+ AOTValue *value_list_head;
+ AOTValue *value_list_end;
+} AOTValueStack;
+
+typedef struct AOTBlock {
+ struct AOTBlock *next;
+ struct AOTBlock *prev;
+
+ /* Block index */
+ uint32 block_index;
+ /* LABEL_TYPE_BLOCK/LOOP/IF/FUNCTION */
+ uint32 label_type;
+ /* Whether it is reachable */
+ bool is_reachable;
+ /* Whether skip translation of wasm else branch */
+ bool skip_wasm_code_else;
+
+    /* code of the else opcode of this block, if it is an IF block */
+ uint8 *wasm_code_else;
+ /* code end of this block */
+ uint8 *wasm_code_end;
+
+ /* LLVM label points to code begin */
+ LLVMBasicBlockRef llvm_entry_block;
+ /* LLVM label points to code else */
+ LLVMBasicBlockRef llvm_else_block;
+ /* LLVM label points to code end */
+ LLVMBasicBlockRef llvm_end_block;
+
+ /* WASM operation stack */
+ AOTValueStack value_stack;
+
+ /* Param count/types/PHIs of this block */
+ uint32 param_count;
+ uint8 *param_types;
+ LLVMValueRef *param_phis;
+ LLVMValueRef *else_param_phis;
+
+ /* Result count/types/PHIs of this block */
+ uint32 result_count;
+ uint8 *result_types;
+ LLVMValueRef *result_phis;
+} AOTBlock;
+
+/**
+ * Block stack, represents WASM block stack elements
+ */
+typedef struct AOTBlockStack {
+ AOTBlock *block_list_head;
+ AOTBlock *block_list_end;
+ /* Current block index of each block type */
+ uint32 block_index[3];
+} AOTBlockStack;
+
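+/* Records a memory access already proven to be in bounds: the base local
+ * index plus a constant offset and access width. A later access matching
+ * a recorded entry (same local and offset, width not larger) can skip its
+ * bounds check; see aot_checked_addr_list_find(). */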
+typedef struct AOTCheckedAddr {
+ struct AOTCheckedAddr *next;
+ uint32 local_idx;
+ uint32 offset;
+ uint32 bytes;
+} AOTCheckedAddr, *AOTCheckedAddrList;
+
+typedef struct AOTMemInfo {
+ LLVMValueRef mem_base_addr;
+ LLVMValueRef mem_data_size_addr;
+ LLVMValueRef mem_cur_page_count_addr;
+ LLVMValueRef mem_bound_check_1byte;
+ LLVMValueRef mem_bound_check_2bytes;
+ LLVMValueRef mem_bound_check_4bytes;
+ LLVMValueRef mem_bound_check_8bytes;
+ LLVMValueRef mem_bound_check_16bytes;
+} AOTMemInfo;
+
+typedef struct AOTFuncContext {
+ AOTFunc *aot_func;
+ LLVMValueRef func;
+ LLVMTypeRef func_type;
+ /* LLVM module for this function, note that in LAZY JIT mode,
+ each aot function belongs to an individual module */
+ LLVMModuleRef module;
+ AOTBlockStack block_stack;
+
+ LLVMValueRef exec_env;
+ LLVMValueRef aot_inst;
+ LLVMValueRef argv_buf;
+ LLVMValueRef native_stack_bound;
+ LLVMValueRef native_stack_top_min_addr;
+ LLVMValueRef aux_stack_bound;
+ LLVMValueRef aux_stack_bottom;
+ LLVMValueRef native_symbol;
+ LLVMValueRef last_alloca;
+ LLVMValueRef func_ptrs;
+
+ AOTMemInfo *mem_info;
+
+ LLVMValueRef cur_exception;
+
+ bool mem_space_unchanged;
+ AOTCheckedAddrList checked_addr_list;
+
+ LLVMBasicBlockRef got_exception_block;
+ LLVMBasicBlockRef func_return_block;
+ LLVMValueRef exception_id_phi;
+ LLVMValueRef func_type_indexes;
+#if WASM_ENABLE_DEBUG_AOT != 0
+ LLVMMetadataRef debug_func;
+#endif
+ LLVMValueRef locals[1];
+} AOTFuncContext;
+
+typedef struct AOTLLVMTypes {
+ LLVMTypeRef int1_type;
+ LLVMTypeRef int8_type;
+ LLVMTypeRef int16_type;
+ LLVMTypeRef int32_type;
+ LLVMTypeRef int64_type;
+ LLVMTypeRef float32_type;
+ LLVMTypeRef float64_type;
+ LLVMTypeRef void_type;
+
+ LLVMTypeRef int8_ptr_type;
+ LLVMTypeRef int8_pptr_type;
+ LLVMTypeRef int16_ptr_type;
+ LLVMTypeRef int32_ptr_type;
+ LLVMTypeRef int64_ptr_type;
+ LLVMTypeRef float32_ptr_type;
+ LLVMTypeRef float64_ptr_type;
+
+ LLVMTypeRef v128_type;
+ LLVMTypeRef v128_ptr_type;
+ LLVMTypeRef i8x16_vec_type;
+ LLVMTypeRef i16x8_vec_type;
+ LLVMTypeRef i32x4_vec_type;
+ LLVMTypeRef i64x2_vec_type;
+ LLVMTypeRef f32x4_vec_type;
+ LLVMTypeRef f64x2_vec_type;
+
+ LLVMTypeRef i1x2_vec_type;
+
+ LLVMTypeRef meta_data_type;
+
+ LLVMTypeRef funcref_type;
+ LLVMTypeRef externref_type;
+} AOTLLVMTypes;
+
+typedef struct AOTLLVMConsts {
+ LLVMValueRef i1_zero;
+ LLVMValueRef i1_one;
+ LLVMValueRef i8_zero;
+ LLVMValueRef i32_zero;
+ LLVMValueRef i64_zero;
+ LLVMValueRef f32_zero;
+ LLVMValueRef f64_zero;
+ LLVMValueRef i32_one;
+ LLVMValueRef i32_two;
+ LLVMValueRef i32_three;
+ LLVMValueRef i32_four;
+ LLVMValueRef i32_five;
+ LLVMValueRef i32_six;
+ LLVMValueRef i32_seven;
+ LLVMValueRef i32_eight;
+ LLVMValueRef i32_nine;
+ LLVMValueRef i32_ten;
+ LLVMValueRef i32_eleven;
+ LLVMValueRef i32_twelve;
+ LLVMValueRef i32_thirteen;
+ LLVMValueRef i32_fourteen;
+ LLVMValueRef i32_fifteen;
+ LLVMValueRef i32_neg_one;
+ LLVMValueRef i64_neg_one;
+ LLVMValueRef i32_min;
+ LLVMValueRef i64_min;
+ LLVMValueRef i32_31;
+ LLVMValueRef i32_32;
+ LLVMValueRef i64_63;
+ LLVMValueRef i64_64;
+ LLVMValueRef i8x16_vec_zero;
+ LLVMValueRef i16x8_vec_zero;
+ LLVMValueRef i32x4_vec_zero;
+ LLVMValueRef i64x2_vec_zero;
+ LLVMValueRef f32x4_vec_zero;
+ LLVMValueRef f64x2_vec_zero;
+ LLVMValueRef i8x16_undef;
+ LLVMValueRef i16x8_undef;
+ LLVMValueRef i32x4_undef;
+ LLVMValueRef i64x2_undef;
+ LLVMValueRef f32x4_undef;
+ LLVMValueRef f64x2_undef;
+ LLVMValueRef i32x16_zero;
+ LLVMValueRef i32x8_zero;
+ LLVMValueRef i32x4_zero;
+ LLVMValueRef i32x2_zero;
+} AOTLLVMConsts;
+
+/**
+ * Compiler context
+ */
+typedef struct AOTCompContext {
+ AOTCompData *comp_data;
+
+ /* LLVM variables required to emit LLVM IR */
+ LLVMContextRef context;
+ LLVMBuilderRef builder;
+#if WASM_ENABLE_DEBUG_AOT
+ LLVMDIBuilderRef debug_builder;
+ LLVMMetadataRef debug_file;
+ LLVMMetadataRef debug_comp_unit;
+#endif
+ LLVMTargetMachineRef target_machine;
+ char *target_cpu;
+ char target_arch[16];
+ unsigned pointer_size;
+
+ /* Hardware intrinsic compatibility flags */
+ uint64 flags[8];
+
+ /* required by JIT */
+ LLVMOrcLLLazyJITRef orc_jit;
+ LLVMOrcThreadSafeContextRef orc_thread_safe_context;
+
+ LLVMModuleRef module;
+
+ bool is_jit_mode;
+
+ /* AOT indirect mode flag & symbol list */
+ bool is_indirect_mode;
+ bh_list native_symbols;
+
+ /* Bulk memory feature */
+ bool enable_bulk_memory;
+
+ /* Boundary check */
+ bool enable_bound_check;
+
+ /* Native stack boundary check */
+ bool enable_stack_bound_check;
+
+ /* Native stack usage estimation */
+ bool enable_stack_estimation;
+
+ /* 128-bit SIMD */
+ bool enable_simd;
+
+ /* Auxiliary stack overflow/underflow check */
+ bool enable_aux_stack_check;
+
+ /* Generate auxiliary stack frame */
+ bool enable_aux_stack_frame;
+
+ /* Thread Manager */
+ bool enable_thread_mgr;
+
+ /* Tail Call */
+ bool enable_tail_call;
+
+ /* Reference Types */
+ bool enable_ref_types;
+
+ /* Disable LLVM built-in intrinsics */
+ bool disable_llvm_intrinsics;
+
+ /* Disable LLVM link time optimization */
+ bool disable_llvm_lto;
+
+ /* Whether to optimize the JITed code */
+ bool optimize;
+
+ uint32 opt_level;
+ uint32 size_level;
+
+ /* LLVM floating-point rounding mode metadata */
+ LLVMValueRef fp_rounding_mode;
+
+ /* LLVM floating-point exception behavior metadata */
+ LLVMValueRef fp_exception_behavior;
+
+ /* LLVM data types */
+ AOTLLVMTypes basic_types;
+ LLVMTypeRef exec_env_type;
+ LLVMTypeRef aot_inst_type;
+
+ /* LLVM const values */
+ AOTLLVMConsts llvm_consts;
+
+ /* Function contexts */
+ /* TODO: */
+ AOTFuncContext **func_ctxes;
+ uint32 func_ctx_count;
+ char **custom_sections_wp;
+ uint32 custom_sections_count;
+
+ /* 3rd-party toolchains */
+ /* External llc compiler: if specified, wamrc will emit the LLVM-IR
+ * file and invoke the llc compiler to generate the object file.
+ * This is useful when we want to benefit from the optimizations of
+ * other LLVM-based toolchains */
+ const char *external_llc_compiler;
+ const char *llc_compiler_flags;
+ /* External asm compiler: if specified, wamrc will emit the text-based
+ * assembly file (.s) and invoke the asm compiler to generate the
+ * object file. This is useful when the upstream LLVM doesn't support
+ * emitting object files for some architectures (such as arc) */
+ const char *external_asm_compiler;
+ const char *asm_compiler_flags;
+} AOTCompContext;
+
+enum {
+ AOT_FORMAT_FILE,
+ AOT_OBJECT_FILE,
+ AOT_LLVMIR_UNOPT_FILE,
+ AOT_LLVMIR_OPT_FILE,
+};
+
+typedef struct AOTCompOption {
+ bool is_jit_mode;
+ bool is_indirect_mode;
+ char *target_arch;
+ char *target_abi;
+ char *target_cpu;
+ char *cpu_features;
+ bool is_sgx_platform;
+ bool enable_bulk_memory;
+ bool enable_thread_mgr;
+ bool enable_tail_call;
+ bool enable_simd;
+ bool enable_ref_types;
+ bool enable_aux_stack_check;
+ bool enable_aux_stack_frame;
+ bool disable_llvm_intrinsics;
+ bool disable_llvm_lto;
+ bool enable_stack_estimation;
+ uint32 opt_level;
+ uint32 size_level;
+ uint32 output_format;
+ uint32 bounds_checks;
+ uint32 stack_bounds_checks;
+ char **custom_sections;
+ uint32 custom_sections_count;
+ const char *stack_usage_file;
+} AOTCompOption, *aot_comp_option_t;
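+/* Sketch of driving this API (the option values are illustrative
+ assumptions, not wamrc defaults):
+
+ AOTCompOption opt = { 0 };
+ opt.opt_level = 3;
+ opt.size_level = 3;
+ opt.output_format = AOT_OBJECT_FILE;
+ if (aot_compiler_init()) {
+ AOTCompContext *comp_ctx = aot_create_comp_context(comp_data, &opt);
+ if (comp_ctx && aot_compile_wasm(comp_ctx)) {
+ uint32 size;
+ uint8 *elf = aot_emit_elf_file(comp_ctx, &size);
+ ...
+ }
+ }
+*/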
+
+bool
+aot_compiler_init(void);
+
+void
+aot_compiler_destroy(void);
+
+AOTCompContext *
+aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option);
+
+void
+aot_destroy_comp_context(AOTCompContext *comp_ctx);
+
+int32
+aot_get_native_symbol_index(AOTCompContext *comp_ctx, const char *symbol);
+
+bool
+aot_compile_wasm(AOTCompContext *comp_ctx);
+
+uint8 *
+aot_emit_elf_file(AOTCompContext *comp_ctx, uint32 *p_elf_file_size);
+
+void
+aot_destroy_elf_file(uint8 *elf_file);
+
+void
+aot_value_stack_push(AOTValueStack *stack, AOTValue *value);
+
+AOTValue *
+aot_value_stack_pop(AOTValueStack *stack);
+
+void
+aot_value_stack_destroy(AOTValueStack *stack);
+
+void
+aot_block_stack_push(AOTBlockStack *stack, AOTBlock *block);
+
+AOTBlock *
+aot_block_stack_pop(AOTBlockStack *stack);
+
+void
+aot_block_stack_destroy(AOTBlockStack *stack);
+
+void
+aot_block_destroy(AOTBlock *block);
+
+LLVMTypeRef
+wasm_type_to_llvm_type(AOTLLVMTypes *llvm_types, uint8 wasm_type);
+
+bool
+aot_checked_addr_list_add(AOTFuncContext *func_ctx, uint32 local_idx,
+ uint32 offset, uint32 bytes);
+
+void
+aot_checked_addr_list_del(AOTFuncContext *func_ctx, uint32 local_idx);
+
+bool
+aot_checked_addr_list_find(AOTFuncContext *func_ctx, uint32 local_idx,
+ uint32 offset, uint32 bytes);
+
+void
+aot_checked_addr_list_destroy(AOTFuncContext *func_ctx);
+
+bool
+aot_build_zero_function_ret(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ AOTFuncType *func_type);
+
+LLVMValueRef
+aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, const char *intrinsic,
+ LLVMTypeRef ret_type, LLVMTypeRef *param_types,
+ int param_count, ...);
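+/* Example call shape (mirrors the SIMD swizzle code later in this
+ patch, where both param_types[] entries are V128_i8x16_TYPE):
+
+ result = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ "llvm.x86.ssse3.pshuf.b.128",
+ V128_i8x16_TYPE, param_types, 2,
+ vector, mask);
+*/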
+
+LLVMValueRef
+aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, const char *intrinsic,
+ LLVMTypeRef ret_type, LLVMTypeRef *param_types,
+ int param_count, va_list param_value_list);
+
+LLVMValueRef
+aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
+ LLVMTypeRef func_type, int32 index);
+
+LLVMValueRef
+aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
+ const WASMValue *value, uint8 value_type);
+
+bool
+aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);
+
+void
+aot_add_expand_memory_op_pass(LLVMPassManagerRef pass);
+
+void
+aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass);
+
+void
+aot_apply_llvm_new_pass_manager(AOTCompContext *comp_ctx, LLVMModuleRef module);
+
+void
+aot_handle_llvm_errmsg(const char *string, LLVMErrorRef err);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _AOT_LLVM_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra.cpp b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra.cpp
new file mode 100644
index 000000000..9b77f5e6a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <llvm/Passes/StandardInstrumentations.h>
+#include <llvm/Support/Error.h>
+#include <llvm/ADT/SmallVector.h>
+#include <llvm/ADT/Twine.h>
+#include <llvm/ADT/Triple.h>
+#include <llvm/Analysis/TargetTransformInfo.h>
+#include <llvm/CodeGen/TargetPassConfig.h>
+#include <llvm/ExecutionEngine/ExecutionEngine.h>
+#include <llvm/MC/MCSubtargetInfo.h>
+#include <llvm/Support/TargetSelect.h>
+#include <llvm/Target/TargetMachine.h>
+#include <llvm-c/Core.h>
+#include <llvm-c/ExecutionEngine.h>
+#include <llvm-c/Initialization.h>
+#include <llvm/ExecutionEngine/GenericValue.h>
+#include <llvm/ExecutionEngine/JITEventListener.h>
+#include <llvm/ExecutionEngine/RTDyldMemoryManager.h>
+#include <llvm/ExecutionEngine/Orc/LLJIT.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Instructions.h>
+#include <llvm/IR/IntrinsicInst.h>
+#include <llvm/IR/LegacyPassManager.h>
+#include <llvm/Support/CommandLine.h>
+#include <llvm/Support/ErrorHandling.h>
+#include <llvm/Target/CodeGenCWrappers.h>
+#include <llvm/Target/TargetMachine.h>
+#include <llvm/Target/TargetOptions.h>
+#include <llvm/Transforms/Utils/LowerMemIntrinsics.h>
+#include <llvm/Transforms/Vectorize/LoopVectorize.h>
+#include <llvm/Transforms/Vectorize/LoadStoreVectorizer.h>
+#include <llvm/Transforms/Vectorize/SLPVectorizer.h>
+#include <llvm/Transforms/Scalar/LoopRotation.h>
+#include <llvm/Transforms/Scalar/SimpleLoopUnswitch.h>
+#include <llvm/Transforms/Scalar/LICM.h>
+#include <llvm/Transforms/Scalar/GVN.h>
+#include <llvm/Passes/PassBuilder.h>
+#include <llvm/Analysis/TargetLibraryInfo.h>
+#if LLVM_VERSION_MAJOR >= 12
+#include <llvm/Analysis/AliasAnalysis.h>
+#endif
+
+#include <cstring>
+#include "../aot/aot_runtime.h"
+#include "aot_llvm.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+LLVM_C_EXTERN_C_BEGIN
+
+bool
+aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);
+
+void
+aot_add_expand_memory_op_pass(LLVMPassManagerRef pass);
+
+void
+aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass);
+
+void
+aot_apply_llvm_new_pass_manager(AOTCompContext *comp_ctx, LLVMModuleRef module);
+
+LLVM_C_EXTERN_C_END
+
+ExitOnError ExitOnErr;
+
+class ExpandMemoryOpPass : public llvm::ModulePass
+{
+ public:
+ static char ID;
+
+ ExpandMemoryOpPass()
+ : ModulePass(ID)
+ {}
+
+ bool runOnModule(Module &M) override;
+
+ bool expandMemIntrinsicUses(Function &F);
+ StringRef getPassName() const override
+ {
+ return "Expand memory operation intrinsics";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override
+ {
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ }
+};
+
+char ExpandMemoryOpPass::ID = 0;
+
+bool
+ExpandMemoryOpPass::expandMemIntrinsicUses(Function &F)
+{
+ Intrinsic::ID ID = F.getIntrinsicID();
+ bool Changed = false;
+
+ for (auto I = F.user_begin(), E = F.user_end(); I != E;) {
+ Instruction *Inst = cast<Instruction>(*I);
+ ++I;
+
+ switch (ID) {
+ case Intrinsic::memcpy:
+ {
+ auto *Memcpy = cast<MemCpyInst>(Inst);
+ Function *ParentFunc = Memcpy->getParent()->getParent();
+ const TargetTransformInfo &TTI =
+ getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
+ *ParentFunc);
+ expandMemCpyAsLoop(Memcpy, TTI);
+ Changed = true;
+ Memcpy->eraseFromParent();
+ break;
+ }
+ case Intrinsic::memmove:
+ {
+ auto *Memmove = cast<MemMoveInst>(Inst);
+ expandMemMoveAsLoop(Memmove);
+ Changed = true;
+ Memmove->eraseFromParent();
+ break;
+ }
+ case Intrinsic::memset:
+ {
+ auto *Memset = cast<MemSetInst>(Inst);
+ expandMemSetAsLoop(Memset);
+ Changed = true;
+ Memset->eraseFromParent();
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return Changed;
+}
+
+bool
+ExpandMemoryOpPass::runOnModule(Module &M)
+{
+ bool Changed = false;
+
+ for (Function &F : M) {
+ if (!F.isDeclaration())
+ continue;
+
+ switch (F.getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ if (expandMemIntrinsicUses(F))
+ Changed = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return Changed;
+}
+
+void
+aot_add_expand_memory_op_pass(LLVMPassManagerRef pass)
+{
+ reinterpret_cast<legacy::PassManager *>(pass)->add(
+ new ExpandMemoryOpPass());
+}
+
+void
+aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass)
+{
+ reinterpret_cast<legacy::PassManager *>(pass)->add(
+ createSimpleLoopUnswitchLegacyPass());
+}
+
+bool
+aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str)
+{
+#if WASM_ENABLE_SIMD != 0
+ if (!arch_c_str || !cpu_c_str) {
+ return false;
+ }
+
+ llvm::SmallVector<std::string, 1> targetAttributes;
+ llvm::Triple targetTriple(arch_c_str, "", "");
+ auto targetMachine =
+ std::unique_ptr<llvm::TargetMachine>(llvm::EngineBuilder().selectTarget(
+ targetTriple, "", std::string(cpu_c_str), targetAttributes));
+ if (!targetMachine) {
+ return false;
+ }
+
+ const llvm::Triple::ArchType targetArch =
+ targetMachine->getTargetTriple().getArch();
+ const llvm::MCSubtargetInfo *subTargetInfo =
+ targetMachine->getMCSubtargetInfo();
+ if (subTargetInfo == nullptr) {
+ return false;
+ }
+
+ if (targetArch == llvm::Triple::x86_64) {
+ return subTargetInfo->checkFeatures("+sse4.1");
+ }
+ else if (targetArch == llvm::Triple::aarch64) {
+ return subTargetInfo->checkFeatures("+neon");
+ }
+ else {
+ return false;
+ }
+#else
+ (void)arch_c_str;
+ (void)cpu_c_str;
+ return true;
+#endif /* WASM_ENABLE_SIMD */
+}
+
+void
+aot_apply_llvm_new_pass_manager(AOTCompContext *comp_ctx, LLVMModuleRef module)
+{
+ TargetMachine *TM =
+ reinterpret_cast<TargetMachine *>(comp_ctx->target_machine);
+ PipelineTuningOptions PTO;
+ PTO.LoopVectorization = true;
+ PTO.SLPVectorization = true;
+ PTO.LoopUnrolling = true;
+
+#ifdef DEBUG_PASS
+ PassInstrumentationCallbacks PIC;
+ PassBuilder PB(TM, PTO, None, &PIC);
+#else
+#if LLVM_VERSION_MAJOR == 12
+ PassBuilder PB(false, TM, PTO);
+#else
+ PassBuilder PB(TM, PTO);
+#endif
+#endif
+
+ /* Register all the basic analyses with the managers */
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+
+ /* Register the target library analysis directly and give it a
+ customized preset TLI */
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ new TargetLibraryInfoImpl(Triple(TM->getTargetTriple())));
+ FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
+
+ /* Register the AA manager first so that our version is the one used */
+ AAManager AA = PB.buildDefaultAAPipeline();
+ FAM.registerPass([&] { return std::move(AA); });
+
+#ifdef DEBUG_PASS
+ StandardInstrumentations SI(true, false);
+ SI.registerCallbacks(PIC, &FAM);
+#endif
+
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+#if LLVM_VERSION_MAJOR <= 13
+ PassBuilder::OptimizationLevel OL;
+
+ switch (comp_ctx->opt_level) {
+ case 0:
+ OL = PassBuilder::OptimizationLevel::O0;
+ break;
+ case 1:
+ OL = PassBuilder::OptimizationLevel::O1;
+ break;
+ case 2:
+ OL = PassBuilder::OptimizationLevel::O2;
+ break;
+ case 3:
+ default:
+ OL = PassBuilder::OptimizationLevel::O3;
+ break;
+ }
+#else
+ OptimizationLevel OL;
+
+ switch (comp_ctx->opt_level) {
+ case 0:
+ OL = OptimizationLevel::O0;
+ break;
+ case 1:
+ OL = OptimizationLevel::O1;
+ break;
+ case 2:
+ OL = OptimizationLevel::O2;
+ break;
+ case 3:
+ default:
+ OL = OptimizationLevel::O3;
+ break;
+ }
+#endif /* end of LLVM_VERSION_MAJOR */
+
+ bool disable_llvm_lto = comp_ctx->disable_llvm_lto;
+#if WASM_ENABLE_SPEC_TEST != 0
+ disable_llvm_lto = true;
+#endif
+
+ Module *M = reinterpret_cast<Module *>(module);
+ if (disable_llvm_lto) {
+ for (Function &F : *M) {
+ F.addFnAttr("disable-tail-calls", "true");
+ }
+ }
+
+ ModulePassManager MPM;
+ if (comp_ctx->is_jit_mode) {
+ const char *Passes =
+ "mem2reg,instcombine,simplifycfg,jump-threading,indvars";
+ ExitOnErr(PB.parsePassPipeline(MPM, Passes));
+ }
+ else {
+ FunctionPassManager FPM;
+
+ /* Apply Vectorize related passes for AOT mode */
+ FPM.addPass(LoopVectorizePass());
+ FPM.addPass(SLPVectorizerPass());
+ FPM.addPass(LoadStoreVectorizerPass());
+
+ /*
+ FPM.addPass(createFunctionToLoopPassAdaptor(LICMPass()));
+ FPM.addPass(createFunctionToLoopPassAdaptor(LoopRotatePass()));
+ FPM.addPass(createFunctionToLoopPassAdaptor(SimpleLoopUnswitchPass()));
+ */
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ if (!disable_llvm_lto) {
+ /* Apply LTO for AOT mode */
+ if (comp_ctx->comp_data->func_count >= 10)
+ /* Add the pre-link optimizations when the function
+ count is large enough */
+ MPM.addPass(PB.buildLTOPreLinkDefaultPipeline(OL));
+ else
+ MPM.addPass(PB.buildLTODefaultPipeline(OL, NULL));
+ }
+ else {
+ MPM.addPass(PB.buildPerModuleDefaultPipeline(OL));
+ }
+ }
+
+ MPM.run(*M, MAM);
+}
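+/* Note: the JIT branch above runs only a small fixed pipeline
+ (mem2reg, instcombine, simplifycfg, jump-threading, indvars),
+ presumably to keep per-module compilation latency low, while the AOT
+ branch adds the vectorizers and then either an LTO (pre-link) pipeline
+ or the per-module default pipeline, depending on disable_llvm_lto and
+ the function count. */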
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.cpp b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.cpp
new file mode 100644
index 000000000..9bd44bbff
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c)2023 YAMAMOTO Takashi. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <llvm-c/TargetMachine.h>
+#include <llvm/MC/TargetRegistry.h>
+#include <llvm/Target/TargetMachine.h>
+
+#include "bh_assert.h"
+
+#include "aot_llvm_extra2.h"
+
+static llvm::Optional<llvm::Reloc::Model>
+convert(LLVMRelocMode reloc_mode)
+{
+ switch (reloc_mode) {
+ case LLVMRelocDefault:
+ return llvm::None;
+ case LLVMRelocStatic:
+ return llvm::Reloc::Static;
+ case LLVMRelocPIC:
+ return llvm::Reloc::PIC_;
+ case LLVMRelocDynamicNoPic:
+ return llvm::Reloc::DynamicNoPIC;
+ case LLVMRelocROPI:
+ return llvm::Reloc::ROPI;
+ case LLVMRelocRWPI:
+ return llvm::Reloc::RWPI;
+ case LLVMRelocROPI_RWPI:
+ return llvm::Reloc::ROPI_RWPI;
+ }
+ bh_assert(0);
+ return llvm::None;
+}
+
+static llvm::CodeGenOpt::Level
+convert(LLVMCodeGenOptLevel opt_level)
+{
+ switch (opt_level) {
+ case LLVMCodeGenLevelNone:
+ return llvm::CodeGenOpt::None;
+ case LLVMCodeGenLevelLess:
+ return llvm::CodeGenOpt::Less;
+ case LLVMCodeGenLevelDefault:
+ return llvm::CodeGenOpt::Default;
+ case LLVMCodeGenLevelAggressive:
+ return llvm::CodeGenOpt::Aggressive;
+ }
+ bh_assert(0);
+ return llvm::CodeGenOpt::None;
+}
+
+static llvm::Optional<llvm::CodeModel::Model>
+convert(LLVMCodeModel code_model, bool *jit)
+{
+ *jit = false;
+ switch (code_model) {
+ case LLVMCodeModelDefault:
+ return llvm::None;
+ case LLVMCodeModelJITDefault:
+ *jit = true;
+ return llvm::None;
+ case LLVMCodeModelTiny:
+ return llvm::CodeModel::Tiny;
+ case LLVMCodeModelSmall:
+ return llvm::CodeModel::Small;
+ case LLVMCodeModelKernel:
+ return llvm::CodeModel::Kernel;
+ case LLVMCodeModelMedium:
+ return llvm::CodeModel::Medium;
+ case LLVMCodeModelLarge:
+ return llvm::CodeModel::Large;
+ }
+ bh_assert(0);
+ return llvm::None;
+}
+
+LLVMTargetMachineRef
+LLVMCreateTargetMachineWithOpts(LLVMTargetRef ctarget, const char *triple,
+ const char *cpu, const char *features,
+ LLVMCodeGenOptLevel opt_level,
+ LLVMRelocMode reloc_mode,
+ LLVMCodeModel code_model,
+ bool EmitStackSizeSection,
+ const char *StackUsageOutput)
+{
+ llvm::TargetOptions opts;
+
+ // equivalent of -fstack-size-section: for ELF targets, emit stack
+ // sizes to the ".stack_sizes" section; they can be read back with
+ // "llvm-readobj --stack-sizes"
+ opts.EmitStackSizeSection = EmitStackSizeSection;
+
+ // equivalent of -fstack-usage
+ if (StackUsageOutput != NULL) {
+ opts.StackUsageOutput = StackUsageOutput;
+ }
+
+ auto target = reinterpret_cast<llvm::Target *>(ctarget);
+ auto rm = convert(reloc_mode);
+ auto ol = convert(opt_level);
+ bool jit;
+ auto cm = convert(code_model, &jit);
+ auto targetmachine = target->createTargetMachine(triple, cpu, features,
+ opts, rm, cm, ol, jit);
+ return reinterpret_cast<LLVMTargetMachineRef>(targetmachine);
+}
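+/* Illustrative use (a minimal sketch; the triple and CPU strings are
+ assumptions, not values wamrc necessarily passes):
+
+ LLVMTargetRef target;
+ char *err = NULL;
+ if (!LLVMGetTargetFromTriple("x86_64-pc-linux-gnu", &target, &err)) {
+ LLVMTargetMachineRef tm = LLVMCreateTargetMachineWithOpts(
+ target, "x86_64-pc-linux-gnu", "generic", "",
+ LLVMCodeGenLevelDefault, LLVMRelocPIC, LLVMCodeModelDefault,
+ true, "stack_usage.su");
+ // ... use tm, then LLVMDisposeTargetMachine(tm);
+ }
+*/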
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.h
new file mode 100644
index 000000000..ef99622a4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_llvm_extra2.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c)2023 YAMAMOTO Takashi. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <llvm-c/TargetMachine.h>
+
+LLVM_C_EXTERN_C_BEGIN
+LLVMTargetMachineRef
+LLVMCreateTargetMachineWithOpts(LLVMTargetRef ctarget, const char *triple,
+ const char *cpu, const char *features,
+ LLVMCodeGenOptLevel opt_level,
+ LLVMRelocMode reloc_mode,
+ LLVMCodeModel code_model,
+ bool EmitStackSizeSection,
+ const char *StackUsageOutput);
+LLVM_C_EXTERN_C_END
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.cpp b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.cpp
new file mode 100644
index 000000000..8cf253e94
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/Orc.h"
+#include "llvm-c/OrcEE.h"
+#include "llvm-c/TargetMachine.h"
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Support/CBindingWrapping.h"
+
+#include "aot_orc_extra.h"
+#include "aot.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+using GlobalValueSet = std::set<const GlobalValue *>;
+
+namespace llvm {
+namespace orc {
+
+class InProgressLookupState;
+
+class OrcV2CAPIHelper
+{
+ public:
+ using PoolEntry = SymbolStringPtr::PoolEntry;
+ using PoolEntryPtr = SymbolStringPtr::PoolEntryPtr;
+
+ // Move from SymbolStringPtr to PoolEntryPtr (no change in ref count).
+ static PoolEntryPtr moveFromSymbolStringPtr(SymbolStringPtr S)
+ {
+ PoolEntryPtr Result = nullptr;
+ std::swap(Result, S.S);
+ return Result;
+ }
+
+ // Move from a PoolEntryPtr to a SymbolStringPtr (no change in ref count).
+ static SymbolStringPtr moveToSymbolStringPtr(PoolEntryPtr P)
+ {
+ SymbolStringPtr S;
+ S.S = P;
+ return S;
+ }
+
+ // Copy a pool entry to a SymbolStringPtr (increments ref count).
+ static SymbolStringPtr copyToSymbolStringPtr(PoolEntryPtr P)
+ {
+ return SymbolStringPtr(P);
+ }
+
+ static PoolEntryPtr getRawPoolEntryPtr(const SymbolStringPtr &S)
+ {
+ return S.S;
+ }
+
+ static void retainPoolEntry(PoolEntryPtr P)
+ {
+ SymbolStringPtr S(P);
+ S.S = nullptr;
+ }
+
+ static void releasePoolEntry(PoolEntryPtr P)
+ {
+ SymbolStringPtr S;
+ S.S = P;
+ }
+
+ static InProgressLookupState *extractLookupState(LookupState &LS)
+ {
+ return LS.IPLS.release();
+ }
+
+ static void resetLookupState(LookupState &LS, InProgressLookupState *IPLS)
+ {
+ return LS.reset(IPLS);
+ }
+};
+
+} // namespace orc
+} // namespace llvm
+
+// ORC.h
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionSession, LLVMOrcExecutionSessionRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRTransformLayer, LLVMOrcIRTransformLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITDylib, LLVMOrcJITDylibRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITTargetMachineBuilder,
+ LLVMOrcJITTargetMachineBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectTransformLayer,
+ LLVMOrcObjectTransformLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OrcV2CAPIHelper::PoolEntry,
+ LLVMOrcSymbolStringPoolEntryRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SymbolStringPool, LLVMOrcSymbolStringPoolRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeModule, LLVMOrcThreadSafeModuleRef)
+
+// LLJIT.h
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJITBuilder, LLVMOrcLLJITBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLLazyJITBuilder, LLVMOrcLLLazyJITBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLLazyJIT, LLVMOrcLLLazyJITRef)
+
+void
+LLVMOrcLLJITBuilderSetNumCompileThreads(LLVMOrcLLJITBuilderRef Builder,
+ unsigned NumCompileThreads)
+{
+ unwrap(Builder)->setNumCompileThreads(NumCompileThreads);
+}
+
+LLVMOrcLLLazyJITBuilderRef
+LLVMOrcCreateLLLazyJITBuilder(void)
+{
+ return wrap(new LLLazyJITBuilder());
+}
+
+void
+LLVMOrcDisposeLLLazyJITBuilder(LLVMOrcLLLazyJITBuilderRef Builder)
+{
+ delete unwrap(Builder);
+}
+
+void
+LLVMOrcLLLazyJITBuilderSetNumCompileThreads(LLVMOrcLLLazyJITBuilderRef Builder,
+ unsigned NumCompileThreads)
+{
+ unwrap(Builder)->setNumCompileThreads(NumCompileThreads);
+}
+
+void
+LLVMOrcLLLazyJITBuilderSetJITTargetMachineBuilder(
+ LLVMOrcLLLazyJITBuilderRef Builder, LLVMOrcJITTargetMachineBuilderRef JTMP)
+{
+ unwrap(Builder)->setJITTargetMachineBuilder(*unwrap(JTMP));
+ /* Destroy the JTMP, similar to
+ LLVMOrcLLJITBuilderSetJITTargetMachineBuilder */
+ LLVMOrcDisposeJITTargetMachineBuilder(JTMP);
+}
+
+static Optional<CompileOnDemandLayer::GlobalValueSet>
+PartitionFunction(GlobalValueSet Requested)
+{
+ std::vector<const GlobalValue *> GVsToAdd;
+
+ for (auto *GV : Requested) {
+ if (isa<Function>(GV) && GV->hasName()) {
+ auto &F = cast<Function>(*GV); /* get LLVM function */
+ const Module *M = F.getParent(); /* get LLVM module */
+ auto GVName = GV->getName(); /* get the function name */
+ const char *gvname = GVName.begin(); /* C function name */
+ const char *wrapper;
+ uint32 prefix_len = strlen(AOT_FUNC_PREFIX);
+
+ /* Convert "aot_func#n_wrapper" to "aot_func#n" */
+ if (strstr(gvname, AOT_FUNC_PREFIX)
+ && (wrapper = strstr(gvname + prefix_len, "_wrapper"))) {
+ char buf[16] = { 0 };
+ char func_name[64];
+ int group_stride, i, j;
+
+ bh_assert(wrapper - (gvname + prefix_len) > 0);
+ /* Get AOT function index */
+ bh_memcpy_s(buf, (uint32)sizeof(buf), gvname + prefix_len,
+ (uint32)(wrapper - (gvname + prefix_len)));
+ i = atoi(buf);
+
+ group_stride = WASM_ORC_JIT_BACKEND_THREAD_NUM;
+
+ /* Compile some functions each time */
+ for (j = 0; j < WASM_ORC_JIT_COMPILE_THREAD_NUM; j++) {
+ snprintf(func_name, sizeof(func_name), "%s%d",
+ AOT_FUNC_PREFIX, i + j * group_stride);
+ Function *F1 = M->getFunction(func_name);
+ if (F1) {
+ LOG_DEBUG("compile func %s", func_name);
+ GVsToAdd.push_back(cast<GlobalValue>(F1));
+ }
+ }
+ }
+ }
+ }
+
+ for (auto *GV : GVsToAdd) {
+ Requested.insert(GV);
+ }
+
+ return Requested;
+}
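+/* Worked example (a sketch, assuming AOT_FUNC_PREFIX is "aot_func#" and
+ both WASM_ORC_JIT_*_THREAD_NUM constants are 4): a lookup of
+ "aot_func#5_wrapper" parses i = 5 and adds "aot_func#5", "aot_func#9",
+ "aot_func#13" and "aot_func#17" to the partition, so one lazy lookup
+ compiles a whole group of functions rather than a single one. */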
+
+LLVMErrorRef
+LLVMOrcCreateLLLazyJIT(LLVMOrcLLLazyJITRef *Result,
+ LLVMOrcLLLazyJITBuilderRef Builder)
+{
+ assert(Result && "Result cannot be null");
+
+ if (!Builder)
+ Builder = LLVMOrcCreateLLLazyJITBuilder();
+
+ auto J = unwrap(Builder)->create();
+ LLVMOrcDisposeLLLazyJITBuilder(Builder);
+
+ if (!J) {
+ *Result = nullptr;
+ /* propagate the creation failure instead of returning success */
+ return wrap(J.takeError());
+ }
+
+ LLLazyJIT *lazy_jit = J->release();
+ lazy_jit->setPartitionFunction(PartitionFunction);
+
+ *Result = wrap(lazy_jit);
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef
+LLVMOrcDisposeLLLazyJIT(LLVMOrcLLLazyJITRef J)
+{
+ delete unwrap(J);
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef
+LLVMOrcLLLazyJITAddLLVMIRModule(LLVMOrcLLLazyJITRef J, LLVMOrcJITDylibRef JD,
+ LLVMOrcThreadSafeModuleRef TSM)
+{
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addLazyIRModule(*unwrap(JD), std::move(*TmpTSM)));
+}
+
+LLVMErrorRef
+LLVMOrcLLLazyJITLookup(LLVMOrcLLLazyJITRef J, LLVMOrcExecutorAddress *Result,
+ const char *Name)
+{
+ assert(Result && "Result cannot be null");
+
+ auto Sym = unwrap(J)->lookup(Name);
+ if (!Sym) {
+ *Result = 0;
+ return wrap(Sym.takeError());
+ }
+
+#if LLVM_VERSION_MAJOR < 15
+ *Result = Sym->getAddress();
+#else
+ *Result = Sym->getValue();
+#endif
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcLLLazyJITMangleAndIntern(LLVMOrcLLLazyJITRef J,
+ const char *UnmangledName)
+{
+ return wrap(OrcV2CAPIHelper::moveFromSymbolStringPtr(
+ unwrap(J)->mangleAndIntern(UnmangledName)));
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcLLLazyJITGetMainJITDylib(LLVMOrcLLLazyJITRef J)
+{
+ return wrap(&unwrap(J)->getMainJITDylib());
+}
+
+const char *
+LLVMOrcLLLazyJITGetTripleString(LLVMOrcLLLazyJITRef J)
+{
+ return unwrap(J)->getTargetTriple().str().c_str();
+}
+
+LLVMOrcExecutionSessionRef
+LLVMOrcLLLazyJITGetExecutionSession(LLVMOrcLLLazyJITRef J)
+{
+ return wrap(&unwrap(J)->getExecutionSession());
+}
+
+LLVMOrcIRTransformLayerRef
+LLVMOrcLLLazyJITGetIRTransformLayer(LLVMOrcLLLazyJITRef J)
+{
+ return wrap(&unwrap(J)->getIRTransformLayer());
+}
+
+LLVMOrcObjectTransformLayerRef
+LLVMOrcLLLazyJITGetObjTransformLayer(LLVMOrcLLLazyJITRef J)
+{
+ return wrap(&unwrap(J)->getObjTransformLayer());
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.h
new file mode 100644
index 000000000..e152b8778
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/aot_orc_extra.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _AOT_ORC_LAZINESS_H_
+#define _AOT_ORC_LAZINESS_H_
+
+#include "llvm-c/Error.h"
+#include "llvm-c/ExternC.h"
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/Orc.h"
+#include "llvm-c/Types.h"
+
+LLVM_C_EXTERN_C_BEGIN
+
+typedef struct LLVMOrcOpaqueLLLazyJITBuilder *LLVMOrcLLLazyJITBuilderRef;
+typedef struct LLVMOrcOpaqueLLLazyJIT *LLVMOrcLLLazyJITRef;
+
+// Extra bindings for LLJIT
+void
+LLVMOrcLLJITBuilderSetNumCompileThreads(LLVMOrcLLJITBuilderRef Builder,
+ unsigned NumCompileThreads);
+
+// Extra bindings for LLLazyJIT
+LLVMOrcLLLazyJITBuilderRef
+LLVMOrcCreateLLLazyJITBuilder(void);
+
+void
+LLVMOrcDisposeLLLazyJITBuilder(LLVMOrcLLLazyJITBuilderRef Builder);
+
+void
+LLVMOrcLLLazyJITBuilderSetJITTargetMachineBuilder(
+ LLVMOrcLLLazyJITBuilderRef Builder, LLVMOrcJITTargetMachineBuilderRef JTMP);
+
+void
+LLVMOrcLLLazyJITBuilderSetNumCompileThreads(LLVMOrcLLLazyJITBuilderRef Builder,
+ unsigned NumCompileThreads);
+
+LLVMErrorRef
+LLVMOrcCreateLLLazyJIT(LLVMOrcLLLazyJITRef *Result,
+ LLVMOrcLLLazyJITBuilderRef Builder);
+
+LLVMErrorRef
+LLVMOrcDisposeLLLazyJIT(LLVMOrcLLLazyJITRef J);
+
+LLVMErrorRef
+LLVMOrcLLLazyJITAddLLVMIRModule(LLVMOrcLLLazyJITRef J, LLVMOrcJITDylibRef JD,
+ LLVMOrcThreadSafeModuleRef TSM);
+
+LLVMErrorRef
+LLVMOrcLLLazyJITLookup(LLVMOrcLLLazyJITRef J, LLVMOrcExecutorAddress *Result,
+ const char *Name);
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcLLLazyJITMangleAndIntern(LLVMOrcLLLazyJITRef J,
+ const char *UnmangledName);
+
+LLVMOrcJITDylibRef
+LLVMOrcLLLazyJITGetMainJITDylib(LLVMOrcLLLazyJITRef J);
+
+const char *
+LLVMOrcLLLazyJITGetTripleString(LLVMOrcLLLazyJITRef J);
+
+LLVMOrcExecutionSessionRef
+LLVMOrcLLLazyJITGetExecutionSession(LLVMOrcLLLazyJITRef J);
+
+LLVMOrcIRTransformLayerRef
+LLVMOrcLLLazyJITGetIRTransformLayer(LLVMOrcLLLazyJITRef J);
+
+LLVMOrcObjectTransformLayerRef
+LLVMOrcLLLazyJITGetObjTransformLayer(LLVMOrcLLLazyJITRef J);
+
+LLVM_C_EXTERN_C_END
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.cpp b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.cpp
new file mode 100644
index 000000000..d5a1be85e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.cpp
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2021 Ant Group. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "lldb/API/SBBlock.h"
+#include "lldb/API/SBCompileUnit.h"
+#include "lldb/API/SBCommandReturnObject.h"
+#include "lldb/API/SBCommandInterpreter.h"
+#include "lldb/API/SBBreakpointLocation.h"
+#include "lldb/API/SBDebugger.h"
+#include "lldb/API//SBFunction.h"
+#include "lldb/API//SBModule.h"
+#include "lldb/API//SBProcess.h"
+#include "lldb/API//SBStream.h"
+#include "lldb/API//SBSymbol.h"
+#include "lldb/API//SBTarget.h"
+#include "lldb/API//SBThread.h"
+#include "lldb/API/SBDeclaration.h"
+
+#include "dwarf_extractor.h"
+#include "../aot_llvm.h"
+
+#include "bh_log.h"
+#include "../../aot/aot_runtime.h"
+
+#include "llvm/BinaryFormat/Dwarf.h"
+
+using namespace lldb;
+
+typedef struct dwar_extractor {
+ SBDebugger debugger;
+ SBTarget target;
+ SBModule module;
+
+} dwar_extractor;
+
+#define TO_HANDLE(extractor) (dwar_extractor_handle_t)(extractor)
+
+#define TO_EXTACTOR(handle) (dwar_extractor *)(handle)
+
+static bool is_debugger_initialized;
+
+dwar_extractor_handle_t
+create_dwarf_extractor(AOTCompData *comp_data, char *file_name)
+{
+ char *arch = NULL;
+ char *platform = NULL;
+ dwar_extractor *extractor = NULL;
+
+ // __attribute__((constructor)) may be better?
+ if (!is_debugger_initialized) {
+ SBError error = SBDebugger::InitializeWithErrorHandling();
+ if (error.Fail()) {
+ LOG_ERROR("Init Dwarf Debugger failed");
+ return TO_HANDLE(NULL);
+ }
+ is_debugger_initialized = true;
+ }
+
+ SBError error;
+ SBFileSpec exe_file_spec(file_name, true);
+
+ if (!(extractor = new dwar_extractor())) {
+ LOG_ERROR("Create Dwarf Extractor error: failed to allocate memory");
+ goto fail3;
+ }
+
+ extractor->debugger = SBDebugger::Create();
+ if (!extractor->debugger.IsValid()) {
+ LOG_ERROR("Create Dwarf Debugger failed");
+ goto fail2;
+ }
+
+ extractor->target = extractor->debugger.CreateTarget(
+ file_name, arch, platform, false, error);
+
+ if (!error.Success()) {
+ LOG_ERROR("Create Dwarf target failed:%s", error.GetCString());
+ goto fail1;
+ }
+
+ if (!extractor->target.IsValid()) {
+ LOG_ERROR("Create Dwarf target not valid");
+ goto fail1;
+ }
+
+ extractor->module = extractor->target.FindModule(exe_file_spec);
+ comp_data->extractor = TO_HANDLE(extractor);
+
+ return TO_HANDLE(extractor);
+
+fail1:
+ SBDebugger::Destroy(extractor->debugger);
+
+fail2:
+ /* allocated with new above, so release with delete, not
+ wasm_runtime_free */
+ delete extractor;
+
+fail3:
+ return TO_HANDLE(NULL);
+}
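+/* Typical call shape (a sketch; "app.wasm" is an assumed path). The
+ returned handle is also cached in comp_data->extractor, which is what
+ dwarf_gen_file_info() and friends read back later:
+
+ dwar_extractor_handle_t h = create_dwarf_extractor(comp_data, "app.wasm");
+ if (!h)
+ LOG_ERROR("no DWARF information available");
+*/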
+
+void
+destroy_dwarf_extractor(dwar_extractor_handle_t handle)
+{
+ dwar_extractor *extractor = TO_EXTACTOR(handle);
+ if (!extractor)
+ return;
+ extractor->debugger.DeleteTarget(extractor->target);
+ SBDebugger::Destroy(extractor->debugger);
+ delete extractor;
+ SBDebugger::Terminate();
+ is_debugger_initialized = false;
+}
+
+LLVMMetadataRef
+dwarf_gen_file_info(AOTCompContext *comp_ctx)
+{
+ dwar_extractor *extractor;
+ int units_number;
+ LLVMMetadataRef file_info = NULL;
+ const char *file_name;
+ const char *dir_name;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ units_number = extractor->module.GetNumCompileUnits();
+
+ if (units_number > 0) {
+ SBCompileUnit compile_unit = extractor->module.GetCompileUnitAtIndex(0);
+ auto filespec = compile_unit.GetFileSpec();
+ file_name = filespec.GetFilename();
+ dir_name = filespec.GetDirectory();
+ /* both must be non-NULL for the strlen() calls below */
+ if (file_name && dir_name) {
+ file_info = LLVMDIBuilderCreateFile(comp_ctx->debug_builder,
+ file_name, strlen(file_name),
+ dir_name, strlen(dir_name));
+ }
+ }
+ return file_info;
+}
+
+#if 0
+void
+dwarf_gen_mock_vm_info(AOTCompContext *comp_ctx)
+{
+ LLVMMetadataRef file_info = NULL;
+ LLVMMetadataRef comp_unit = NULL;
+ file_info = LLVMDIBuilderCreateFile(comp_ctx->debug_builder,
+ "ant_runtime_mock.c", 18, ".", 1);
+
+ comp_unit = LLVMDIBuilderCreateCompileUnit(
+ comp_ctx->debug_builder, LLVMDWARFSourceLanguageC, file_info,
+ "ant compiler", 12, 0, NULL, 0, 1, NULL, 0, LLVMDWARFEmissionFull, 0, 0,
+ 0, "/", 1, "", 0);
+
+ LLVMTypeRef ParamTys[] = {
+ LLVMVoidType(),
+ };
+
+ LLVMTypeRef FuncTy = LLVMFunctionType(LLVMVoidType(), ParamTys, 0, 0);
+
+ LLVMValueRef Function =
+ LLVMAddFunction(comp_ctx->module, "ant_runtime_mock", FuncTy);
+
+ LLVMMetadataRef ParamTypes[0];
+ LLVMMetadataRef FunctionTy = LLVMDIBuilderCreateSubroutineType(
+ comp_ctx->debug_builder, file_info, ParamTypes, 0, LLVMDIFlagZero);
+
+ /* 0x0015 is subroutine_type */
+ LLVMMetadataRef ReplaceableFunctionMetadata =
+ LLVMDIBuilderCreateReplaceableCompositeType(
+ comp_ctx->debug_builder, 0x15, "ant_runtime_mock", 16, file_info,
+ file_info, 2, 0, 0, 0, LLVMDIFlagFwdDecl, "", 0);
+
+ LLVMMetadataRef FunctionMetadata = LLVMDIBuilderCreateFunction(
+ comp_ctx->debug_builder, file_info, "ant_runtime_mock", 16,
+ "ant_runtime_mock", 16, file_info, 2, FunctionTy, true, true, 2, LLVMDIFlagZero,
+ false);
+
+ LLVMMetadataReplaceAllUsesWith(ReplaceableFunctionMetadata,
+ FunctionMetadata);
+
+ LLVMSetSubprogram(Function, FunctionMetadata);
+
+ comp_ctx->vm_debug_comp_unit = comp_unit;
+ comp_ctx->vm_debug_file = file_info;
+ comp_ctx->vm_debug_func = FunctionMetadata;
+}
+#endif
+
+LLVMMetadataRef
+dwarf_gen_comp_unit_info(AOTCompContext *comp_ctx)
+{
+ dwar_extractor *extractor;
+ int units_number;
+ LLVMMetadataRef comp_unit = NULL;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ units_number = extractor->module.GetNumCompileUnits();
+
+ if (units_number > 0) {
+ SBCompileUnit compile_unit = extractor->module.GetCompileUnitAtIndex(0);
+ auto lang_type = compile_unit.GetLanguage();
+
+ comp_unit = LLVMDIBuilderCreateCompileUnit(
+ comp_ctx->debug_builder, LLDB_TO_LLVM_LANG_TYPE(lang_type),
+ comp_ctx->debug_file, "ant compiler", 12, 0, NULL, 0, 1, NULL, 0,
+ LLVMDWARFEmissionFull, 0, 0, 0, "/", 1, "", 0);
+ }
+ return comp_unit;
+}
+
+static LLVMDWARFTypeEncoding
+lldb_get_basic_type_encoding(BasicType basic_type)
+{
+ LLVMDWARFTypeEncoding encoding = 0;
+ switch (basic_type) {
+ case eBasicTypeUnsignedChar:
+ encoding = llvm::dwarf::DW_ATE_unsigned_char;
+ break;
+ case eBasicTypeSignedChar:
+ encoding = llvm::dwarf::DW_ATE_signed_char;
+ break;
+ case eBasicTypeUnsignedInt:
+ case eBasicTypeUnsignedLong:
+ case eBasicTypeUnsignedLongLong:
+ case eBasicTypeUnsignedWChar:
+ case eBasicTypeUnsignedInt128:
+ case eBasicTypeUnsignedShort:
+ encoding = llvm::dwarf::DW_ATE_unsigned;
+ break;
+ case eBasicTypeInt:
+ case eBasicTypeLong:
+ case eBasicTypeLongLong:
+ case eBasicTypeWChar:
+ case eBasicTypeInt128:
+ case eBasicTypeShort:
+ encoding = llvm::dwarf::DW_ATE_signed;
+ break;
+ case eBasicTypeBool:
+ encoding = llvm::dwarf::DW_ATE_boolean;
+ break;
+ case eBasicTypeHalf:
+ case eBasicTypeFloat:
+ case eBasicTypeDouble:
+ case eBasicTypeLongDouble:
+ encoding = llvm::dwarf::DW_ATE_float;
+ break;
+ default:
+ break;
+ }
+ return encoding;
+}
+
+static LLVMMetadataRef
+lldb_type_to_type_dbi(AOTCompContext *comp_ctx, SBType &type)
+{
+ LLVMMetadataRef type_info = NULL;
+ BasicType basic_type = type.GetBasicType();
+ uint64_t bit_size = type.GetByteSize() * 8;
+ LLVMDIBuilderRef DIB = comp_ctx->debug_builder;
+ LLVMDWARFTypeEncoding encoding;
+
+ if (basic_type != eBasicTypeInvalid) {
+ encoding = lldb_get_basic_type_encoding(basic_type);
+ type_info = LLVMDIBuilderCreateBasicType(
+ DIB, type.GetName(), strlen(type.GetName()), bit_size, encoding,
+ LLVMDIFlagZero);
+ }
+ else if (type.IsPointerType()) {
+ SBType pointee_type = type.GetPointeeType();
+ type_info = LLVMDIBuilderCreatePointerType(
+ DIB, lldb_type_to_type_dbi(comp_ctx, pointee_type), bit_size, 0, 0,
+ "", 0);
+ }
+
+ return type_info;
+}
+
+static LLVMMetadataRef
+lldb_function_to_function_dbi(AOTCompContext *comp_ctx, SBSymbolContext &sc,
+ AOTFuncContext *func_ctx)
+{
+ SBFunction function(sc.GetFunction());
+ const char *function_name = function.GetName();
+ const char *link_name = function.GetName();
+ SBTypeList function_args = function.GetType().GetFunctionArgumentTypes();
+ SBType return_type = function.GetType().GetFunctionReturnType();
+ const size_t num_function_args = function_args.GetSize();
+ dwar_extractor *extractor;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ LLVMDIBuilderRef DIB = comp_ctx->debug_builder;
+ LLVMMetadataRef File = comp_ctx->debug_file;
+
+ LLVMMetadataRef ParamTypes[num_function_args + 1];
+
+ ParamTypes[0] = lldb_type_to_type_dbi(comp_ctx, return_type);
+
+ for (uint32_t function_arg_idx = 0; function_arg_idx < num_function_args;
+ ++function_arg_idx) {
+ SBType function_arg_type =
+ function_args.GetTypeAtIndex(function_arg_idx);
+
+ if (function_arg_type.IsValid()) {
+ ParamTypes[function_arg_idx + 1] =
+ lldb_type_to_type_dbi(comp_ctx, function_arg_type);
+ }
+ }
+
+ LLVMMetadataRef FunctionTy = LLVMDIBuilderCreateSubroutineType(
+ DIB, File, ParamTypes, num_function_args + 1, LLVMDIFlagZero);
+
+ auto line_entry = sc.GetLineEntry();
+ LLVMMetadataRef ReplaceableFunctionMetadata =
+ LLVMDIBuilderCreateReplaceableCompositeType(
+ DIB, 0x15, function_name, strlen(function_name), File, File,
+ line_entry.GetLine(), 0, 0, 0, LLVMDIFlagFwdDecl, "", 0);
+
+ LLVMMetadataRef FunctionMetadata = LLVMDIBuilderCreateFunction(
+ DIB, File, function_name, strlen(function_name), link_name,
+ strlen(link_name), File, line_entry.GetLine(), FunctionTy, true, true,
+ line_entry.GetLine(), LLVMDIFlagZero, false);
+
+ LLVMMetadataReplaceAllUsesWith(ReplaceableFunctionMetadata,
+ FunctionMetadata);
+
+ LLVMSetSubprogram(func_ctx->func, FunctionMetadata);
+
+ LLVMMetadataRef ParamExpression =
+ LLVMDIBuilderCreateExpression(DIB, NULL, 0);
+ auto variable_list =
+ function.GetBlock().GetVariables(extractor->target, true, false, false);
+ if (num_function_args != variable_list.GetSize()) {
+ LOG_ERROR(
+ "function args number dismatch!:value number=%d, function args=%d",
+ variable_list.GetSize(), num_function_args);
+ }
+
+ LLVMMetadataRef ParamLocation = LLVMDIBuilderCreateDebugLocation(
+ comp_ctx->context, line_entry.GetLine(), 0, FunctionMetadata, NULL);
+
+ // TODO: change to void * or WasmExenv * ?
+ LLVMMetadataRef voidtype =
+ LLVMDIBuilderCreateBasicType(DIB, "void", 4, 0, 0, LLVMDIFlagZero);
+ LLVMMetadataRef void_pointer =
+ LLVMDIBuilderCreatePointerType(DIB, voidtype, 64, 0, 0, "void *", 6);
+
+ LLVMMetadataRef ParamVar = LLVMDIBuilderCreateParameterVariable(
+ DIB, FunctionMetadata, "exenv", 5, 1,
+ File, // argument numbering starts from 1, and 1 is the exec_env
+ line_entry.GetLine(), void_pointer, true, LLVMDIFlagZero);
+ LLVMValueRef Param = LLVMGetParam(func_ctx->func, 0);
+ LLVMBasicBlockRef block_curr = LLVMGetEntryBasicBlock(func_ctx->func);
+ LLVMDIBuilderInsertDbgValueAtEnd(DIB, Param, ParamVar, ParamExpression,
+ ParamLocation, block_curr);
+
+ for (uint32_t function_arg_idx = 0;
+ function_arg_idx < variable_list.GetSize(); ++function_arg_idx) {
+ SBValue variable(variable_list.GetValueAtIndex(function_arg_idx));
+ if (variable.IsValid()) {
+ SBDeclaration dec(variable.GetDeclaration());
+ auto valtype = variable.GetType();
+ LLVMMetadataRef ParamLocation = LLVMDIBuilderCreateDebugLocation(
+ comp_ctx->context, dec.GetLine(), dec.GetColumn(),
+ FunctionMetadata, NULL);
+ LLVMMetadataRef ParamVar = LLVMDIBuilderCreateParameterVariable(
+ DIB, FunctionMetadata, variable.GetName(),
+ strlen(variable.GetName()), function_arg_idx + 1 + 1,
+ File, // argument numbering starts from 1, and 1 is the exec_env
+ dec.GetLine(), ParamTypes[function_arg_idx + 1], true,
+ LLVMDIFlagZero);
+ LLVMValueRef Param =
+ LLVMGetParam(func_ctx->func, function_arg_idx + 1);
+ LLVMDIBuilderInsertDbgValueAtEnd(DIB, Param, ParamVar,
+ ParamExpression, ParamLocation,
+ block_curr);
+ }
+ }
+
+ return FunctionMetadata;
+}
+
+LLVMMetadataRef
+dwarf_gen_func_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMMetadataRef func_info = NULL;
+ dwar_extractor *extractor;
+ uint64_t vm_offset;
+ AOTFunc *func = func_ctx->aot_func;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ // A code address in DWARF for WebAssembly is the offset of an
+ // instruction relative to the start of the Code section of the
+ // WebAssembly file. For this reason Section::GetFileAddress() must
+ // return zero for the Code section (refer to ObjectFileWasm.cpp).
+ vm_offset = func->code - comp_ctx->comp_data->wasm_module->buf_code;
+
+ auto sbaddr = extractor->target.ResolveFileAddress(vm_offset);
+ SBSymbolContext sc(sbaddr.GetSymbolContext(eSymbolContextFunction
+ | eSymbolContextLineEntry));
+ if (sc.IsValid()) {
+ SBFunction function(sc.GetFunction());
+ if (function.IsValid()) {
+ func_info = lldb_function_to_function_dbi(comp_ctx, sc, func_ctx);
+ }
+ }
+ return func_info;
+}
+
+void
+dwarf_get_func_name(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ char *name, int len)
+{
+ LLVMMetadataRef func_info = NULL;
+ dwar_extractor *extractor;
+ uint64_t vm_offset;
+ AOTFunc *func = func_ctx->aot_func;
+
+ name[0] = '\0';
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return;
+
+ // A code address in DWARF for WebAssembly is the offset of an
+ // instruction relative to the start of the Code section of the
+ // WebAssembly file. For this reason Section::GetFileAddress() must
+ // return zero for the Code section (refer to ObjectFileWasm.cpp).
+ vm_offset = func->code - comp_ctx->comp_data->wasm_module->buf_code;
+
+ auto sbaddr = extractor->target.ResolveFileAddress(vm_offset);
+ SBSymbolContext sc(sbaddr.GetSymbolContext(eSymbolContextFunction
+ | eSymbolContextLineEntry));
+ if (sc.IsValid()) {
+ SBFunction function(sc.GetFunction());
+ if (function.IsValid()) {
+ bh_strcpy_s(name, len, function.GetName());
+ }
+ }
+}
+
+LLVMMetadataRef
+dwarf_gen_location(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint64_t vm_offset)
+{
+ LLVMMetadataRef location_info = NULL;
+ dwar_extractor *extractor;
+ AOTFunc *func = func_ctx->aot_func;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ auto sbaddr = extractor->target.ResolveFileAddress(vm_offset);
+ SBSymbolContext sc(sbaddr.GetSymbolContext(eSymbolContextFunction
+ | eSymbolContextLineEntry));
+ if (sc.IsValid()) {
+ // TODO: need to check whether the vm_offset belongs to this function
+ SBFunction function(sc.GetFunction());
+ if (function.IsValid()) {
+ uint64_t start = func_ctx->aot_func->code
+ - comp_ctx->comp_data->wasm_module->buf_code;
+ uint64_t end = func_ctx->aot_func->code
+ - comp_ctx->comp_data->wasm_module->buf_code
+ + func_ctx->aot_func->code_size;
+ if (function.GetStartAddress().GetOffset() <= start
+ && end <= function.GetEndAddress().GetOffset()) {
+ auto line_entry = sc.GetLineEntry();
+ location_info = LLVMDIBuilderCreateDebugLocation(
+ comp_ctx->context, line_entry.GetLine(),
+ line_entry.GetColumn(), func_ctx->debug_func, NULL);
+ // LOG_VERBOSE("Gen the location l:%d, c:%d at %lx",
+ // line_entry.GetLine(), line_entry.GetColumn(), vm_offset);
+ }
+ else
+ LOG_WARNING("the offset and function is not matched");
+ }
+ }
+ return location_info;
+}
+
+LLVMMetadataRef
+dwarf_gen_func_ret_location(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMMetadataRef func_info = NULL;
+ dwar_extractor *extractor;
+ uint64_t vm_offset;
+ AOTFunc *func = func_ctx->aot_func;
+ LLVMMetadataRef location_info = NULL;
+
+ if (!(extractor = TO_EXTACTOR(comp_ctx->comp_data->extractor)))
+ return NULL;
+
+ // A code address in DWARF for WebAssembly is the offset of an
+ // instruction relative to the start of the Code section of the
+ // WebAssembly file. For this reason Section::GetFileAddress() must
+ // return zero for the Code section (refer to ObjectFileWasm.cpp).
+ vm_offset = (func->code + func->code_size - 1)
+ - comp_ctx->comp_data->wasm_module->buf_code;
+ location_info = dwarf_gen_location(comp_ctx, func_ctx, vm_offset);
+
+ return location_info;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.h
new file mode 100644
index 000000000..449d4d57c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/debug/dwarf_extractor.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 Ant Group. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _DWARF_EXTRACTOR_H_
+#define _DWARF_EXTRACTOR_H_
+
+#include "llvm-c/DebugInfo.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned int LLDBLangType;
+#define LLDB_TO_LLVM_LANG_TYPE(lldb_lang_type) \
+ (LLVMDWARFSourceLanguage)(((lldb_lang_type) > 0 ? (lldb_lang_type)-1 : 1))
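+/* The -1 compensates for lldb::LanguageType following the DWARF
+ DW_LANG_* numbering, which starts at 1, while LLVMDWARFSourceLanguage
+ is a 0-based enum; an unknown language (0) apparently falls back to
+ enum index 1. */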
+
+struct AOTCompData;
+typedef struct AOTCompData *aot_comp_data_t;
+typedef void *dwar_extractor_handle_t;
+
+struct AOTCompContext;
+typedef struct AOTCompContext AOTCompContext;
+
+struct AOTFuncContext;
+
+typedef struct AOTFuncContext AOTFuncContext;
+dwar_extractor_handle_t
+create_dwarf_extractor(aot_comp_data_t comp_data, char *file_name);
+
+LLVMMetadataRef
+dwarf_gen_file_info(AOTCompContext *comp_ctx);
+
+LLVMMetadataRef
+dwarf_gen_comp_unit_info(AOTCompContext *comp_ctx);
+
+LLVMMetadataRef
+dwarf_gen_func_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+LLVMMetadataRef
+dwarf_gen_location(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint64_t vm_offset);
+
+LLVMMetadataRef
+dwarf_gen_func_ret_location(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+void
+dwarf_get_func_name(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ char *name, int len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/iwasm_compl.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/iwasm_compl.cmake
new file mode 100644
index 000000000..4ec460304
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/iwasm_compl.cmake
@@ -0,0 +1,26 @@
+set (IWASM_COMPL_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories(${IWASM_COMPL_DIR})
+
+if (WAMR_BUILD_DEBUG_AOT EQUAL 1)
+ file (GLOB_RECURSE source_all
+ ${IWASM_COMPL_DIR}/*.c
+ ${IWASM_COMPL_DIR}/*.cpp)
+else()
+ file (GLOB source_all
+ ${IWASM_COMPL_DIR}/simd/*.c
+ ${IWASM_COMPL_DIR}/simd/*.cpp
+ ${IWASM_COMPL_DIR}/*.c
+ ${IWASM_COMPL_DIR}/*.cpp)
+endif()
+
+set (IWASM_COMPL_SOURCE ${source_all})
+
+# Disable RTTI to work with LLVM (which is typically built without RTTI)
+
+if (MSVC)
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else()
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif()
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c
new file mode 100644
index 000000000..4f43c35a9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_access_lanes.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+bool
+aot_compile_simd_shuffle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *frame_ip)
+{
+ LLVMValueRef vec1, vec2, mask, result;
+ uint8 imm[16] = { 0 };
+ int values[16];
+ unsigned i;
+
+ wasm_runtime_read_v128(frame_ip, (uint64 *)imm, (uint64 *)(imm + 8));
+ for (i = 0; i < 16; i++) {
+ values[i] = imm[i];
+ }
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "vec1"))) {
+ goto fail;
+ }
+
+ /* build a vector <16 x i32> */
+ if (!(mask = simd_build_const_integer_vector(comp_ctx, I32_TYPE, values,
+ 16))) {
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
+ "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+
+fail:
+ return false;
+}
+
+/* TODO: llvm.experimental.vector.* */
+/* shufflevector is not an option, since it requires *mask as a const */
+bool
+aot_compile_simd_swizzle_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, mask, max_lanes, condition, mask_lanes, result;
+ LLVMTypeRef param_types[2];
+
+ if (!(mask = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "mask"))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vec"))) {
+ goto fail;
+ }
+
+ /* icmp uge <16 x i8> mask, <16, 16, 16, 16, ...> */
+ if (!(max_lanes = simd_build_splat_const_integer_vector(comp_ctx, INT8_TYPE,
+ 16, 16))) {
+ goto fail;
+ }
+
+ /* if the highest bit of a mask byte is set, pshufb writes zero for
+ that lane instead of picking one from the vector */
+ /* select <16 x i1> %condition, <16 x i8> <0x80, 0x80, ...>,
+ <16 x i8> %mask */
+ if (!(mask_lanes = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 0x80, 16))) {
+ goto fail;
+ }
+
+ if (!(condition = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, mask,
+ max_lanes, "compare_with_16"))) {
+ HANDLE_FAILURE("LLVMBuldICmp");
+ goto fail;
+ }
+
+ if (!(mask = LLVMBuildSelect(comp_ctx->builder, condition, mask_lanes, mask,
+ "mask"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ param_types[0] = V128_i8x16_TYPE;
+ param_types[1] = V128_i8x16_TYPE;
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, "llvm.x86.ssse3.pshuf.b.128", V128_i8x16_TYPE,
+ param_types, 2, vector, mask))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+aot_compile_simd_swizzle_common(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, mask, default_lane_value, condition, max_lane_id,
+ result, idx, id, replace_with_zero, elem, elem_or_zero, undef;
+ uint8 i;
+
+ if (!(mask = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ "mask"))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vec"))) {
+ goto fail;
+ }
+
+ if (!(undef = LLVMGetUndef(V128_i8x16_TYPE))) {
+ HANDLE_FAILURE("LLVMGetUndef");
+ goto fail;
+ }
+
+ /* icmp uge <16 x i8> mask, <16, 16, 16, 16, ...> */
+ if (!(max_lane_id = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 16, 16))) {
+ goto fail;
+ }
+
+ if (!(condition = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, mask,
+ max_lane_id, "out_of_range"))) {
+ HANDLE_FAILURE("LLVMBuldICmp");
+ goto fail;
+ }
+
+ /* if the id is out of range (>= 16), set the id to 0 */
+ if (!(default_lane_value = simd_build_splat_const_integer_vector(
+ comp_ctx, INT8_TYPE, 0, 16))) {
+ goto fail;
+ }
+
+ if (!(idx = LLVMBuildSelect(comp_ctx->builder, condition,
+ default_lane_value, mask, "mask"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (!(id = LLVMBuildExtractElement(comp_ctx->builder, idx, I8_CONST(i),
+ "id"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(replace_with_zero =
+ LLVMBuildExtractElement(comp_ctx->builder, condition,
+ I8_CONST(i), "replace_with_zero"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(elem = LLVMBuildExtractElement(comp_ctx->builder, vector, id,
+ "vector[mask[i]]"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (!(elem_or_zero =
+ LLVMBuildSelect(comp_ctx->builder, replace_with_zero,
+ I8_CONST(0), elem, "elem_or_zero"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ goto fail;
+ }
+
+ if (!(undef =
+ LLVMBuildInsertElement(comp_ctx->builder, undef, elem_or_zero,
+ I8_CONST(i), "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+ }
+
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, undef, V128_i64x2_TYPE,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ if (is_target_x86(comp_ctx)) {
+ return aot_compile_simd_swizzle_x86(comp_ctx, func_ctx);
+ }
+ else {
+ return aot_compile_simd_swizzle_common(comp_ctx, func_ctx);
+ }
+}
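+/* Example of the semantics both paths above implement: with
+ vector = <0, 10, 20, ..., 150> and mask = <1, 3, 255, 16, 0, ...>,
+ the result begins <10, 30, 0, 0, 0, ...>. Any selector >= 16 yields 0;
+ the x86 path gets this natively from pshufb by rewriting such lanes to
+ 0x80 (high bit set). */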
+
+static bool
+aot_compile_simd_extract(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id, bool need_extend, bool is_signed,
+ LLVMTypeRef vector_type, LLVMTypeRef result_type,
+ unsigned aot_value_type)
+{
+ LLVMValueRef vector, lane, result;
+
+ if (!(lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ /* bitcast <2 x i64> %0 to <vector_type> */
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ goto fail;
+ }
+
+ /* extractelement <vector_type> %vector, i8 lane_id */
+ if (!(result = LLVMBuildExtractElement(comp_ctx->builder, vector, lane,
+ "element"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ goto fail;
+ }
+
+ if (need_extend) {
+ if (is_signed) {
+ /* sext <element_type> %element to <result_type> */
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, result, result_type,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+ }
+ else {
+ /* zext <element_type> %element to <result_type> */
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, result, result_type,
+ "ret"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+ }
+ }
+
+ PUSH(result, aot_value_type);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
+ is_signed, V128_i8x16_TYPE, I32_TYPE,
+ VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
+ is_signed, V128_i16x8_TYPE, I32_TYPE,
+ VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_i32x4_TYPE, I32_TYPE, VALUE_TYPE_I32);
+}
+
+bool
+aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_i64x2_TYPE, I64_TYPE, VALUE_TYPE_I64);
+}
+
+bool
+aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_f32x4_TYPE, F32_TYPE, VALUE_TYPE_F32);
+}
+
+bool
+aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
+ V128_f64x2_TYPE, F64_TYPE, VALUE_TYPE_F64);
+}
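
The extract helpers above read one lane and, for i8/i16 lanes, widen the
element to i32 with the requested signedness. A scalar sketch of
i16x8.extract_lane_s/u under that reading (illustrative standalone C; lane
layout is little-endian, as in Wasm):

#include <stdint.h>
#include <string.h>

static int32_t
i16x8_extract_lane_ref(const uint8_t v128[16], int lane, int is_signed)
{
    uint16_t raw;
    memcpy(&raw, v128 + lane * 2, sizeof(raw)); /* pick the lane */
    return is_signed ? (int32_t)(int16_t)raw    /* the LLVMBuildSExt case */
                     : (int32_t)raw;            /* the LLVMBuildZExt case */
}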
+
+static bool
+aot_compile_simd_replace(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id, unsigned new_value_type,
+ LLVMTypeRef vector_type, bool need_reduce,
+ LLVMTypeRef element_type)
+{
+ LLVMValueRef vector, new_value, lane, result;
+
+ POP(new_value, new_value_type);
+
+ if (!(lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id))) {
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ goto fail;
+ }
+
+ /* trunc <new_value_type> to <element_type> */
+ if (need_reduce) {
+ if (!(new_value = LLVMBuildTrunc(comp_ctx->builder, new_value,
+ element_type, "element"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ goto fail;
+ }
+ }
+
+ /* insertelement <vector_type> %vector, <element_type> %element,
+ i32 lane */
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder, vector, new_value,
+ lane, "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "reesult");
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i8x16_TYPE, true, INT8_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i16x8_TYPE, true, INT16_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I32,
+ V128_i32x4_TYPE, false, I32_TYPE);
+}
+
+bool
+aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_I64,
+ V128_i64x2_TYPE, false, I64_TYPE);
+}
+
+bool
+aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_F32,
+ V128_f32x4_TYPE, false, F32_TYPE);
+}
+
+bool
+aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id)
+{
+ return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id, VALUE_TYPE_F64,
+ V128_f64x2_TYPE, false, F64_TYPE);
+}
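
Replace is the mirror image: pop the scalar, truncate it when the operand is
wider than the lane (i8/i16 lanes carry i32 operands), and insert it at the
requested position. A scalar sketch for i8x16.replace_lane (illustrative
standalone C):

#include <stdint.h>

static void
i8x16_replace_lane_ref(uint8_t v128[16], int lane, int32_t new_value)
{
    v128[lane] = (uint8_t)new_value; /* trunc, then insertelement */
}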
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h
new file mode 100644
index 000000000..75ca71ced
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_access_lanes.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_ACCESS_LANES_H_
+#define _SIMD_ACCESS_LANES_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_shuffle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *frame_ip);
+
+bool
+aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed);
+
+bool
+aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id,
+ bool is_signed);
+
+bool
+aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, uint8 lane_id);
+
+bool
+aot_compile_simd_load8_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load16_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load32_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load64_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 lane_id);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_ACCESS_LANES_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c
new file mode 100644
index 000000000..675ffbcfe
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bit_shifts.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_shift {
+ e_shift_i8x16,
+ e_shift_i16x8,
+ e_shift_i32x4,
+ e_shift_i64x2,
+};
+
+static bool
+simd_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op, enum integer_shift itype)
+{
+ LLVMValueRef vector, offset, result = NULL;
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMTypeRef element_type[] = { INT8_TYPE, INT16_TYPE, I32_TYPE, I64_TYPE };
+
+ LLVMValueRef undef[] = { LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef), LLVM_CONST(i64x2_undef) };
+ LLVMValueRef mask[] = { LLVM_CONST(i8x16_vec_zero),
+ LLVM_CONST(i16x8_vec_zero),
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero) };
+ LLVMValueRef lane_bits[] = {
+ LLVM_CONST(i32_eight),
+ LLVMConstInt(I32_TYPE, 16, true),
+ LLVMConstInt(I32_TYPE, 32, true),
+ LLVMConstInt(I32_TYPE, 64, true),
+ };
+
+ POP_I32(offset);
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vec"))) {
+ return false;
+ }
+
+ /* offset mod LaneBits */
+ if (!lane_bits[itype]
+ || !(offset = LLVMBuildSRem(comp_ctx->builder, offset, lane_bits[itype],
+ "offset_fix"))) {
+ HANDLE_FAILURE("LLVMBuildSRem");
+ return false;
+ }
+
+ /* change type */
+ if (itype < e_shift_i32x4) {
+ offset = LLVMBuildTrunc(comp_ctx->builder, offset, element_type[itype],
+ "offset_trunc");
+ }
+ else if (itype == e_shift_i64x2) {
+ offset = LLVMBuildZExt(comp_ctx->builder, offset, element_type[itype],
+ "offset_ext");
+ }
+
+ if (!offset) {
+ HANDLE_FAILURE("LLVMBuildZext/LLVMBuildTrunc");
+ return false;
+ }
+
+ /* splat to a vector */
+ if (!(offset =
+ LLVMBuildInsertElement(comp_ctx->builder, undef[itype], offset,
+ I32_ZERO, "offset_vector_base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ if (!(offset =
+ LLVMBuildShuffleVector(comp_ctx->builder, offset, undef[itype],
+ mask[itype], "offset_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ switch (shift_op) {
+ case INT_SHL:
+ {
+ result = LLVMBuildShl(comp_ctx->builder, vector, offset, "shl");
+ break;
+ }
+ case INT_SHR_S:
+ {
+ result = LLVMBuildAShr(comp_ctx->builder, vector, offset, "ashr");
+ break;
+ }
+ case INT_SHR_U:
+ {
+ result = LLVMBuildLShr(comp_ctx->builder, vector, offset, "lshr");
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildShl/LLVMBuildLShr/LLVMBuildAShr");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i32x4);
+}
+
+bool
+aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op)
+{
+ return simd_shift(comp_ctx, func_ctx, shift_op, e_shift_i64x2);
+}
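
Per the Wasm SIMD spec only the shift count modulo the lane width is
significant, which is why simd_shift applies SRem to the count before
splatting it across the lanes. A scalar sketch for i8x16.shl (illustrative
standalone C; the mask is the power-of-two form of the modulo):

#include <stdint.h>

static void
i8x16_shl_ref(uint8_t lanes[16], uint32_t count)
{
    uint32_t c = count & 7; /* count mod 8, the "offset_fix" step */
    int i;
    for (i = 0; i < 16; i++)
        lanes[i] = (uint8_t)(lanes[i] << c);
}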
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h
new file mode 100644
index 000000000..06e86cad0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bit_shifts.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BIT_SHIFTS_H_
+#define _SIMD_BIT_SHIFTS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+bool
+aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntShift shift_op);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BIT_SHIFTS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c
new file mode 100644
index 000000000..67d965426
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bitmask_extracts.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_bitmask_type {
+ e_bitmask_i8x16,
+ e_bitmask_i16x8,
+ e_bitmask_i32x4,
+ e_bitmask_i64x2,
+};
+
+/* TODO: should use a cleverer intrinsic */
+static bool
+simd_build_bitmask(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx,
+ enum integer_bitmask_type itype)
+{
+ LLVMValueRef vector, mask, result;
+ uint8 i;
+ LLVMTypeRef vector_ext_type;
+
+ uint32 lanes[] = { 16, 8, 4, 2 };
+ uint32 lane_bits[] = { 8, 16, 32, 64 };
+ LLVMTypeRef element_type[] = { INT8_TYPE, INT16_TYPE, I32_TYPE, I64_TYPE };
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ int32 mask_element[16] = { 0 };
+ const char *intrinsic[] = {
+ "llvm.vector.reduce.or.v16i64",
+ "llvm.vector.reduce.or.v8i64",
+ "llvm.vector.reduce.or.v4i64",
+ "llvm.vector.reduce.or.v2i64",
+ };
+
+ LLVMValueRef ashr_distance;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vec"))) {
+ goto fail;
+ }
+
+ /* fill every bit in a lane with its sign bit */
+ if (!(ashr_distance = simd_build_splat_const_integer_vector(
+ comp_ctx, element_type[itype], lane_bits[itype] - 1,
+ lanes[itype]))) {
+ goto fail;
+ }
+
+ if (!(vector = LLVMBuildAShr(comp_ctx->builder, vector, ashr_distance,
+ "vec_ashr"))) {
+ HANDLE_FAILURE("LLVMBuildAShr");
+ goto fail;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I64_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ goto fail;
+ }
+
+ if (e_bitmask_i64x2 != itype) {
+ if (!(vector = LLVMBuildSExt(comp_ctx->builder, vector, vector_ext_type,
+ "sext_to_i64"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ mask_element[i] = 0x1 << i;
+ }
+
+ if (!(mask = simd_build_const_integer_vector(comp_ctx, I64_TYPE,
+ mask_element, lanes[itype]))) {
+ goto fail;
+ }
+
+ if (!(vector =
+ LLVMBuildAnd(comp_ctx->builder, vector, mask, "mask_bits"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic[itype],
+ I64_TYPE, &vector_ext_type, 1, vector))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildTrunc(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i32x4);
+}
+
+bool
+aot_compile_simd_i64x2_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_build_bitmask(comp_ctx, func_ctx, e_bitmask_i64x2);
+}
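
The bitmask lowering above is: arithmetic-shift each lane by lane_bits - 1 so
every bit holds the sign bit, AND lane i with 1 << i, then or-reduce. The net
effect, as a scalar sketch for i8x16.bitmask (illustrative standalone C):

#include <stdint.h>

static int32_t
i8x16_bitmask_ref(const int8_t lanes[16])
{
    int32_t mask = 0;
    int i;
    for (i = 0; i < 16; i++)
        mask |= (lanes[i] < 0) << i; /* bit i = sign bit of lane i */
    return mask;
}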
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h
new file mode 100644
index 000000000..aac4cc2ce
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitmask_extracts.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BITMASK_EXTRACTS_H_
+#define _SIMD_BITMASK_EXTRACTS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_bitmask(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BITMASK_EXTRACTS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c
new file mode 100644
index 000000000..66aef3637
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bitwise_ops.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+v128_bitwise_two_component(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Bitwise bitwise_op)
+{
+ LLVMValueRef vector1, vector2, result;
+
+ POP_V128(vector2);
+ POP_V128(vector1);
+
+ switch (bitwise_op) {
+ case V128_AND:
+ if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
+ "and"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+ break;
+ case V128_OR:
+ if (!(result =
+ LLVMBuildOr(comp_ctx->builder, vector1, vector2, "or"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+ break;
+ case V128_XOR:
+ if (!(result = LLVMBuildXor(comp_ctx->builder, vector1, vector2,
+ "xor"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+ break;
+ case V128_ANDNOT:
+ {
+ /* v128.and(a, v128.not(b)) */
+ if (!(vector2 = LLVMBuildNot(comp_ctx->builder, vector2, "not"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
+ "and"))) {
+ HANDLE_FAILURE("LLVMBuildAnd");
+ goto fail;
+ }
+
+ break;
+ }
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ PUSH_V128(result);
+ return true;
+fail:
+ return false;
+}
+
+static bool
+v128_bitwise_not(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, result;
+
+ POP_V128(vector);
+
+ if (!(result = LLVMBuildNot(comp_ctx->builder, vector, "not"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+ return true;
+fail:
+ return false;
+}
+
+/* v128.or(v128.and(v1, c), v128.and(v2, v128.not(c))) */
+static bool
+v128_bitwise_bitselect(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector1, vector2, vector3, result;
+
+ POP_V128(vector3);
+ POP_V128(vector2);
+ POP_V128(vector1);
+
+ if (!(vector1 =
+ LLVMBuildAnd(comp_ctx->builder, vector1, vector3, "a_and_c"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ goto fail;
+ }
+
+ if (!(vector3 = LLVMBuildNot(comp_ctx->builder, vector3, "not_c"))) {
+ HANDLE_FAILURE("LLVMBuildNot");
+ goto fail;
+ }
+
+ if (!(vector2 =
+ LLVMBuildAnd(comp_ctx->builder, vector2, vector3, "b_and_c"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildOr(comp_ctx->builder, vector1, vector2, "a_or_b"))) {
+ HANDLE_FAILURE("LLVMBuildOr");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, V128Bitwise bitwise_op)
+{
+ switch (bitwise_op) {
+ case V128_AND:
+ case V128_OR:
+ case V128_XOR:
+ case V128_ANDNOT:
+ return v128_bitwise_two_component(comp_ctx, func_ctx, bitwise_op);
+ case V128_NOT:
+ return v128_bitwise_not(comp_ctx, func_ctx);
+ case V128_BITSELECT:
+ return v128_bitwise_bitselect(comp_ctx, func_ctx);
+ default:
+ bh_assert(0);
+ return false;
+ }
+}
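
v128.bitselect takes bits from the first operand where the mask bit is set and
from the second where it is clear, i.e. (v1 & c) | (v2 & ~c), exactly the
and/not/and/or chain emitted above. One 64-bit half as a scalar sketch
(illustrative standalone C):

#include <stdint.h>

static uint64_t
bitselect64_ref(uint64_t v1, uint64_t v2, uint64_t c)
{
    return (v1 & c) | (v2 & ~c);
}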
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h
new file mode 100644
index 000000000..ddf81c0b7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bitwise_ops.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BITWISE_OPS_H_
+#define _SIMD_BITWISE_OPS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, V128Bitwise bitwise_op);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BITWISE_OPS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c
new file mode 100644
index 000000000..4607d680a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_bool_reductions.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+enum integer_all_true {
+ e_int_all_true_v16i8,
+ e_int_all_true_v8i16,
+ e_int_all_true_v4i32,
+ e_int_all_true_v2i64,
+};
+
+static bool
+simd_all_true(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_all_true itype)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef vector_i1_type;
+ LLVMTypeRef vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ uint32 lanes[] = { 16, 8, 4, 2 };
+ const char *intrinsic[] = {
+ "llvm.vector.reduce.and.v16i1",
+ "llvm.vector.reduce.and.v8i1",
+ "llvm.vector.reduce.and.v4i1",
+ "llvm.vector.reduce.and.v2i1",
+ };
+ LLVMValueRef zero[] = {
+ LLVM_CONST(i8x16_vec_zero),
+ LLVM_CONST(i16x8_vec_zero),
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero),
+ };
+
+ if (!(vector_i1_type = LLVMVectorType(INT1_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ goto fail;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "vector"))) {
+ goto fail;
+ }
+
+ /* compare with zero */
+ if (!(result = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, vector,
+ zero[itype], "ne_zero"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ goto fail;
+ }
+
+ /* check zero */
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic[itype],
+ INT1_TYPE, &vector_i1_type, 1, result))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v16i8);
+}
+
+bool
+aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v8i16);
+}
+
+bool
+aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v4i32);
+}
+
+bool
+aot_compile_simd_i64x2_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_all_true(comp_ctx, func_ctx, e_int_all_true_v2i64);
+}
+
+bool
+aot_compile_simd_v128_any_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMTypeRef vector_type;
+ LLVMValueRef vector, result;
+
+ if (!(vector_type = LLVMVectorType(INT1_TYPE, 128))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ goto fail;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx, "llvm.vector.reduce.or.v128i1", INT1_TYPE,
+ &vector_type, 1, vector))) {
+ goto fail;
+ }
+
+ if (!(result =
+ LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "to_i32"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ goto fail;
+ }
+
+ PUSH_I32(result);
+
+ return true;
+fail:
+ return false;
+}
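
all_true compares every lane against zero and and-reduces the resulting i1
lanes; any_true views the whole value as <128 x i1> and or-reduces, which is
the same as testing the 128 bits against zero. Scalar sketches (illustrative
standalone C):

#include <stdint.h>

static int32_t
i8x16_all_true_ref(const uint8_t lanes[16])
{
    int i;
    for (i = 0; i < 16; i++)
        if (lanes[i] == 0)
            return 0;      /* one zero lane falsifies the and-reduction */
    return 1;
}

static int32_t
v128_any_true_ref(uint64_t lo, uint64_t hi)
{
    return (lo | hi) != 0; /* or-reduction over all 128 bits */
}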
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h
new file mode 100644
index 000000000..649d5a5e2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_bool_reductions.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_BOOL_REDUCTIONS_H_
+#define _SIMD_BOOL_REDUCTIONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_all_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_v128_any_true(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_BOOL_REDUCTIONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c
new file mode 100644
index 000000000..95bcdfdb0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+
+LLVMValueRef
+simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMTypeRef vec_type,
+ const char *name)
+{
+ LLVMValueRef number;
+
+ POP_V128(number);
+
+ if (!(number =
+ LLVMBuildBitCast(comp_ctx->builder, number, vec_type, name))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ return number;
+fail:
+ return NULL;
+}
+
+bool
+simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMValueRef vector,
+ const char *name)
+{
+ if (!(vector = LLVMBuildBitCast(comp_ctx->builder, vector, V128_i64x2_TYPE,
+ name))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ /* push result into the stack */
+ PUSH_V128(vector);
+
+ return true;
+fail:
+ return false;
+}
+
+LLVMValueRef
+simd_lane_id_to_llvm_value(AOTCompContext *comp_ctx, uint8 lane_id)
+{
+ LLVMValueRef lane_indexes[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+
+ return lane_id < 16 ? lane_indexes[lane_id] : NULL;
+}
+
+LLVMValueRef
+simd_build_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int *element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ for (i = 0; i < length; i++) {
+ if (!(elements[i] =
+ LLVMConstInt(element_type, element_value[i], true))) {
+ HANDLE_FAILURE("LLVMConstInst");
+ goto fail;
+ }
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
+
+LLVMValueRef
+simd_build_splat_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int64 element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL, element;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ if (!(element = LLVMConstInt(element_type, element_value, true))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ for (i = 0; i < length; i++) {
+ elements[i] = element;
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
+
+LLVMValueRef
+simd_build_splat_const_float_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const float element_value, uint32 length)
+{
+ LLVMValueRef vector = NULL, element;
+ LLVMValueRef *elements;
+ unsigned i;
+
+ if (!(elements = wasm_runtime_malloc(sizeof(LLVMValueRef) * length))) {
+ return NULL;
+ }
+
+ if (!(element = LLVMConstReal(element_type, element_value))) {
+ HANDLE_FAILURE("LLVMConstReal");
+ goto fail;
+ }
+
+ for (i = 0; i < length; i++) {
+ elements[i] = element;
+ }
+
+ if (!(vector = LLVMConstVector(elements, length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ goto fail;
+ }
+
+fail:
+ wasm_runtime_free(elements);
+ return vector;
+}
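
The two helpers at the top of this file encode the convention the rest of the
SIMD code relies on: the AOT value stack holds every v128 as <2 x i64>, and
each operation bitcasts to its lane view on pop and back on push. A bitcast
reinterprets the same 128 bits without changing them, as this standalone C
analogue illustrates (memcpy models the type pun):

#include <stdint.h>
#include <string.h>

typedef struct {
    uint64_t halves[2]; /* the canonical <2 x i64> stack representation */
} v128_slot;

static void
view_as_i8x16(const v128_slot *slot, uint8_t out_lanes[16])
{
    memcpy(out_lanes, slot->halves, 16); /* same bits, different lane view */
}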
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h
new file mode 100644
index 000000000..c7a08dbc7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_common.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_COMMON_H_
+#define _SIMD_COMMON_H_
+
+#include "../aot_compiler.h"
+
+static inline bool
+is_target_x86(AOTCompContext *comp_ctx)
+{
+ return !strncmp(comp_ctx->target_arch, "x86_64", 6)
+ || !strncmp(comp_ctx->target_arch, "i386", 4);
+}
+
+LLVMValueRef
+simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMTypeRef vec_type,
+ const char *name);
+
+bool
+simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
+ const AOTFuncContext *func_ctx, LLVMValueRef vector,
+ const char *name);
+
+LLVMValueRef
+simd_lane_id_to_llvm_value(AOTCompContext *comp_ctx, uint8 lane_id);
+
+LLVMValueRef
+simd_build_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int *element_value, uint32 length);
+
+LLVMValueRef
+simd_build_splat_const_integer_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const int64 element_value, uint32 length);
+
+LLVMValueRef
+simd_build_splat_const_float_vector(const AOTCompContext *comp_ctx,
+ const LLVMTypeRef element_type,
+ const float element_value, uint32 length);
+#endif /* _SIMD_COMMON_H_ */
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c
new file mode 100644
index 000000000..8a87ab25b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_comparisons.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+float_cond_2_predicate(FloatCond cond, LLVMRealPredicate *out)
+{
+ switch (cond) {
+ case FLOAT_EQ:
+ *out = LLVMRealOEQ;
+ break;
+ case FLOAT_NE:
+ *out = LLVMRealUNE;
+ break;
+ case FLOAT_LT:
+ *out = LLVMRealOLT;
+ break;
+ case FLOAT_GT:
+ *out = LLVMRealOGT;
+ break;
+ case FLOAT_LE:
+ *out = LLVMRealOLE;
+ break;
+ case FLOAT_GE:
+ *out = LLVMRealOGE;
+ break;
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+int_cond_2_predicate(IntCond cond, LLVMIntPredicate *out)
+{
+ switch (cond) {
+ case INT_EQZ:
+ case INT_EQ:
+ *out = LLVMIntEQ;
+ break;
+ case INT_NE:
+ *out = LLVMIntNE;
+ break;
+ case INT_LT_S:
+ *out = LLVMIntSLT;
+ break;
+ case INT_LT_U:
+ *out = LLVMIntULT;
+ break;
+ case INT_GT_S:
+ *out = LLVMIntSGT;
+ break;
+ case INT_GT_U:
+ *out = LLVMIntUGT;
+ break;
+ case INT_LE_S:
+ *out = LLVMIntSLE;
+ break;
+ case INT_LE_U:
+ *out = LLVMIntULE;
+ break;
+ case INT_GE_S:
+ *out = LLVMIntSGE;
+ break;
+ case INT_GE_U:
+ *out = LLVMIntUGE;
+ break;
+ default:
+ bh_assert(0);
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static bool
+integer_vector_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ IntCond cond, LLVMTypeRef vector_type)
+{
+ LLVMValueRef vec1, vec2, result;
+ LLVMIntPredicate int_pred;
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec1"))) {
+ goto fail;
+ }
+
+ if (!int_cond_2_predicate(cond, &int_pred)) {
+ HANDLE_FAILURE("int_cond_2_predicate");
+ goto fail;
+ }
+ /* icmp <N x iX> %vec1, %vec2 */
+ if (!(result =
+ LLVMBuildICmp(comp_ctx->builder, int_pred, vec1, vec2, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ goto fail;
+ }
+
+ /* sext <N x i1> %result to <N x iX> */
+ if (!(result =
+ LLVMBuildSExt(comp_ctx->builder, result, vector_type, "ext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+
+ /* bitcast <N x iX> %result to <2 x i64> */
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "result"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+ return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+ return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+ return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond)
+{
+ return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i64x2_TYPE);
+}
+
+static bool
+float_vector_compare(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatCond cond, LLVMTypeRef vector_type,
+ LLVMTypeRef result_type)
+{
+ LLVMValueRef vec1, vec2, result;
+ LLVMRealPredicate real_pred;
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec2"))) {
+ goto fail;
+ }
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec1"))) {
+ goto fail;
+ }
+
+ if (!float_cond_2_predicate(cond, &real_pred)) {
+ HANDLE_FAILURE("float_cond_2_predicate");
+ goto fail;
+ }
+ /* fcmp <N x iX> %vec1, %vec2 */
+ if (!(result =
+ LLVMBuildFCmp(comp_ctx->builder, real_pred, vec1, vec2, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildFCmp");
+ goto fail;
+ }
+
+ /* sext <N x i1> %result to <N x iX> */
+ if (!(result =
+ LLVMBuildSExt(comp_ctx->builder, result, result_type, "ext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ goto fail;
+ }
+
+ /* bitcast <N x iX> %result to <2 x i64> */
+ if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
+ "result"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ goto fail;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond)
+{
+ return float_vector_compare(comp_ctx, func_ctx, cond, V128_f32x4_TYPE,
+ V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond)
+{
+ return float_vector_compare(comp_ctx, func_ctx, cond, V128_f64x2_TYPE,
+ V128_i64x2_TYPE);
+}
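
Vector compares produce <N x i1>; the sext step that follows widens each i1 to
an all-ones or all-zeros lane, which is the result format Wasm's relational
SIMD ops require. A scalar sketch for i8x16.lt_s (illustrative standalone C):

#include <stdint.h>

static void
i8x16_lt_s_ref(const int8_t a[16], const int8_t b[16], uint8_t out[16])
{
    int i;
    for (i = 0; i < 16; i++)
        out[i] = a[i] < b[i] ? 0xFF : 0x00; /* sext of the compare bit */
}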
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h
new file mode 100644
index 000000000..322ebefb2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_comparisons.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_COMPARISONS_H_
+#define _SIMD_COMPARISONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_i64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, IntCond cond);
+
+bool
+aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond);
+
+bool
+aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, FloatCond cond);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_COMPARISONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c
new file mode 100644
index 000000000..ceb09e370
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+#include "simd_construct_values.h"
+#include "../aot_emit_exception.h"
+#include "../interpreter/wasm_opcode.h"
+#include "../../aot/aot_runtime.h"
+
+bool
+aot_compile_simd_v128_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *imm_bytes)
+{
+ uint64 imm1, imm2;
+ LLVMValueRef first_long, agg1, second_long, agg2;
+
+ wasm_runtime_read_v128(imm_bytes, &imm1, &imm2);
+
+ /* %agg1 = insertelement <2 x i64> undef, i64 %imm1, i32 0 */
+ if (!(first_long = I64_CONST(imm1))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ goto fail;
+ }
+
+ if (!(agg1 =
+ LLVMBuildInsertElement(comp_ctx->builder, LLVM_CONST(i64x2_undef),
+ first_long, I32_ZERO, "agg1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ /* %agg2 = insertelement <2 x i64> %agg1, i64 %imm2, i32 1 */
+ if (!(second_long = I64_CONST(imm2))) {
+ HANDLE_FAILURE("LLVMGetUndef");
+ goto fail;
+ }
+
+ if (!(agg2 = LLVMBuildInsertElement(comp_ctx->builder, agg1, second_long,
+ I32_ONE, "agg2"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ PUSH_V128(agg2);
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode)
+{
+ uint32 opcode_index = opcode - SIMD_i8x16_splat;
+ LLVMValueRef value = NULL, base, new_vector;
+ LLVMValueRef undefs[] = {
+ LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef), LLVM_CONST(i64x2_undef),
+ LLVM_CONST(f32x4_undef), LLVM_CONST(f64x2_undef),
+ };
+ LLVMValueRef masks[] = {
+ LLVM_CONST(i32x16_zero), LLVM_CONST(i32x8_zero), LLVM_CONST(i32x4_zero),
+ LLVM_CONST(i32x2_zero), LLVM_CONST(i32x4_zero), LLVM_CONST(i32x2_zero),
+ };
+
+ switch (opcode) {
+ case SIMD_i8x16_splat:
+ {
+ LLVMValueRef input;
+ POP_I32(input);
+ /* trunc i32 %input to i8 */
+ value =
+ LLVMBuildTrunc(comp_ctx->builder, input, INT8_TYPE, "trunc");
+ break;
+ }
+ case SIMD_i16x8_splat:
+ {
+ LLVMValueRef input;
+ POP_I32(input);
+ /* trunc i32 %input to i16 */
+ value =
+ LLVMBuildTrunc(comp_ctx->builder, input, INT16_TYPE, "trunc");
+ break;
+ }
+ case SIMD_i32x4_splat:
+ {
+ POP_I32(value);
+ break;
+ }
+ case SIMD_i64x2_splat:
+ {
+ POP(value, VALUE_TYPE_I64);
+ break;
+ }
+ case SIMD_f32x4_splat:
+ {
+ POP(value, VALUE_TYPE_F32);
+ break;
+ }
+ case SIMD_f64x2_splat:
+ {
+ POP(value, VALUE_TYPE_F64);
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+
+ if (!value) {
+ goto fail;
+ }
+
+ /* insertelement <n x ty> undef, ty %value, i32 0 */
+ if (!(base = LLVMBuildInsertElement(comp_ctx->builder, undefs[opcode_index],
+ value, I32_ZERO, "base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ goto fail;
+ }
+
+ /* shufflevector <ty1> %base, <ty2> undef, <n x i32> zeroinitializer */
+ if (!(new_vector = LLVMBuildShuffleVector(
+ comp_ctx->builder, base, undefs[opcode_index],
+ masks[opcode_index], "new_vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ goto fail;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, new_vector, "result");
+fail:
+ return false;
+}
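
Splat is emitted as an insertelement into lane 0 followed by a shufflevector
with an all-zero mask, which broadcasts lane 0 into every position. A scalar
sketch for i8x16.splat (illustrative standalone C):

#include <stdint.h>

static void
i8x16_splat_ref(int32_t value, uint8_t out[16])
{
    uint8_t lane = (uint8_t)value; /* the trunc step */
    int i;
    for (i = 0; i < 16; i++)
        out[i] = lane;             /* the zero-mask shuffle broadcast */
}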
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h
new file mode 100644
index 000000000..8cd50c88b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_construct_values.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_CONSTRUCT_VALUES_H_
+#define _SIMD_CONSTRUCT_VALUES_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const uint8 *imm_bytes);
+
+bool
+aot_compile_simd_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 splat_opcode);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_CONSTRUCT_VALUES_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c
new file mode 100644
index 000000000..e9d30bfcb
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_conversions.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_numberic.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_integer_narrow_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
+ const char *intrinsic)
+{
+ LLVMValueRef vector1, vector2, result;
+ LLVMTypeRef param_types[2] = { in_vector_type, in_vector_type };
+
+ if (!(vector2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type, "vec2"))
+ || !(vector1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type, "vec1"))) {
+ return false;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ out_vector_type, param_types, 2,
+ vector1, vector2))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+enum integer_sat_type {
+ e_sat_i16x8 = 0,
+ e_sat_i32x4,
+ e_sat_i64x2,
+ e_sat_i32x8,
+};
+
+static LLVMValueRef
+simd_saturate(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_sat_type itype, LLVMValueRef vector,
+ LLVMValueRef min, LLVMValueRef max, bool is_signed)
+{
+ LLVMValueRef result;
+ LLVMTypeRef vector_type;
+
+ LLVMTypeRef param_types[][2] = {
+ { V128_i16x8_TYPE, V128_i16x8_TYPE },
+ { V128_i32x4_TYPE, V128_i32x4_TYPE },
+ { V128_i64x2_TYPE, V128_i64x2_TYPE },
+ { 0 },
+ };
+
+ const char *smin_intrinsic[] = {
+ "llvm.smin.v8i16",
+ "llvm.smin.v4i32",
+ "llvm.smin.v2i64",
+ "llvm.smin.v8i32",
+ };
+
+ const char *umin_intrinsic[] = {
+ "llvm.umin.v8i16",
+ "llvm.umin.v4i32",
+ "llvm.umin.v2i64",
+ "llvm.umin.v8i32",
+ };
+
+ const char *smax_intrinsic[] = {
+ "llvm.smax.v8i16",
+ "llvm.smax.v4i32",
+ "llvm.smax.v2i64",
+ "llvm.smax.v8i32",
+ };
+
+ const char *umax_intrinsic[] = {
+ "llvm.umax.v8i16",
+ "llvm.umax.v4i32",
+ "llvm.umax.v2i64",
+ "llvm.umax.v8i32",
+ };
+
+ if (e_sat_i32x8 == itype) {
+ if (!(vector_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return NULL;
+ }
+
+ param_types[itype][0] = vector_type;
+ param_types[itype][1] = vector_type;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx,
+ is_signed ? smin_intrinsic[itype] : umin_intrinsic[itype],
+ param_types[itype][0], param_types[itype], 2, vector, max))
+ || !(result = aot_call_llvm_intrinsic(
+ comp_ctx, func_ctx,
+ is_signed ? smax_intrinsic[itype] : umax_intrinsic[itype],
+ param_types[itype][0], param_types[itype], 2, result, min))) {
+ return NULL;
+ }
+
+ return result;
+}
+
+static bool
+simd_integer_narrow_common(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_sat_type itype, bool is_signed)
+{
+ LLVMValueRef vec1, vec2, min, max, mask, result;
+ LLVMTypeRef in_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i64x2_TYPE };
+ LLVMTypeRef min_max_type[] = { INT16_TYPE, I32_TYPE, I64_TYPE };
+ LLVMTypeRef trunc_type[3] = { 0 };
+ uint8 length[] = { 8, 4, 2 };
+
+ int64 smin[] = { 0xff80, 0xffFF8000, 0xffFFffFF80000000 };
+ int64 umin[] = { 0x0, 0x0, 0x0 };
+ int64 smax[] = { 0x007f, 0x00007fff, 0x000000007fFFffFF };
+ int64 umax[] = { 0x00ff, 0x0000ffff, 0x00000000ffFFffFF };
+
+ LLVMValueRef mask_element[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+
+ if (!(trunc_type[0] = LLVMVectorType(INT8_TYPE, 8))
+ || !(trunc_type[1] = LLVMVectorType(INT16_TYPE, 4))
+ || !(trunc_type[2] = LLVMVectorType(I32_TYPE, 2))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec2"))
+ || !(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec1"))) {
+ return false;
+ }
+
+ if (!(max = simd_build_splat_const_integer_vector(
+ comp_ctx, min_max_type[itype],
+ is_signed ? smax[itype] : umax[itype], length[itype]))
+ || !(min = simd_build_splat_const_integer_vector(
+ comp_ctx, min_max_type[itype],
+ is_signed ? smin[itype] : umin[itype], length[itype]))) {
+ return false;
+ }
+
+ /* sat */
+ if (!(vec1 = simd_saturate(comp_ctx, func_ctx, itype, vec1, min, max,
+ is_signed))
+ || !(vec2 = simd_saturate(comp_ctx, func_ctx, itype, vec2, min,
+ max, is_signed))) {
+ return false;
+ }
+
+ /* trunc */
+ if (!(vec1 = LLVMBuildTrunc(comp_ctx->builder, vec1, trunc_type[itype],
+ "vec1_trunc"))
+ || !(vec2 = LLVMBuildTrunc(comp_ctx->builder, vec2, trunc_type[itype],
+ "vec2_trunc"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ return false;
+ }
+
+ /* combine */
+ if (!(mask = LLVMConstVector(mask_element, (length[itype] << 1)))) {
+ HANDLE_FAILURE("LLVMConstInt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
+ "vec_shuffle"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
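
The generic narrowing path clamps each wide lane into the destination range
with the min/max intrinsics, truncates both inputs, then interleaves the two
halves with a single shuffle. A scalar sketch for i8x16.narrow_i16x8_s
(illustrative standalone C):

#include <stdint.h>

static void
i8x16_narrow_i16x8_s_ref(const int16_t a[8], const int16_t b[8],
                         int8_t out[16])
{
    int i;
    for (i = 0; i < 16; i++) {
        int16_t v = i < 8 ? a[i] : b[i - 8]; /* the combining shuffle */
        if (v > 127)
            v = 127;        /* the smin clamp */
        if (v < -128)
            v = -128;       /* the smax clamp */
        out[i] = (int8_t)v; /* the trunc */
    }
}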
+
+bool
+aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ if (is_target_x86(comp_ctx)) {
+ return simd_integer_narrow_x86(
+ comp_ctx, func_ctx, V128_i16x8_TYPE, V128_i8x16_TYPE,
+ is_signed ? "llvm.x86.sse2.packsswb.128"
+ : "llvm.x86.sse2.packuswb.128");
+ }
+ else {
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i16x8,
+ is_signed);
+ }
+}
+
+bool
+aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ if (is_target_x86(comp_ctx)) {
+ return simd_integer_narrow_x86(comp_ctx, func_ctx, V128_i32x4_TYPE,
+ V128_i16x8_TYPE,
+ is_signed ? "llvm.x86.sse2.packssdw.128"
+ : "llvm.x86.sse41.packusdw");
+ }
+ else {
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i32x4,
+ is_signed);
+ }
+}
+
+bool
+aot_compile_simd_i32x4_narrow_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ /* TODO: x86 intrinsics */
+ return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i64x2,
+ is_signed);
+}
+
+enum integer_extend_type {
+ e_ext_i8x16,
+ e_ext_i16x8,
+ e_ext_i32x4,
+};
+
+static LLVMValueRef
+simd_integer_extension(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_extend_type itype, LLVMValueRef vector,
+ bool lower_half, bool is_signed)
+{
+ LLVMValueRef mask, sub_vector, result;
+ LLVMValueRef bits[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_eight), LLVM_CONST(i32_nine),
+ LLVM_CONST(i32_ten), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_thirteen),
+ LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
+ };
+ LLVMTypeRef out_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i64x2_TYPE };
+ LLVMValueRef undef[] = { LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef) };
+ uint32 sub_vector_length[] = { 8, 4, 2 };
+
+ if (!(mask = lower_half ? LLVMConstVector(bits, sub_vector_length[itype])
+ : LLVMConstVector(bits + sub_vector_length[itype],
+ sub_vector_length[itype]))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return NULL;
+ }
+
+ /* retrieve the low or high half */
+ if (!(sub_vector = LLVMBuildShuffleVector(comp_ctx->builder, vector,
+ undef[itype], mask, "half"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return NULL;
+ }
+
+ if (is_signed) {
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
+ out_vector_type[itype], "sext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return NULL;
+ }
+ }
+ else {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
+ out_vector_type[itype], "zext"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return NULL;
+ }
+ }
+
+ return result;
+}
+
+static bool
+simd_integer_extension_wrapper(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ enum integer_extend_type itype, bool lower_half,
+ bool is_signed)
+{
+ LLVMValueRef vector, result;
+
+ LLVMTypeRef in_vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec"))) {
+ return false;
+ }
+
+ if (!(result = simd_integer_extension(comp_ctx, func_ctx, itype, vector,
+ lower_half, is_signed))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extend_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i8x16,
+ lower_half, is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_extend_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i16x8,
+ lower_half, is_signed);
+}
+
+bool
+aot_compile_simd_i64x2_extend_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i32x4,
+ lower_half, is_signed);
+}
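
extend_low/high first extracts one half of the source vector with a shuffle
mask over indices 0..N/2-1 or N/2..N-1, then widens it with sext or zext. A
scalar sketch for i16x8.extend_low/high_i8x16_s (illustrative standalone C):

#include <stdint.h>

static void
i16x8_extend_i8x16_s_ref(const int8_t in[16], int low_half, int16_t out[8])
{
    const int8_t *half = low_half ? in : in + 8; /* the shuffle mask choice */
    int i;
    for (i = 0; i < 8; i++)
        out[i] = (int16_t)half[i];               /* the sext step */
}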
+
+static LLVMValueRef
+simd_trunc_sat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ const char *intrinsics, LLVMTypeRef in_vector_type,
+ LLVMTypeRef out_vector_type)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[] = { in_vector_type };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, in_vector_type,
+ "vector"))) {
+ return NULL;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsics,
+ out_vector_type, param_types, 1,
+ vector))) {
+ return NULL;
+ }
+
+ return result;
+}
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef result;
+ if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
+ is_signed ? "llvm.fptosi.sat.v4i32.v4f32"
+ : "llvm.fptoui.sat.v4i32.v4f32",
+ V128_f32x4_TYPE, V128_i32x4_TYPE))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef result, zero, mask;
+ LLVMTypeRef out_vector_type;
+ LLVMValueRef lanes[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_one),
+ LLVM_CONST(i32_two),
+ LLVM_CONST(i32_three),
+ };
+
+ if (!(out_vector_type = LLVMVectorType(I32_TYPE, 2))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
+ is_signed ? "llvm.fptosi.sat.v2i32.v2f64"
+ : "llvm.fptoui.sat.v2i32.v2f64",
+ V128_f64x2_TYPE, out_vector_type))) {
+ return false;
+ }
+
+ if (!(zero = LLVMConstNull(out_vector_type))) {
+ HANDLE_FAILURE("LLVMConstNull");
+ return false;
+ }
+
+ /* v2i32 -> v4i32 */
+ if (!(mask = LLVMConstVector(lanes, 4))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, zero, mask,
+ "extend"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
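
llvm.fptosi.sat/llvm.fptoui.sat saturate out-of-range inputs and map NaN to
zero, which matches Wasm's trunc_sat semantics; the f64x2 variant then widens
its two-lane result to four lanes of zeros via the shuffle above. A scalar
sketch of one i32x4.trunc_sat_f32x4_s lane (illustrative standalone C; note
(float)INT32_MAX rounds up to 2^31, so >= is the right saturation test):

#include <stdint.h>
#include <math.h>

static int32_t
trunc_sat_f32_s_ref(float x)
{
    if (isnan(x))
        return 0;             /* NaN -> 0 */
    if (x <= (float)INT32_MIN)
        return INT32_MIN;     /* saturate low */
    if (x >= (float)INT32_MAX)
        return INT32_MAX;     /* saturate high */
    return (int32_t)x;        /* in range: truncate toward zero */
}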
+
+static LLVMValueRef
+simd_integer_convert(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool is_signed, LLVMValueRef vector,
+ LLVMTypeRef out_vector_type)
+
+{
+ LLVMValueRef result;
+ result = is_signed ? LLVMBuildSIToFP(comp_ctx->builder, vector,
+ out_vector_type, "converted")
+ : LLVMBuildUIToFP(comp_ctx->builder, vector,
+ out_vector_type, "converted");
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildSIToFP/LLVMBuildUIToFP");
+ }
+
+ return result;
+}
+
+bool
+aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i32x4_TYPE, "vec"))) {
+ return false;
+ }
+
+ if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
+ V128_f32x4_TYPE))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f64x2_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed)
+{
+ LLVMValueRef vector, mask, result;
+ LLVMValueRef lanes[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_one),
+ };
+ LLVMTypeRef out_vector_type;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i32x4_TYPE, "vec"))) {
+ return false;
+ }
+
+ if (!(out_vector_type = LLVMVectorType(F64_TYPE, 4))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
+ out_vector_type))) {
+ return false;
+ }
+
+ /* v4f64 -> v2f64 */
+ if (!(mask = LLVMConstVector(lanes, 2))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, result,
+ mask, "trunc"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+static bool
+simd_extadd_pairwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
+ bool is_signed)
+{
+ LLVMValueRef vector, even_mask, odd_mask, sub_vector_even, sub_vector_odd,
+ result;
+
+ LLVMValueRef even_element[] = {
+ LLVM_CONST(i32_zero), LLVM_CONST(i32_two), LLVM_CONST(i32_four),
+ LLVM_CONST(i32_six), LLVM_CONST(i32_eight), LLVM_CONST(i32_ten),
+ LLVM_CONST(i32_twelve), LLVM_CONST(i32_fourteen),
+ };
+
+ LLVMValueRef odd_element[] = {
+ LLVM_CONST(i32_one), LLVM_CONST(i32_three),
+ LLVM_CONST(i32_five), LLVM_CONST(i32_seven),
+ LLVM_CONST(i32_nine), LLVM_CONST(i32_eleven),
+ LLVM_CONST(i32_thirteen), LLVM_CONST(i32_fifteen),
+ };
+
+ /* assumes the result is either i16x8 from i8x16 or i32x4 from i16x8 */
+ uint8 mask_length = V128_i16x8_TYPE == out_vector_type ? 8 : 4;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, in_vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(even_mask = LLVMConstVector(even_element, mask_length))
+ || !(odd_mask = LLVMConstVector(odd_element, mask_length))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+    /* split the input vector into even- and odd-indexed half vectors */
+ if (!(sub_vector_even = LLVMBuildShuffleVector(
+ comp_ctx->builder, vector, vector, even_mask, "pick_even"))
+ || !(sub_vector_odd = LLVMBuildShuffleVector(
+ comp_ctx->builder, vector, vector, odd_mask, "pick_odd"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+    /* sext/zext the half vectors to the output lane width */
+ if (is_signed) {
+ if (!(sub_vector_even =
+ LLVMBuildSExt(comp_ctx->builder, sub_vector_even,
+ out_vector_type, "even_sext"))
+ || !(sub_vector_odd =
+ LLVMBuildSExt(comp_ctx->builder, sub_vector_odd,
+ out_vector_type, "odd_sext"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+ }
+ else {
+ if (!(sub_vector_even =
+ LLVMBuildZExt(comp_ctx->builder, sub_vector_even,
+ out_vector_type, "even_zext"))
+ || !(sub_vector_odd =
+ LLVMBuildZExt(comp_ctx->builder, sub_vector_odd,
+ out_vector_type, "odd_zext"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, sub_vector_even,
+ sub_vector_odd, "sum"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extadd_pairwise_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed)
+{
+ return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ V128_i16x8_TYPE, is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_extadd_pairwise_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed)
+{
+ return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, is_signed);
+}
+
+bool
+aot_compile_simd_i16x8_q15mulr_sat(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef lhs, rhs, pad, offset, min, max, result;
+ LLVMTypeRef vector_ext_type;
+
+ if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i16x8_TYPE, "lhs"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(lhs = LLVMBuildSExt(comp_ctx->builder, lhs, vector_ext_type,
+ "lhs_v8i32"))
+ || !(rhs = LLVMBuildSExt(comp_ctx->builder, rhs, vector_ext_type,
+ "rhs_v8i32"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+
+    /* rounding constant 0x4000 and shift amount 15 */
+ if (!(pad = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0x4000, 8))
+ || !(offset = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 15, 8))) {
+ return false;
+ }
+
+    /* TODO: look for an x86 intrinsic for integer "fused multiply-add" */
+ /* S.SignedSaturate((x * y + 0x4000) >> 15) */
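+    /* e.g. (-0x8000 * -0x8000 + 0x4000) >> 15 = 0x8000 -> sat to 0x7fff */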
+ if (!(result = LLVMBuildMul(comp_ctx->builder, lhs, rhs, "mul"))) {
+ HANDLE_FAILURE("LLVMBuildMul");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, result, pad, "add"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAShr(comp_ctx->builder, result, offset, "ashr"))) {
+ HANDLE_FAILURE("LLVMBuildAShr");
+ return false;
+ }
+
+ if (!(min = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0xffff8000, 8))
+ || !(max = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
+ 0x00007fff, 8))) {
+ return false;
+ }
+
+    /* saturate before truncating so the clamp can be pattern-matched */
+ if (!(result = simd_saturate(comp_ctx, func_ctx, e_sat_i32x8, result, min,
+ max, true))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, V128_i16x8_TYPE,
+ "down_to_v8i16"))) {
+        HANDLE_FAILURE("LLVMBuildTrunc");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+enum integer_extmul_type {
+ e_i16x8_extmul_i8x16,
+ e_i32x4_extmul_i16x8,
+ e_i64x2_extmul_i32x4,
+};
+
+static bool
+simd_integer_extmul(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ bool lower_half, bool is_signed,
+ enum integer_extmul_type itype)
+{
+ LLVMValueRef vec1, vec2, result;
+ enum integer_extend_type ext_type[] = {
+ e_ext_i8x16,
+ e_ext_i16x8,
+ e_ext_i32x4,
+ };
+ LLVMTypeRef in_vector_type[] = {
+ V128_i8x16_TYPE,
+ V128_i16x8_TYPE,
+ V128_i32x4_TYPE,
+ };
+
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec1"))
+ || !(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ in_vector_type[itype], "vec2"))) {
+ return false;
+ }
+
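+    /* widen the selected (low or high) half of each operand, then multiply */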
+ if (!(vec1 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
+ vec1, lower_half, is_signed))
+ || !(vec2 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
+ vec2, lower_half, is_signed))) {
+ return false;
+ }
+
+    if (!(result = LLVMBuildMul(comp_ctx->builder, vec1, vec2, "product"))) {
+        HANDLE_FAILURE("LLVMBuildMul");
+        return false;
+    }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i16x8_extmul_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i16x8_extmul_i8x16);
+}
+
+bool
+aot_compile_simd_i32x4_extmul_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i32x4_extmul_i16x8);
+}
+
+bool
+aot_compile_simd_i64x2_extmul_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed)
+{
+ return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
+ e_i64x2_extmul_i32x4);
+}
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h
new file mode 100644
index 000000000..87b8bd684
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_conversions.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_CONVERSIONS_H_
+#define _SIMD_CONVERSIONS_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_narrow_i64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_extend_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_low,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extend_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_low,
+ bool is_signed);
+
+bool
+aot_compile_simd_i64x2_extend_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_trunc_sat_f64x2(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_f64x2_convert_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_extadd_pairwise_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extadd_pairwise_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ bool is_signed);
+
+bool
+aot_compile_simd_i16x8_q15mulr_sat(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_extmul_i8x16(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_low,
+ bool is_signed);
+
+bool
+aot_compile_simd_i32x4_extmul_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool is_low,
+ bool is_signed);
+
+bool
+aot_compile_simd_i64x2_extmul_i32x4(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool lower_half,
+ bool is_signed);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_CONVERSIONS_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c
new file mode 100644
index 000000000..d850fe8f7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_floating_point.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_numberic.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_v128_float_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result = NULL;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ switch (arith_op) {
+ case FLOAT_ADD:
+ result = LLVMBuildFAdd(comp_ctx->builder, lhs, rhs, "sum");
+ break;
+ case FLOAT_SUB:
+ result = LLVMBuildFSub(comp_ctx->builder, lhs, rhs, "difference");
+ break;
+ case FLOAT_MUL:
+ result = LLVMBuildFMul(comp_ctx->builder, lhs, rhs, "product");
+ break;
+ case FLOAT_DIV:
+ result = LLVMBuildFDiv(comp_ctx->builder, lhs, rhs, "quotient");
+ break;
+ default:
+ return false;
+ }
+
+ if (!result) {
+ HANDLE_FAILURE(
+ "LLVMBuildFAdd/LLVMBuildFSub/LLVMBuildFMul/LLVMBuildFDiv");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op)
+{
+ return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f64x2_TYPE);
+}
+
+static bool
+simd_v128_float_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildFNeg(comp_ctx->builder, vector, "neg"))) {
+ HANDLE_FAILURE("LLVMBuildFNeg");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_float_neg(comp_ctx, func_ctx, V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_float_neg(comp_ctx, func_ctx, V128_f64x2_TYPE);
+}
+
+static bool
+simd_float_intrinsic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsic)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[1] = { vector_type };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vector"))) {
+ return false;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 1, vector))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.fabs.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.fabs.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.round.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.round.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.sqrt.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.sqrt.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.ceil.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.ceil.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.floor.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.floor.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.trunc.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.trunc.v2f64");
+}
+
+bool
+aot_compile_simd_f32x4_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ "llvm.rint.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ "llvm.rint.v2f64");
+}
+
+static bool
+simd_float_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMRealPredicate op = FLOAT_MIN == arith_op ? LLVMRealULT : LLVMRealUGT;
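+    /* unordered predicate: a NaN operand makes the compare true, so lhs wins */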
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildFCmp");
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+/* TODO: suggest non-IA platforms check "llvm.minimum.*" and
+ * "llvm.maximum.*" first */
+bool
+aot_compile_simd_f32x4_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
+ V128_f32x4_TYPE);
+}
+
+bool
+aot_compile_simd_f64x2_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
+ V128_f64x2_TYPE);
+}
+
+static bool
+simd_float_pmin_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsic)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMTypeRef param_types[2];
+
+ param_types[0] = vector_type;
+ param_types[1] = vector_type;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 2, lhs, rhs))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_pmin_max(comp_ctx, func_ctx, V128_f32x4_TYPE,
+ run_min ? "llvm.minnum.v4f32"
+ : "llvm.maxnum.v4f32");
+}
+
+bool
+aot_compile_simd_f64x2_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min)
+{
+ return simd_float_pmin_max(comp_ctx, func_ctx, V128_f64x2_TYPE,
+ run_min ? "llvm.minnum.v2f64"
+ : "llvm.maxnum.v2f64");
+}
+
+bool
+aot_compile_simd_f64x2_demote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, elem_0, elem_1, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_f64x2_TYPE, "vector"))) {
+ return false;
+ }
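+    /* f32x4.demote_f64x2_zero: result lanes 2 and 3 remain zero */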
+
+ if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_zero), "elem_0"))
+ || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_one), "elem_1"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ /* fptrunc <f64> elem to <f32> */
+ if (!(elem_0 = LLVMBuildFPTrunc(comp_ctx->builder, elem_0, F32_TYPE,
+ "elem_0_trunc"))
+ || !(elem_1 = LLVMBuildFPTrunc(comp_ctx->builder, elem_1, F32_TYPE,
+ "elem_1_trunc"))) {
+ HANDLE_FAILURE("LLVMBuildFPTrunc");
+ return false;
+ }
+
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
+ LLVM_CONST(f32x4_vec_zero), elem_0,
+ LLVM_CONST(i32_zero), "new_vector_0"))
+ || !(result =
+ LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
+ LLVM_CONST(i32_one), "new_vector_1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_f32x4_promote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, elem_0, elem_1, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_f32x4_TYPE, "vector"))) {
+ return false;
+ }
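+    /* f64x2.promote_low_f32x4: only source lanes 0 and 1 are promoted */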
+
+ if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_zero), "elem_0"))
+ || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
+ LLVM_CONST(i32_one), "elem_1"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ /* fpext <f32> elem to <f64> */
+ if (!(elem_0 =
+ LLVMBuildFPExt(comp_ctx->builder, elem_0, F64_TYPE, "elem_0_ext"))
+ || !(elem_1 = LLVMBuildFPExt(comp_ctx->builder, elem_1, F64_TYPE,
+ "elem_1_ext"))) {
+ HANDLE_FAILURE("LLVMBuildFPExt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
+ LLVM_CONST(f64x2_vec_zero), elem_0,
+ LLVM_CONST(i32_zero), "new_vector_0"))
+ || !(result =
+ LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
+ LLVM_CONST(i32_one), "new_vector_1"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h
new file mode 100644
index 000000000..213b4391f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_floating_point.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_FLOATING_POINT_H_
+#define _SIMD_FLOATING_POINT_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ FloatArithmetic arith_op);
+
+bool
+aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_round(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_round(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_floor(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_floor(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_trunc(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_trunc(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f64x2_nearest(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_min_max(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f32x4_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_pmin_pmax(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx, bool run_min);
+
+bool
+aot_compile_simd_f64x2_demote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_f32x4_promote(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_FLOATING_POINT_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c
new file mode 100644
index 000000000..1d0e6967b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_int_arith.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_integer_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, LLVMTypeRef vector_type)
+{
+ LLVMValueRef lhs, rhs, result = NULL;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ switch (arith_op) {
+ case V128_ADD:
+ result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "sum");
+ break;
+ case V128_SUB:
+ result = LLVMBuildSub(comp_ctx->builder, lhs, rhs, "difference");
+ break;
+ case V128_MUL:
+ result = LLVMBuildMul(comp_ctx->builder, lhs, rhs, "product");
+ break;
+ default:
+            HANDLE_FAILURE("Unsupported arith_op");
+ break;
+ }
+
+ if (!result) {
+ HANDLE_FAILURE("LLVMBuildAdd/LLVMBuildSub/LLVMBuildMul");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op)
+{
+ return simd_integer_arith(comp_ctx, func_ctx, arith_op, V128_i64x2_TYPE);
+}
+
+static bool
+simd_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, LLVMTypeRef type)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, type, "vector"))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildNeg(comp_ctx->builder, vector, "neg"))) {
+ HANDLE_FAILURE("LLVMBuildNeg");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_neg(comp_ctx, func_ctx, V128_i64x2_TYPE);
+}
+
+bool
+aot_compile_simd_i8x16_popcnt(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vector, result;
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i8x16_TYPE, "vector"))) {
+ return false;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
+ "llvm.ctpop.v16i8", V128_i8x16_TYPE,
+ &V128_i8x16_TYPE, 1, vector))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+static bool
+simd_v128_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, V128Arithmetic arith_op, bool is_signed)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMIntPredicate op;
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ if (V128_MIN == arith_op) {
+ op = is_signed ? LLVMIntSLT : LLVMIntULT;
+ }
+ else {
+ op = is_signed ? LLVMIntSGT : LLVMIntUGT;
+ }
+
+ if (!(result = LLVMBuildICmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
+ HANDLE_FAILURE("LLVMBuildICmp");
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
+ HANDLE_FAILURE("LLVMBuildSelect");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i8x16_TYPE, arith_op,
+ is_signed);
+}
+
+bool
+aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i16x8_TYPE, arith_op,
+ is_signed);
+}
+
+bool
+aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ return simd_v128_cmp(comp_ctx, func_ctx, V128_i32x4_TYPE, arith_op,
+ is_signed);
+}
+
+/* llvm.abs.* */
+static bool
+simd_v128_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ char *intrinsic, LLVMTypeRef vector_type)
+{
+ LLVMValueRef vector, result;
+ LLVMTypeRef param_types[] = { vector_type, INT1_TYPE };
+
+ if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "vec"))) {
+ return false;
+ }
+
+ if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
+ vector_type, param_types, 2, vector,
+ /* is_int_min_poison */
+ LLVM_CONST(i1_zero)))) {
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v16i8", V128_i8x16_TYPE);
+}
+
+bool
+aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v8i16", V128_i16x8_TYPE);
+}
+
+bool
+aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v4i32", V128_i32x4_TYPE);
+}
+
+bool
+aot_compile_simd_i64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
+{
+ return simd_v128_abs(comp_ctx, func_ctx, "llvm.abs.v2i64", V128_i64x2_TYPE);
+}
+
+enum integer_avgr_u {
+ e_avgr_u_i8x16,
+ e_avgr_u_i16x8,
+ e_avgr_u_i32x4,
+};
+
+/* TODO: try int_x86_mmx_pavg_b and int_x86_mmx_pavg_w */
+/* (v1 + v2 + 1) / 2 */
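+/* e.g. avgr_u(254, 255) = (254 + 255 + 1) >> 1 = 255 */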
+static bool
+simd_v128_avg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ enum integer_avgr_u itype)
+{
+ LLVMValueRef lhs, rhs, ones, result;
+ LLVMTypeRef vector_ext_type;
+ LLVMTypeRef vector_type[] = {
+ V128_i8x16_TYPE,
+ V128_i16x8_TYPE,
+ V128_i32x4_TYPE,
+ };
+ unsigned lanes[] = { 16, 8, 4 };
+
+ if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ vector_type[itype], "lhs"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I64_TYPE, lanes[itype]))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ if (!(lhs = LLVMBuildZExt(comp_ctx->builder, lhs, vector_ext_type,
+ "zext_to_i64"))
+ || !(rhs = LLVMBuildZExt(comp_ctx->builder, rhs, vector_ext_type,
+ "zext_to_i64"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+
+    /* the operands were zero-extended to i64 lanes above, so this plain
+     * add cannot lose the carry bit */
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "l_add_r"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(ones = simd_build_splat_const_integer_vector(comp_ctx, I64_TYPE, 1,
+ lanes[itype]))) {
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, result, ones, "plus_1"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ if (!(result = LLVMBuildLShr(comp_ctx->builder, result, ones, "avg"))) {
+ HANDLE_FAILURE("LLVMBuildLShr");
+ return false;
+ }
+
+ if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, vector_type[itype],
+ "to_orig_type"))) {
+ HANDLE_FAILURE("LLVMBuildTrunc");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i8x16);
+}
+
+bool
+aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i16x8);
+}
+
+bool
+aot_compile_simd_i32x4_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ return simd_v128_avg(comp_ctx, func_ctx, e_avgr_u_i32x4);
+}
+
+bool
+aot_compile_simd_i32x4_dot_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx)
+{
+ LLVMValueRef vec1, vec2, even_mask, odd_mask, zero, result;
+ LLVMTypeRef vector_ext_type;
+ LLVMValueRef even_element[] = {
+ LLVM_CONST(i32_zero),
+ LLVM_CONST(i32_two),
+ LLVM_CONST(i32_four),
+ LLVM_CONST(i32_six),
+ };
+ LLVMValueRef odd_element[] = {
+ LLVM_CONST(i32_one),
+ LLVM_CONST(i32_three),
+ LLVM_CONST(i32_five),
+ LLVM_CONST(i32_seven),
+ };
+
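+    /* i32x4.dot_i16x8_s: result[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1] */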
+ if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ "vec1"))
+ || !(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
+ V128_i16x8_TYPE, "vec2"))) {
+ return false;
+ }
+
+ if (!(vector_ext_type = LLVMVectorType(I32_TYPE, 8))) {
+ HANDLE_FAILURE("LLVMVectorType");
+ return false;
+ }
+
+ /* sext <v8i16> to <v8i32> */
+ if (!(vec1 = LLVMBuildSExt(comp_ctx->builder, vec1, vector_ext_type,
+ "vec1_v8i32"))
+ || !(vec2 = LLVMBuildSExt(comp_ctx->builder, vec2, vector_ext_type,
+ "vec2_v8i32"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+
+ if (!(result = LLVMBuildMul(comp_ctx->builder, vec1, vec2, "product"))) {
+ HANDLE_FAILURE("LLVMBuildMul");
+ return false;
+ }
+
+ /* pick elements with even indexes and odd indexes */
+ if (!(even_mask = LLVMConstVector(even_element, 4))
+ || !(odd_mask = LLVMConstVector(odd_element, 4))) {
+ HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(zero = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE, 0,
+ 8))) {
+ return false;
+ }
+
+ if (!(vec1 = LLVMBuildShuffleVector(comp_ctx->builder, result, zero,
+ even_mask, "even_result"))
+ || !(vec2 = LLVMBuildShuffleVector(comp_ctx->builder, result, zero,
+ odd_mask, "odd_result"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildAdd(comp_ctx->builder, vec1, vec2, "new_vec"))) {
+ HANDLE_FAILURE("LLVMBuildAdd");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h
new file mode 100644
index 000000000..a7a21170a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_int_arith.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_INT_ARITH_H_
+#define _SIMD_INT_ARITH_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic cond);
+
+bool
+aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_popcnt(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_avgr_u(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+bool
+aot_compile_simd_i32x4_dot_i16x8(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_INT_ARITH_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c
new file mode 100644
index 000000000..d166e954c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_common.h"
+#include "simd_load_store.h"
+#include "../aot_emit_exception.h"
+#include "../aot_emit_memory.h"
+#include "../../aot/aot_runtime.h"
+#include "../../interpreter/wasm_opcode.h"
+
+/* data_length in bytes */
+static LLVMValueRef
+simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 data_length, LLVMTypeRef ptr_type,
+ LLVMTypeRef data_type)
+{
+ LLVMValueRef maddr, data;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset,
+ data_length))) {
+ HANDLE_FAILURE("aot_check_memory_overflow");
+ return NULL;
+ }
+
+ if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type,
+ "data_ptr"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ return NULL;
+ }
+
+ if (!(data = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, "data"))) {
+        HANDLE_FAILURE("LLVMBuildLoad2");
+ return NULL;
+ }
+
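+    /* wasm does not guarantee alignment, so emit an align=1 load */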
+ LLVMSetAlignment(data, 1);
+
+ return data;
+}
+
+bool
+aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef result;
+
+ if (!(result = simd_load(comp_ctx, func_ctx, align, offset, 16,
+ V128_PTR_TYPE, V128_TYPE))) {
+ return false;
+ }
+
+ PUSH_V128(result);
+
+ return true;
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ LLVMValueRef sub_vector, result;
+ uint32 opcode_index = opcode - SIMD_v128_load8x8_s;
+ bool signeds[] = { true, false, true, false, true, false };
+ LLVMTypeRef vector_types[] = {
+ V128_i16x8_TYPE, V128_i16x8_TYPE, V128_i32x4_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE, V128_i64x2_TYPE,
+ };
+ LLVMTypeRef sub_vector_types[] = {
+ LLVMVectorType(INT8_TYPE, 8), LLVMVectorType(INT8_TYPE, 8),
+ LLVMVectorType(INT16_TYPE, 4), LLVMVectorType(INT16_TYPE, 4),
+ LLVMVectorType(I32_TYPE, 2), LLVMVectorType(I32_TYPE, 2),
+ };
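+    /* load a 64-bit half vector, then sext/zext each lane to double width */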
+ LLVMTypeRef sub_vector_type, sub_vector_ptr_type;
+
+ bh_assert(opcode_index < 6);
+
+ sub_vector_type = sub_vector_types[opcode_index];
+
+ /* to vector ptr type */
+ if (!sub_vector_type
+ || !(sub_vector_ptr_type = LLVMPointerType(sub_vector_type, 0))) {
+ HANDLE_FAILURE("LLVMPointerType");
+ return false;
+ }
+
+ if (!(sub_vector = simd_load(comp_ctx, func_ctx, align, offset, 8,
+ sub_vector_ptr_type, sub_vector_type))) {
+ return false;
+ }
+
+ if (signeds[opcode_index]) {
+ if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
+ vector_types[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildSExt");
+ return false;
+ }
+ }
+ else {
+ if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
+ vector_types[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildZExt");
+ return false;
+ }
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ uint32 opcode_index = opcode - SIMD_v128_load8_splat;
+ LLVMValueRef element, result;
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { INT8_TYPE, INT16_TYPE, I32_TYPE,
+ I64_TYPE };
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMValueRef undefs[] = {
+ LLVM_CONST(i8x16_undef),
+ LLVM_CONST(i16x8_undef),
+ LLVM_CONST(i32x4_undef),
+ LLVM_CONST(i64x2_undef),
+ };
+ LLVMValueRef masks[] = {
+ LLVM_CONST(i32x16_zero),
+ LLVM_CONST(i32x8_zero),
+ LLVM_CONST(i32x4_zero),
+ LLVM_CONST(i32x2_zero),
+ };
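+    /* an all-zero shuffle mask broadcasts lane 0 into every result lane */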
+
+ bh_assert(opcode_index < 4);
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildInsertElement(comp_ctx->builder, undefs[opcode_index],
+ element, I32_ZERO, "base"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result,
+ undefs[opcode_index],
+ masks[opcode_index], "vector"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id)
+{
+ LLVMValueRef element, vector;
+ uint32 opcode_index = opcode - SIMD_v128_load8_lane;
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { INT8_TYPE, INT16_TYPE, I32_TYPE,
+ I64_TYPE };
+ LLVMTypeRef vector_types[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMValueRef lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id);
+
+ bh_assert(opcode_index < 4);
+
+ if (!(vector = simd_pop_v128_and_bitcast(
+ comp_ctx, func_ctx, vector_types[opcode_index], "src"))) {
+ return false;
+ }
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(vector = LLVMBuildInsertElement(comp_ctx->builder, vector, element,
+ lane, "dst"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, vector, "result");
+}
+
+bool
+aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset)
+{
+ LLVMValueRef element, result, mask;
+ uint32 opcode_index = opcode - SIMD_v128_load32_zero;
+ uint32 data_lengths[] = { 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT32_PTR_TYPE, INT64_PTR_TYPE };
+ LLVMTypeRef element_data_types[] = { I32_TYPE, I64_TYPE };
+ LLVMValueRef zero[] = {
+ LLVM_CONST(i32x4_vec_zero),
+ LLVM_CONST(i64x2_vec_zero),
+ };
+ LLVMValueRef undef[] = {
+ LLVM_CONST(i32x4_undef),
+ LLVM_CONST(i64x2_undef),
+ };
+ uint32 mask_length[] = { 4, 2 };
+ LLVMValueRef mask_element[][4] = {
+ { LLVM_CONST(i32_zero), LLVM_CONST(i32_four), LLVM_CONST(i32_five),
+ LLVM_CONST(i32_six) },
+ { LLVM_CONST(i32_zero), LLVM_CONST(i32_two) },
+ };
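+    /* mask lane 0 picks the loaded element; other lanes index the zero vector */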
+
+ bh_assert(opcode_index < 2);
+
+ if (!(element = simd_load(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index],
+ element_ptr_types[opcode_index],
+ element_data_types[opcode_index]))) {
+ return false;
+ }
+
+ if (!(result =
+ LLVMBuildInsertElement(comp_ctx->builder, undef[opcode_index],
+ element, I32_ZERO, "vector"))) {
+ HANDLE_FAILURE("LLVMBuildInsertElement");
+ return false;
+ }
+
+ /* fill in other lanes with zero */
+ if (!(mask = LLVMConstVector(mask_element[opcode_index],
+ mask_length[opcode_index]))) {
+        HANDLE_FAILURE("LLVMConstVector");
+ return false;
+ }
+
+ if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result,
+ zero[opcode_index], mask,
+ "fill_in_zero"))) {
+ HANDLE_FAILURE("LLVMBuildShuffleVector");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+/* data_length in bytes */
+static bool
+simd_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
+ uint32 offset, uint32 data_length, LLVMValueRef value,
+ LLVMTypeRef value_ptr_type)
+{
+ LLVMValueRef maddr, result;
+
+ if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset,
+ data_length)))
+ return false;
+
+ if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, value_ptr_type,
+ "data_ptr"))) {
+ HANDLE_FAILURE("LLVMBuildBitCast");
+ return false;
+ }
+
+ if (!(result = LLVMBuildStore(comp_ctx->builder, value, maddr))) {
+ HANDLE_FAILURE("LLVMBuildStore");
+ return false;
+ }
+
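+    /* as with loads, use align=1 since wasm does not guarantee alignment */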
+ LLVMSetAlignment(result, 1);
+
+ return true;
+}
+
+bool
+aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset)
+{
+ LLVMValueRef value;
+
+ POP_V128(value);
+
+ return simd_store(comp_ctx, func_ctx, align, offset, 16, value,
+ V128_PTR_TYPE);
+fail:
+ return false;
+}
+
+bool
+aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id)
+{
+ LLVMValueRef element, vector;
+ uint32 data_lengths[] = { 1, 2, 4, 8 };
+ LLVMTypeRef element_ptr_types[] = { INT8_PTR_TYPE, INT16_PTR_TYPE,
+ INT32_PTR_TYPE, INT64_PTR_TYPE };
+ uint32 opcode_index = opcode - SIMD_v128_store8_lane;
+ LLVMTypeRef vector_types[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
+ V128_i32x4_TYPE, V128_i64x2_TYPE };
+ LLVMValueRef lane = simd_lane_id_to_llvm_value(comp_ctx, lane_id);
+
+ bh_assert(opcode_index < 4);
+
+ if (!(vector = simd_pop_v128_and_bitcast(
+ comp_ctx, func_ctx, vector_types[opcode_index], "src"))) {
+ return false;
+ }
+
+ if (!(element = LLVMBuildExtractElement(comp_ctx->builder, vector, lane,
+ "element"))) {
+ HANDLE_FAILURE("LLVMBuildExtractElement");
+ return false;
+ }
+
+ return simd_store(comp_ctx, func_ctx, align, offset,
+ data_lengths[opcode_index], element,
+ element_ptr_types[opcode_index]);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h
new file mode 100644
index 000000000..fd118ec1b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_load_store.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_LOAD_STORE_H_
+#define _SIMD_LOAD_STORE_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id);
+
+bool
+aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint32 align, uint32 offset);
+
+bool
+aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ uint8 opcode, uint32 align, uint32 offset,
+ uint8 lane_id);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_LOAD_STORE_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c
new file mode 100644
index 000000000..1de4520a7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "simd_sat_int_arith.h"
+#include "simd_common.h"
+#include "../aot_emit_exception.h"
+#include "../../aot/aot_runtime.h"
+
+static bool
+simd_sat_int_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ LLVMTypeRef vector_type, const char *intrinsics)
+{
+ LLVMValueRef lhs, rhs, result;
+ LLVMTypeRef param_types[2];
+
+ if (!(rhs =
+ simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
+ || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
+ "lhs"))) {
+ return false;
+ }
+
+ param_types[0] = vector_type;
+ param_types[1] = vector_type;
+
+ if (!(result =
+ aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsics,
+ vector_type, param_types, 2, lhs, rhs))) {
+ HANDLE_FAILURE("LLVMBuildCall");
+ return false;
+ }
+
+ return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
+}
+
+bool
+aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v16i8", "llvm.uadd.sat.v16i8" },
+ { "llvm.ssub.sat.v16i8", "llvm.usub.sat.v16i8" },
+ };
+
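+    /* row: arith_op (add-sat then sub-sat); column: signedness */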
+ return simd_sat_int_arith(comp_ctx, func_ctx, V128_i8x16_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
+
+bool
+aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v8i16", "llvm.uadd.sat.v8i16" },
+ { "llvm.ssub.sat.v8i16", "llvm.usub.sat.v8i16" },
+ };
+
+ return simd_sat_int_arith(comp_ctx, func_ctx, V128_i16x8_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
+
+bool
+aot_compile_simd_i32x4_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed)
+{
+ char *intrinsics[][2] = {
+ { "llvm.sadd.sat.v4i32", "llvm.uadd.sat.v4i32" },
+ { "llvm.ssub.sat.v4i32", "llvm.usub.sat.v4i32" },
+ };
+
+    return simd_sat_int_arith(comp_ctx, func_ctx, V128_i32x4_TYPE,
+ is_signed ? intrinsics[arith_op][0]
+ : intrinsics[arith_op][1]);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h
new file mode 100644
index 000000000..e30acaaf4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/compilation/simd/simd_sat_int_arith.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SIMD_SAT_INT_ARITH_H_
+#define _SIMD_SAT_INT_ARITH_H_
+
+#include "../aot_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+bool
+aot_compile_simd_i32x4_saturate(AOTCompContext *comp_ctx,
+ AOTFuncContext *func_ctx,
+ V128Arithmetic arith_op, bool is_signed);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif /* end of _SIMD_SAT_INT_ARITH_H_ */