summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch
diff options
context:
space:
mode:
Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch')
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64.s81
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64_simd.s79
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arc.s69
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm.s75
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm_vfp.s86
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.asm62
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.s64
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.asm62
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.s64
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_general.c114
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.asm27
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.s37
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64.s57
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64_simd.s57
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mips.s74
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_osx_universal.s18
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_riscv.S148
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb.s91
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb_vfp.s100
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_xtensa.s74
20 files changed, 1439 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64.s
new file mode 100644
index 000000000..ea5cbcb36
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64.s
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * x0 function ptr
+ * x1 argv
+ * x2 nstacks
+ */
+
+ sub sp, sp, #0x30
+ stp x19, x20, [sp, #0x20] /* save the registers */
+ stp x21, x22, [sp, #0x10]
+ stp x23, x24, [sp, #0x0]
+
+ mov x19, x0 /* x19 = function ptr */
+ mov x20, x1 /* x20 = argv */
+ mov x21, x2 /* x21 = nstacks */
+ mov x22, sp /* save the sp before call function */
+
+ /* Fill in float-point registers */
+ ldp d0, d1, [x20], #16 /* d0 = argv[0], d1 = argv[1] */
+ ldp d2, d3, [x20], #16 /* d2 = argv[2], d3 = argv[3] */
+ ldp d4, d5, [x20], #16 /* d4 = argv[4], d5 = argv[5] */
+ ldp d6, d7, [x20], #16 /* d6 = argv[6], d7 = argv[7] */
+
+ /* Fill integer registers */
+ ldp x0, x1, [x20], #16 /* x0 = argv[8] = exec_env, x1 = argv[9] */
+ ldp x2, x3, [x20], #16 /* x2 = argv[10], x3 = argv[11] */
+ ldp x4, x5, [x20], #16 /* x4 = argv[12], x5 = argv[13] */
+ ldp x6, x7, [x20], #16 /* x6 = argv[14], x7 = argv[15] */
+
+ /* Now x20 points to stack args */
+
+ /* Directly call the function if no args in stack */
+ cmp x21, #0
+ beq call_func
+
+ /* Fill all stack args: reserve stack space and fill one by one */
+ mov x23, sp
+ bic sp, x23, #15 /* Ensure stack is 16 bytes aligned */
+ lsl x23, x21, #3 /* x23 = nstacks * 8 */
+ add x23, x23, #15 /* x23 = (x23 + 15) & ~15 */
+ bic x23, x23, #15
+ sub sp, sp, x23 /* reserved stack space for stack arguments */
+ mov x23, sp
+
+loop_stack_args: /* copy stack arguments to stack */
+ cmp x21, #0
+ beq call_func
+ ldr x24, [x20], #8
+ str x24, [x23], #8
+ sub x21, x21, #1
+ b loop_stack_args
+
+call_func:
+ mov x20, x30 /* save x30(lr) */
+ blr x19
+ mov sp, x22 /* restore sp which is saved before calling function */
+
+return:
+ mov x30, x20 /* restore x30(lr) */
+ ldp x19, x20, [sp, #0x20] /* restore the registers in stack */
+ ldp x21, x22, [sp, #0x10]
+ ldp x23, x24, [sp, #0x0]
+ add sp, sp, #0x30 /* restore sp */
+ ret
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64_simd.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64_simd.s
new file mode 100644
index 000000000..a6ccc1508
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_aarch64_simd.s
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * x0 function ptr
+ * x1 argv
+ * x2 nstacks
+ */
+
+ sub sp, sp, #0x30
+ stp x19, x20, [sp, #0x20] /* save the registers */
+ stp x21, x22, [sp, #0x10]
+ stp x23, x24, [sp, #0x0]
+
+ mov x19, x0 /* x19 = function ptr */
+ mov x20, x1 /* x20 = argv */
+ mov x21, x2 /* x21 = nstacks */
+ mov x22, sp /* save the sp before call function */
+
+ /* Fill in float-point registers */
+ ld1 {v0.2D, v1.2D, v2.2D, v3.2D}, [x20], #64 /* v0 = argv[0], v1 = argv[1], v2 = argv[2], v3 = argv[3]*/
+ ld1 {v4.2D, v5.2D, v6.2D, v7.2D}, [x20], #64 /* v4 = argv[4], v5 = argv[5], v6 = argv[6], v7 = argv[7]*/
+
+ /* Fill integer registers */
+ ldp x0, x1, [x20], #16 /* x0 = argv[8] = exec_env, x1 = argv[9] */
+ ldp x2, x3, [x20], #16 /* x2 = argv[10], x3 = argv[11] */
+ ldp x4, x5, [x20], #16 /* x4 = argv[12], x5 = argv[13] */
+ ldp x6, x7, [x20], #16 /* x6 = argv[14], x7 = argv[15] */
+
+ /* Now x20 points to stack args */
+
+ /* Directly call the function if no args in stack */
+ cmp x21, #0
+ beq call_func
+
+ /* Fill all stack args: reserve stack space and fill one by one */
+ mov x23, sp
+ bic sp, x23, #15 /* Ensure stack is 16 bytes aligned */
+ lsl x23, x21, #3 /* x23 = nstacks * 8 */
+ add x23, x23, #15 /* x23 = (x23 + 15) & ~15 */
+ bic x23, x23, #15
+ sub sp, sp, x23 /* reserved stack space for stack arguments */
+ mov x23, sp
+
+loop_stack_args: /* copy stack arguments to stack */
+ cmp x21, #0
+ beq call_func
+ ldr x24, [x20], #8
+ str x24, [x23], #8
+ sub x21, x21, #1
+ b loop_stack_args
+
+call_func:
+ mov x20, x30 /* save x30(lr) */
+ blr x19
+ mov sp, x22 /* restore sp which is saved before calling function */
+
+return:
+ mov x30, x20 /* restore x30(lr) */
+ ldp x19, x20, [sp, #0x20] /* restore the registers in stack */
+ ldp x21, x22, [sp, #0x10]
+ ldp x23, x24, [sp, #0x0]
+ add sp, sp, #0x30 /* restore sp */
+ ret
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arc.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arc.s
new file mode 100644
index 000000000..e448eea65
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arc.s
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ * r0: function ptr
+ * r1: argv
+ * r2: nstacks
+ * ARC ABI:
+ * r0-r7: function arguments, caller-saved
+ * r8-r12: temp registers, caller-saved
+ */
+
+ push_s blink /* push return addr */
+ st.aw fp, [sp, -4] /* push fp */
+ mov fp, sp /* fp = sp */
+
+ mov r8, r0 /* r8 = func_ptr */
+ mov r9, r1 /* r9 = argv */
+ mov r10, r2 /* r10 = nstacks */
+
+ ld r0, [r9, 0] /* r0 = argv[0] */
+ ld r1, [r9, 4] /* r1 = argv[1] */
+ ld r2, [r9, 8] /* r2 = argv[2] */
+ ld r3, [r9, 12] /* r3 = argv[3] */
+ ld r4, [r9, 16] /* r4 = argv[4] */
+ ld r5, [r9, 20] /* r5 = argv[5] */
+ ld r6, [r9, 24] /* r6 = argv[6] */
+ ld r7, [r9, 28] /* r7 = argv[7] */
+
+ add r9, r9, 32 /* r9 = stack_args */
+ breq r10, 0, call_func /* if (r10 == 0) goto call_func */
+
+ asl r11, r10, 2 /* r11 = nstacks * 4 */
+ sub sp, sp, r11 /* sp = sp - nstacks * 4 */
+ and sp, sp, ~7 /* make sp 8-byte aligned */
+ mov r11, sp /* r11 = sp */
+
+loop_stack_args:
+ breq r10, 0, call_func /* if (r10 == 0) goto call_func */
+ ld r12, [r9] /* r12 = stack_args[i] */
+ st r12, [r11] /* stack[i] = r12 */
+ add r9, r9, 4 /* r9 = r9 + 4 */
+ add r11, r11, 4 /* r11 = r11 + 4 */
+ sub r10, r10, 1 /* r10 = r10 - 1 */
+ j loop_stack_args
+
+call_func:
+ jl [r8] /* call function */
+
+ mov sp, fp /* sp = fp */
+ ld.ab fp, [sp, 4] /* pop fp */
+ pop_s blink /* pop return addr */
+ j_s [blink] /* ret */
+ nop_s
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm.s
new file mode 100644
index 000000000..bfe8e3b09
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm.s
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * r0 function ptr
+ * r1 argv
+ * r2 argc
+ */
+
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ sub sp, sp, #4 /* make sp 8 byte aligned */
+ mov ip, r0 /* ip = function ptr */
+ mov r4, r1 /* r4 = argv */
+ mov r5, r2 /* r5 = argc */
+
+ cmp r5, #1 /* at least one argument required: exec_env */
+ blt return
+
+ mov r6, #0 /* increased stack size */
+
+ ldr r0, [r4], #4 /* r0 = argv[0] = exec_env */
+ cmp r5, #1
+ beq call_func
+
+ ldr r1, [r4], #4 /* r1 = argv[1] */
+ cmp r5, #2
+ beq call_func
+
+ ldr r2, [r4], #4 /* r2 = argv[2] */
+ cmp r5, #3
+ beq call_func
+
+ ldr r3, [r4], #4 /* r3 = argv[3] */
+ cmp r5, #4
+ beq call_func
+
+ sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
+
+ /* Ensure address is 8 byte aligned */
+ mov r6, r5, lsl#2 /* r6 = argc * 4 */
+ add r6, r6, #7 /* r6 = (r6 + 7) & ~7 */
+ bic r6, r6, #7
+ sub sp, sp, r6 /* reserved stack space for left arguments */
+ mov r7, sp
+
+loop_args: /* copy left arguments to stack */
+ cmp r5, #0
+ beq call_func
+ ldr lr, [r4], #4
+ str lr, [r7], #4
+ sub r5, r5, #1
+ b loop_args
+
+call_func:
+ blx ip
+ add sp, sp, r6 /* restore sp */
+
+return:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, r6, r7, lr}
+ bx lr
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm_vfp.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm_vfp.s
new file mode 100644
index 000000000..78a4bab82
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_arm_vfp.s
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * r0 function ptr
+ * r1 argv
+ * r2 nstacks
+ */
+
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ sub sp, sp, #4 /* make sp 8 byte aligned */
+ mov ip, r0 /* ip = function ptr */
+ mov r4, r1 /* r4 = argv */
+ mov r5, r2 /* r5 = nstacks */
+ mov r6, sp
+
+ /* Fill all int args */
+ ldr r0, [r4], #4 /* r0 = *(int*)&argv[0] = exec_env */
+ ldr r1, [r4], #4 /* r1 = *(int*)&argv[1] */
+ ldr r2, [r4], #4 /* r2 = *(int*)&argv[2] */
+ ldr r3, [r4], #4 /* r3 = *(int*)&argv[3] */
+
+ /* Fill all float/double args to 16 single-precision registers, s0-s15, */
+ /* which may also be accessed as 8 double-precision registers, d0-d7 (with */
+ /* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
+ vldr s0, [r4, #0] /* s0 = *(float*)&argv[4] */
+ vldr s1, [r4, #4]
+ vldr s2, [r4, #8]
+ vldr s3, [r4, #12]
+ vldr s4, [r4, #16]
+ vldr s5, [r4, #20]
+ vldr s6, [r4, #24]
+ vldr s7, [r4, #28]
+ vldr s8, [r4, #32]
+ vldr s9, [r4, #36]
+ vldr s10, [r4, #40]
+ vldr s11, [r4, #44]
+ vldr s12, [r4, #48]
+ vldr s13, [r4, #52]
+ vldr s14, [r4, #56]
+ vldr s15, [r4, #60]
+ /* Directly call the function if no args in stack */
+ cmp r5, #0
+ beq call_func
+
+
+ /* Fill all stack args: reserve stack space and fill one by one */
+ add r4, r4, #64 /* r4 points to stack args */
+ bic sp, sp, #7 /* Ensure stack is 8 byte aligned */
+ mov r7, r5, lsl#2 /* r7 = nstacks * 4 */
+ add r7, r7, #7 /* r7 = (r7 + 7) & ~7 */
+ bic r7, r7, #7
+ sub sp, sp, r7 /* reserved stack space for stack arguments */
+ mov r7, sp
+
+loop_stack_args: /* copy stack arguments to stack */
+ cmp r5, #0
+ beq call_func
+ ldr lr, [r4], #4 /* Note: caller should ensure int64 and */
+ str lr, [r7], #4 /* double are placed in 8 bytes aligned address */
+ sub r5, r5, #1
+ b loop_stack_args
+
+call_func:
+ blx ip
+ mov sp, r6 /* restore sp */
+
+return:
+ add sp, sp, #4 /* make sp 8 byte aligned */
+ ldmfd sp!, {r4, r5, r6, r7, lr}
+ bx lr
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.asm b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.asm
new file mode 100644
index 000000000..df8115397
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.asm
@@ -0,0 +1,62 @@
+;
+; Copyright (C) 2019 Intel Corporation. All rights reserved.
+; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+;
+
+_TEXT SEGMENT
+ ; rcx func_ptr
+ ; rdx argv
+ ; r8 n_stacks
+
+invokeNative PROC
+ push rbp
+ mov rbp, rsp
+
+ mov r10, rcx ; func_ptr
+ mov rax, rdx ; argv
+ mov rcx, r8 ; n_stacks
+
+; fill all fp args
+ movsd xmm0, qword ptr [rax + 0]
+ movsd xmm1, qword ptr [rax + 8]
+ movsd xmm2, qword ptr [rax + 16]
+ movsd xmm3, qword ptr [rax + 24]
+
+; check for stack args
+ cmp rcx, 0
+ jz cycle_end
+
+ mov rdx, rsp
+ and rdx, 15
+ jz no_abort
+ int 3
+no_abort:
+ mov rdx, rcx
+ and rdx, 1
+ shl rdx, 3
+ sub rsp, rdx
+
+; store stack args
+ lea r9, qword ptr [rax + rcx * 8 + 56]
+ sub r9, rsp ; offset
+cycle:
+ push qword ptr [rsp + r9]
+ loop cycle
+
+cycle_end:
+ mov rcx, [rax + 32]
+ mov rdx, [rax + 40]
+ mov r8, [rax + 48]
+ mov r9, [rax + 56]
+
+ sub rsp, 32 ; shadow space
+
+ call r10
+ leave
+ ret
+
+invokeNative ENDP
+
+_TEXT ENDS
+
+END
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.s
new file mode 100644
index 000000000..739e84e4c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64.s
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+.globl invokeNative
+ .type invokeNative, @function
+invokeNative:
+#else
+.globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+ /* rdi - function ptr */
+ /* rsi - argv */
+ /* rdx - n_stacks */
+
+ push %rbp
+ mov %rsp, %rbp
+
+ mov %rdx, %r10
+ mov %rsp, %r11 /* Check that stack is aligned on */
+ and $8, %r11 /* 16 bytes. This code may be removed */
+ je check_stack_succ /* when we are sure that compiler always */
+ int3 /* calls us with aligned stack */
+check_stack_succ:
+ mov %r10, %r11 /* Align stack on 16 bytes before pushing */
+ and $1, %r11 /* stack arguments in case we have an odd */
+ shl $3, %r11 /* number of stack arguments */
+ sub %r11, %rsp
+ /* store memory args */
+ movq %rdi, %r11 /* func ptr */
+ movq %r10, %rcx /* counter */
+ lea 64+48-8(%rsi,%rcx,8), %r10
+ sub %rsp, %r10
+ cmpq $0, %rcx
+ je push_args_end
+push_args:
+ push 0(%rsp,%r10)
+ loop push_args
+push_args_end:
+ /* fill all fp args */
+ movq 0x00(%rsi), %xmm0
+ movq 0x08(%rsi), %xmm1
+ movq 0x10(%rsi), %xmm2
+ movq 0x18(%rsi), %xmm3
+ movq 0x20(%rsi), %xmm4
+ movq 0x28(%rsi), %xmm5
+ movq 0x30(%rsi), %xmm6
+ movq 0x38(%rsi), %xmm7
+
+ /* fill all int args */
+ movq 0x40(%rsi), %rdi
+ movq 0x50(%rsi), %rdx
+ movq 0x58(%rsi), %rcx
+ movq 0x60(%rsi), %r8
+ movq 0x68(%rsi), %r9
+ movq 0x48(%rsi), %rsi
+
+ call *%r11
+ leave
+ ret
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.asm b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.asm
new file mode 100644
index 000000000..084a0f667
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.asm
@@ -0,0 +1,62 @@
+;
+; Copyright (C) 2019 Intel Corporation. All rights reserved.
+; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+;
+
+_TEXT SEGMENT
+ ; rcx func_ptr
+ ; rdx argv
+ ; r8 n_stacks
+
+invokeNative PROC
+ push rbp
+ mov rbp, rsp
+
+ mov r10, rcx ; func_ptr
+ mov rax, rdx ; argv
+ mov rcx, r8 ; n_stacks
+
+; fill all fp args
+ movdqu xmm0, xmmword ptr [rax + 0]
+ movdqu xmm1, xmmword ptr [rax + 16]
+ movdqu xmm2, xmmword ptr [rax + 32]
+ movdqu xmm3, xmmword ptr [rax + 48]
+
+; check for stack args
+ cmp rcx, 0
+ jz cycle_end
+
+ mov rdx, rsp
+ and rdx, 15
+ jz no_abort
+ int 3
+no_abort:
+ mov rdx, rcx
+ and rdx, 1
+ shl rdx, 3
+ sub rsp, rdx
+
+; store stack args
+ lea r9, qword ptr [rax + rcx * 8 + 88]
+ sub r9, rsp ; offset
+cycle:
+ push qword ptr [rsp + r9]
+ loop cycle
+
+cycle_end:
+ mov rcx, [rax + 64]
+ mov rdx, [rax + 72]
+ mov r8, [rax + 80]
+ mov r9, [rax + 88]
+
+ sub rsp, 32 ; shadow space
+
+ call r10
+ leave
+ ret
+
+invokeNative ENDP
+
+_TEXT ENDS
+
+END
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.s
new file mode 100644
index 000000000..0043ac941
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_em64_simd.s
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+.globl invokeNative
+ .type invokeNative, @function
+invokeNative:
+#else
+.globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+ /* rdi - function ptr */
+ /* rsi - argv */
+ /* rdx - n_stacks */
+
+ push %rbp
+ mov %rsp, %rbp
+
+ mov %rdx, %r10
+ mov %rsp, %r11 /* Check that stack is aligned on */
+ and $8, %r11 /* 16 bytes. This code may be removed */
+ je check_stack_succ /* when we are sure that compiler always */
+ int3 /* calls us with aligned stack */
+check_stack_succ:
+ mov %r10, %r11 /* Align stack on 16 bytes before pushing */
+ and $1, %r11 /* stack arguments in case we have an odd */
+ shl $3, %r11 /* number of stack arguments */
+ sub %r11, %rsp
+ /* store memory args */
+ movq %rdi, %r11 /* func ptr */
+ movq %r10, %rcx /* counter */
+ lea 128+48-8(%rsi,%rcx,8), %r10
+ sub %rsp, %r10
+ cmpq $0, %rcx
+ je push_args_end
+push_args:
+ push 0(%rsp,%r10)
+ loop push_args
+push_args_end:
+ /* fill all fp args */
+ movdqu 0x00(%rsi), %xmm0
+ movdqu 0x10(%rsi), %xmm1
+ movdqu 0x20(%rsi), %xmm2
+ movdqu 0x30(%rsi), %xmm3
+ movdqu 0x40(%rsi), %xmm4
+ movdqu 0x50(%rsi), %xmm5
+ movdqu 0x60(%rsi), %xmm6
+ movdqu 0x70(%rsi), %xmm7
+
+ /* fill all int args */
+ movq 0x80(%rsi), %rdi
+ movq 0x90(%rsi), %rdx
+ movq 0x98(%rsi), %rcx
+ movq 0xa0(%rsi), %r8
+ movq 0xa8(%rsi), %r9
+ movq 0x88(%rsi), %rsi
+
+ call *%r11
+ leave
+ ret
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_general.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_general.c
new file mode 100644
index 000000000..4799c9fa8
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_general.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "../wasm_runtime_common.h"
+#include "../wasm_exec_env.h"
+
+void
+invokeNative(void (*native_code)(), uint32 argv[], uint32 argc)
+{
+ bh_assert(argc >= sizeof(WASMExecEnv *) / sizeof(uint32));
+
+ switch (argc) {
+ case 0:
+ native_code();
+ break;
+ case 1:
+ native_code(argv[0]);
+ break;
+ case 2:
+ native_code(argv[0], argv[1]);
+ break;
+ case 3:
+ native_code(argv[0], argv[1], argv[2]);
+ break;
+ case 4:
+ native_code(argv[0], argv[1], argv[2], argv[3]);
+ break;
+ case 5:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4]);
+ break;
+ case 6:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
+ break;
+ case 7:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6]);
+ break;
+ case 8:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7]);
+ break;
+ case 9:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8]);
+ break;
+ case 10:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9]);
+ break;
+ case 11:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10]);
+ break;
+ case 12:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
+ break;
+ case 13:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12]);
+ break;
+ case 14:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13]);
+ break;
+ case 15:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14]);
+ break;
+ case 16:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14], argv[15]);
+ break;
+ case 17:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14], argv[15], argv[16]);
+ break;
+ case 18:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14], argv[15], argv[16],
+ argv[17]);
+ break;
+ case 19:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14], argv[15], argv[16],
+ argv[17], argv[18]);
+ break;
+ case 20:
+ native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5],
+ argv[6], argv[7], argv[8], argv[9], argv[10], argv[11],
+ argv[12], argv[13], argv[14], argv[15], argv[16],
+ argv[17], argv[18], argv[19]);
+ break;
+ default:
+ {
+ /* FIXME: If this happen, add more cases. */
+ WASMExecEnv *exec_env = *(WASMExecEnv **)argv;
+ WASMModuleInstanceCommon *module_inst = exec_env->module_inst;
+ wasm_runtime_set_exception(
+ module_inst,
+ "the argument number of native function exceeds maximum");
+ return;
+ }
+ }
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.asm b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.asm
new file mode 100644
index 000000000..c52c8d6ed
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.asm
@@ -0,0 +1,27 @@
+;
+; Copyright (C) 2019 Intel Corporation. All rights reserved.
+; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+;
+
+ .386
+ .model flat
+ .code
+_invokeNative PROC
+ push ebp
+ mov ebp,esp
+ mov ecx, [ebp+16] ; ecx = argc
+ mov edx, [ebp+12] ; edx = argv
+ test ecx, ecx
+ jz skip_push_args ; if ecx == 0, skip pushing arguments
+ lea edx, [edx+ecx*4-4] ; edx = edx + ecx * 4 - 4
+ sub edx,esp ; edx = edx - esp
+loop_push:
+ push [esp+edx]
+ loop loop_push ; loop ecx counts
+skip_push_args:
+ mov edx, [ebp+8] ; edx = func_ptr */
+ call edx
+ leave
+ ret
+_invokeNative ENDP
+END \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.s
new file mode 100644
index 000000000..de1c1a5e1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_ia32.s
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+.globl invokeNative
+ .type invokeNative, @function
+invokeNative:
+#else
+.globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+ push %ebp
+ movl %esp, %ebp
+ movl 16(%ebp), %ecx /* ecx = argc */
+ leal 2(%ecx), %edx /* edx = ecx + 2 (count return address and saved ebp) */
+ andl $3, %edx /* edx = edx % 4 */
+ jz stack_aligned /* if edx == 0, stack is already 16 bytes aligned */
+ leal -16(%esp, %edx, 4), %esp /* esp = esp - 16 + edx * 4 */
+stack_aligned:
+ test %ecx, %ecx
+ jz skip_push_args /* if ecx == 0, skip pushing arguments */
+ movl 12(%ebp), %edx /* edx = argv */
+ leal -4(%edx,%ecx,4), %edx /* edx = edx + ecx * 4 - 4 */
+ subl %esp, %edx /* edx = edx - esp */
+1:
+ push 0(%esp,%edx)
+ loop 1b /* loop ecx counts */
+skip_push_args:
+ movl 8(%ebp), %edx /* edx = func_ptr */
+ call *%edx
+ leave
+ ret
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64.s
new file mode 100644
index 000000000..cefaa28c1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64.s
@@ -0,0 +1,57 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+.text
+.align 2
+.globl invokeNative
+invokeNative:
+
+ # %rcx func_ptr
+ # %rdx argv
+ # %r8 n_stacks
+
+ push %rbp
+ mov %rsp, %rbp
+
+ mov %rcx, %r10 # func_ptr
+ mov %rdx, %rax # argv
+ mov %r8, %rcx # n_stacks
+
+ # fill all fp args
+ movsd 0(%rax), %xmm0
+ movsd 8(%rax), %xmm1
+ movsd 16(%rax), %xmm2
+ movsd 24(%rax), %xmm3
+
+ # check for stack args
+ cmp $0, %rcx
+ jz cycle_end
+
+ mov %rsp, %rdx
+ and $15, %rdx
+ jz no_abort
+ int $3
+no_abort:
+ mov %rcx, %rdx
+ and $1, %rdx
+ shl $3, %rdx
+ sub %rdx, %rsp
+
+ # store stack args
+ lea 56(%rax, %rcx, 8), %r9
+ sub %rsp, %r9 # offset
+cycle:
+ push (%rsp, %r9)
+ loop cycle
+
+cycle_end:
+ mov 32(%rax), %rcx
+ mov 40(%rax), %rdx
+ mov 48(%rax), %r8
+ mov 56(%rax), %r9
+
+ sub $32, %rsp # shadow space
+
+ call *%r10
+ leave
+ ret
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64_simd.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64_simd.s
new file mode 100644
index 000000000..48ae52480
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mingw_x64_simd.s
@@ -0,0 +1,57 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+.text
+.align 2
+.globl invokeNative
+invokeNative:
+
+ # %rcx func_ptr
+ # %rdx argv
+ # %r8 n_stacks
+
+ push %rbp
+ mov %rsp, %rbp
+
+ mov %rcx, %r10 # func_ptr
+ mov %rdx, %rax # argv
+ mov %r8, %rcx # n_stacks
+
+ # fill all fp args
+ movdqu 0(%rax), %xmm0
+ movdqu 16(%rax), %xmm1
+ movdqu 32(%rax), %xmm2
+ movdqu 48(%rax), %xmm3
+
+ # check for stack args
+ cmp $0, %rcx
+ jz cycle_end
+
+ mov %rsp, %rdx
+ and $15, %rdx
+ jz no_abort
+ int $3
+no_abort:
+ mov %rcx, %rdx
+ and $1, %rdx
+ shl $3, %rdx
+ sub %rdx, %rsp
+
+ # store stack args
+ lea 88(%rax, %rcx, 8), %r9
+ sub %rsp, %r9 # offset
+cycle:
+ push (%rsp, %r9)
+ loop cycle
+
+cycle_end:
+ mov 64(%rax), %rcx
+ mov 72(%rax), %rdx
+ mov 80(%rax), %r8
+ mov 88(%rax), %r9
+
+ sub $32, %rsp # shadow space
+
+ call *%r10
+ leave
+ ret
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mips.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mips.s
new file mode 100644
index 000000000..645f4f2ec
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_mips.s
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+ .text
+ .align 2
+ .globl invokeNative
+ .ent invokeNative
+ .type invokeNative, @function
+
+/**
+ * On function entry parameters:
+ * $4 = func_ptr
+ * $5 = args
+ * $6 = arg_num
+ */
+
+invokeNative:
+ .frame $fp, 8, $0
+ .mask 0x00000000, 0
+ .fmask 0x00000000, 0
+
+ /* Fixed part of frame */
+ subu $sp, 8
+
+ /* save registers */
+ sw $31, 4($sp)
+ sw $fp, 0($sp)
+
+ /* set frame pointer to bottom of fixed frame */
+ move $fp, $sp
+
+ /* allocate enough stack space */
+ sll $11, $6, 2 /* $11 == arg_num * 4 */
+ subu $sp, $11
+
+ /* make 8-byte aligned */
+ and $sp, ~7
+
+ move $9, $sp
+ move $25, $4 /* $25 = func_ptr */
+
+push_args:
+ beq $6, 0, done /* arg_num == 0 ? */
+ lw $8, 0($5) /* $8 = *args */
+ sw $8, 0($9) /* store $8 to stack */
+ addu $5, 4 /* args++ */
+ addu $9, 4 /* sp++ */
+ subu $6, 1 /* arg_num-- */
+ j push_args
+
+done:
+ lw $4, 0($sp) /* Load $4..$7 from stack */
+ lw $5, 4($sp)
+ lw $6, 8($sp)
+ lw $7, 12($sp)
+ ldc1 $f12, 0($sp) /* Load $f12, $f13, $f14, $f15 */
+ ldc1 $f14, 8($sp)
+
+ jalr $25 /* call function */
+
+ nop
+
+ /* restore saved registers */
+ move $sp, $fp
+ lw $31, 4($sp)
+ lw $fp, 0($sp)
+
+ /* pop frame */
+ addu $sp, $sp, 8
+
+ j $31
+ .end invokeNative
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_osx_universal.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_osx_universal.s
new file mode 100644
index 000000000..e2ca654fd
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_osx_universal.s
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#if defined(__aarch64__)
+#if WASM_ENABLE_SIMD == 0
+#include "invokeNative_aarch64.s"
+#else
+#include "invokeNative_aarch64_simd.s"
+#endif
+#else
+#if WASM_ENABLE_SIMD == 0
+#include "invokeNative_em64.s"
+#else
+#include "invokeNative_em64_simd.s"
+#endif
+#endif \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_riscv.S b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_riscv.S
new file mode 100644
index 000000000..0908f73cc
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_riscv.S
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+/*
+ * The float ABI macros used below are from the RISC-V C API:
+ * https://github.com/riscv/riscv-c-api-doc/blob/master/riscv-c-api.md
+ *
+ */
+
+#if defined(__riscv_float_abi_soft)
+#define RV_FPREG_SIZE 0
+#elif defined(__riscv_float_abi_single)
+#define RV_OP_LOADFPREG flw
+#define RV_OP_STROEFPREG fsw
+#define RV_FPREG_SIZE 4
+#elif defined(__riscv_float_abi_double)
+#define RV_OP_LOADFPREG fld
+#define RV_OP_STROEFPREG fsd
+#define RV_FPREG_SIZE 8
+#endif
+
+#if __riscv_xlen == 32
+#define RV_OP_LOADREG lw
+#define RV_OP_STOREREG sw
+#define RV_REG_SIZE 4
+#define RV_REG_SHIFT 2
+#define RV_FP_OFFSET (8 * RV_REG_SIZE)
+#define RV_INT_OFFSET 0
+#else
+#define RV_OP_LOADREG ld
+#define RV_OP_STOREREG sd
+#define RV_REG_SIZE 8
+#define RV_REG_SHIFT 3
+#define RV_FP_OFFSET 0
+#define RV_INT_OFFSET (8 * RV_FPREG_SIZE)
+#endif
+
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * a0 function ptr
+ * a1 argv
+ * a2 nstacks
+ */
+
+/*
+ * sp (stack pointer)
+ * |- sd/sw to store 64/32-bit values from register to memory
+ * |- ld/lw to load from stack to register
+ * fp/s0 (frame pointer)
+ * a0-a7 (8 integer arguments)
+ * |- sd/sw to store
+ * |- ld/lw to load
+ *   fa0-fa7 (8 float argument registers)
+ * |- fsd/fsw to store
+ *       |- fld/flw to load
+ *   t0-t6 (temporary registers)
+ * |- caller saved
+ */
+
+ /* reserve space on stack to save return address and frame pointer */
+ addi sp, sp, - 2 * RV_REG_SIZE
+ RV_OP_STOREREG fp, 0 * RV_REG_SIZE(sp) /* save frame pointer */
+ RV_OP_STOREREG ra, 1 * RV_REG_SIZE(sp) /* save return address */
+
+ mv fp, sp /* set frame pointer to bottom of fixed frame */
+
+ /* save function ptr, argv & nstacks */
+ mv t0, a0 /* t0 = function ptr */
+ mv t1, a1 /* t1 = argv array address */
+    mv      t2, a2                  /* t2 = nstacks */
+
+#ifndef __riscv_float_abi_soft
+ /* fill in fa0-7 float-registers*/
+ RV_OP_LOADFPREG fa0, RV_FP_OFFSET + 0 * RV_FPREG_SIZE(t1) /* fa0 */
+ RV_OP_LOADFPREG fa1, RV_FP_OFFSET + 1 * RV_FPREG_SIZE(t1) /* fa1 */
+ RV_OP_LOADFPREG fa2, RV_FP_OFFSET + 2 * RV_FPREG_SIZE(t1) /* fa2 */
+ RV_OP_LOADFPREG fa3, RV_FP_OFFSET + 3 * RV_FPREG_SIZE(t1) /* fa3 */
+ RV_OP_LOADFPREG fa4, RV_FP_OFFSET + 4 * RV_FPREG_SIZE(t1) /* fa4 */
+ RV_OP_LOADFPREG fa5, RV_FP_OFFSET + 5 * RV_FPREG_SIZE(t1) /* fa5 */
+ RV_OP_LOADFPREG fa6, RV_FP_OFFSET + 6 * RV_FPREG_SIZE(t1) /* fa6 */
+ RV_OP_LOADFPREG fa7, RV_FP_OFFSET + 7 * RV_FPREG_SIZE(t1) /* fa7 */
+#endif
+
+ /* fill in a0-7 integer-registers*/
+ RV_OP_LOADREG a0, RV_INT_OFFSET + 0 * RV_REG_SIZE(t1) /* a0 */
+ RV_OP_LOADREG a1, RV_INT_OFFSET + 1 * RV_REG_SIZE(t1) /* a1 */
+ RV_OP_LOADREG a2, RV_INT_OFFSET + 2 * RV_REG_SIZE(t1) /* a2 */
+ RV_OP_LOADREG a3, RV_INT_OFFSET + 3 * RV_REG_SIZE(t1) /* a3 */
+ RV_OP_LOADREG a4, RV_INT_OFFSET + 4 * RV_REG_SIZE(t1) /* a4 */
+ RV_OP_LOADREG a5, RV_INT_OFFSET + 5 * RV_REG_SIZE(t1) /* a5 */
+ RV_OP_LOADREG a6, RV_INT_OFFSET + 6 * RV_REG_SIZE(t1) /* a6 */
+ RV_OP_LOADREG a7, RV_INT_OFFSET + 7 * RV_REG_SIZE(t1) /* a7 */
+
+ /* t1 points to stack args */
+
+ /* RV_FPREG_SIZE is zero when __riscv_float_abi_soft defined */
+ addi t1, t1, RV_REG_SIZE * 8 + RV_FPREG_SIZE * 8
+
+ /* directly call the function if no args in stack,
+ x0 always holds 0 */
+ beq t2, x0, call_func
+
+ /* reserve enough stack space for function arguments */
+    sll     t3, t2, RV_REG_SHIFT    /* t3 = nstacks * RV_REG_SIZE (shift by 3 on RV64, 2 on RV32) */
+ sub sp, sp, t3
+
+ /* make 16-byte aligned */
+ li t3, 15
+ not t3, t3
+ and sp, sp, t3
+
+ /* save sp in t4 register */
+ mv t4, sp
+
+ /* copy left arguments from caller stack to own frame stack */
+loop_stack_args:
+ beq t2, x0, call_func
+ RV_OP_LOADREG t5, 0(t1) /* load stack argument, t5 = argv[i] */
+    RV_OP_STOREREG  t5, 0(t4)       /* store t5 to reserved stack, sp[j] = t5 */
+ addi t1, t1, RV_REG_SIZE /* move to next stack argument */
+ addi t4, t4, RV_REG_SIZE /* move to next stack pointer */
+ addi t2, t2, -1 /* decrease t2 every loop, nstacks = nstacks -1 */
+ j loop_stack_args
+
+call_func:
+ jalr t0
+
+ /* restore registers pushed in stack or saved in another register */
+return:
+ mv sp, fp /* restore sp saved in fp before function call */
+    RV_OP_LOADREG   fp, 0 * RV_REG_SIZE(sp)  /* load previous frame pointer to fp register */
+ RV_OP_LOADREG ra, 1 * RV_REG_SIZE(sp) /* load previous return address to ra register */
+ addi sp, sp, 2 * RV_REG_SIZE /* pop frame, restore sp */
+ jr ra
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb.s
new file mode 100644
index 000000000..3669fe77e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb.s
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * r0 function ptr
+ * r1 argv
+ * r2 argc
+ */
+
+ push {r4, r5, r6, r7}
+ push {lr}
+ sub sp, sp, #4 /* make sp 8 byte aligned */
+ mov ip, r0 /* ip = function ptr */
+ mov r4, r1 /* r4 = argv */
+ mov r5, r2 /* r5 = argc */
+
+ cmp r5, #1 /* at least one argument required: exec_env */
+ blt return
+
+ mov r6, #0 /* increased stack size */
+
+ ldr r0, [r4] /* r0 = argv[0] = exec_env */
+ add r4, r4, #4 /* r4 += 4 */
+ cmp r5, #1
+ beq call_func
+
+ ldr r1, [r4] /* r1 = argv[1] */
+ add r4, r4, #4
+ cmp r5, #2
+ beq call_func
+
+ ldr r2, [r4] /* r2 = argv[2] */
+ add r4, r4, #4
+ cmp r5, #3
+ beq call_func
+
+ ldr r3, [r4] /* r3 = argv[3] */
+ add r4, r4, #4
+ cmp r5, #4
+ beq call_func
+
+ sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
+
+ /* Ensure address is 8 byte aligned */
+ lsl r6, r5, #2 /* r6 = argc * 4 */
+ mov r7, #7
+ add r6, r6, r7 /* r6 = (r6 + 7) & ~7 */
+ bic r6, r6, r7
+ add r6, r6, #4 /* +4 because odd(5) registers are in stack */
+ mov r7, sp
+ sub r7, r7, r6 /* reserved stack space for left arguments */
+ mov sp, r7
+
+ mov lr, r2 /* save r2 */
+loop_args: /* copy left arguments to stack */
+ cmp r5, #0
+ beq call_func1
+ ldr r2, [r4]
+ add r4, r4, #4
+ str r2, [r7]
+ add r7, r7, #4
+ sub r5, r5, #1
+ b loop_args
+
+call_func1:
+ mov r2, lr /* restore r2 */
+
+call_func:
+ blx ip
+ add sp, sp, r6 /* restore sp */
+
+return:
+ add sp, sp, #4 /* make sp 8 byte aligned */
+ pop {r3}
+ pop {r4, r5, r6, r7}
+ mov lr, r3
+ bx lr
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb_vfp.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb_vfp.s
new file mode 100644
index 000000000..218cd91e0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_thumb_vfp.s
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+#ifndef BH_PLATFORM_DARWIN
+ .globl invokeNative
+ .type invokeNative, function
+invokeNative:
+#else
+ .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * r0 function ptr
+ * r1 argv
+ * r2 nstacks
+ */
+
+ push {r4, r5, r6, r7}
+ push {lr}
+ sub sp, sp, #4 /* make sp 8 byte aligned */
+ mov ip, r0 /* ip = function ptr */
+ mov r4, r1 /* r4 = argv */
+ mov r5, r2 /* r5 = nstacks */
+ mov r7, sp
+
+ /* Fill all int args */
+ ldr r0, [r4, #0] /* r0 = *(int*)&argv[0] = exec_env */
+ ldr r1, [r4, #4] /* r1 = *(int*)&argv[1] */
+ ldr r2, [r4, #8] /* r2 = *(int*)&argv[2] */
+ ldr r3, [r4, #12] /* r3 = *(int*)&argv[3] */
+ add r4, r4, #16 /* r4 points to float args */
+
+ /* Fill all float/double args to 16 single-precision registers, s0-s15, */
+ /* which may also be accessed as 8 double-precision registers, d0-d7 (with */
+ /* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
+ vldr s0, [r4, #0] /* s0 = *(float*)&argv[4] */
+ vldr s1, [r4, #4]
+ vldr s2, [r4, #8]
+ vldr s3, [r4, #12]
+ vldr s4, [r4, #16]
+ vldr s5, [r4, #20]
+ vldr s6, [r4, #24]
+ vldr s7, [r4, #28]
+ vldr s8, [r4, #32]
+ vldr s9, [r4, #36]
+ vldr s10, [r4, #40]
+ vldr s11, [r4, #44]
+ vldr s12, [r4, #48]
+ vldr s13, [r4, #52]
+ vldr s14, [r4, #56]
+ vldr s15, [r4, #60]
+    /* Directly call the function if no args in stack */
+ cmp r5, #0
+ beq call_func
+
+ mov lr, r2 /* save r2 */
+
+    /* Fill all stack args: reserve stack space and fill one by one */
+ add r4, r4, #64 /* r4 points to stack args */
+ mov r6, sp
+ mov r7, #7
+ bic r6, r6, r7 /* Ensure stack is 8 byte aligned */
+ lsl r2, r5, #2 /* r2 = nstacks * 4 */
+ add r2, r2, #7 /* r2 = (r2 + 7) & ~7 */
+ bic r2, r2, r7
+ sub r6, r6, r2 /* reserved stack space for stack arguments */
+ mov r7, sp
+ mov sp, r6
+
+loop_stack_args: /* copy stack arguments to stack */
+ cmp r5, #0
+ beq call_func1
+    ldr     r2, [r4]                /* Note: caller should ensure int64 and */
+ add r4, r4, #4 /* double are placed in 8 bytes aligned address */
+ str r2, [r6]
+ add r6, r6, #4
+
+ sub r5, r5, #1
+ b loop_stack_args
+
+call_func1:
+ mov r2, lr /* restore r2 */
+
+call_func:
+ blx ip
+ mov sp, r7 /* restore sp */
+
+return:
+ add sp, sp, #4 /* make sp 8 byte aligned */
+ pop {r3}
+ pop {r4, r5, r6, r7}
+ mov lr, r3
+ bx lr
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_xtensa.s b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_xtensa.s
new file mode 100644
index 000000000..ce03f12c1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/common/arch/invokeNative_xtensa.s
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+ .text
+ .align 2
+ .global invokeNative
+ .type invokeNative,function
+
+/*
+ * Arguments passed in:
+ *
+ * a2 function pointer
+ * a3 argv
+ * a4 argc
+ */
+
+invokeNative:
+ entry a1, 256
+
+ blti a4, 1, return /* at least one argument required: exec_env */
+
+ /* register a10 ~ a15 are used to pass first 6 arguments */
+
+ l32i.n a10, a3, 0
+ beqi a4, 1, call_func
+
+ l32i.n a11, a3, 4
+ beqi a4, 2, call_func
+
+ l32i.n a12, a3, 8
+ beqi a4, 3, call_func
+
+ l32i.n a13, a3, 12
+ beqi a4, 4, call_func
+
+ l32i.n a14, a3, 16
+ beqi a4, 5, call_func
+
+ l32i.n a15, a3, 20
+ beqi a4, 6, call_func
+
+ /* left arguments are passed through stack */
+
+ addi a4, a4, -6
+ addi a3, a3, 24 /* move argv pointer */
+ mov.n a6, a1 /* store stack pointer */
+ addi a7, a1, 256 /* stack boundary */
+
+loop_args:
+ beqi a4, 0, call_func
+ bge a6, a7, call_func /* reach stack boundary */
+
+ l32i.n a5, a3, 0 /* load argument to a5 */
+ s32i.n a5, a6, 0 /* push data to stack */
+
+ addi a4, a4, -1 /* decrease argc */
+ addi a3, a3, 4 /* move argv pointer */
+ addi a6, a6, 4 /* move stack pointer */
+
+ j loop_args
+
+call_func:
+ mov.n a8, a2
+ callx8 a8
+
+ /* the result returned from callee is stored in a2
+ mov the result to a10 so the caller of this function
+ can receive the value */
+ mov.n a2, a10
+ mov.n a3, a11
+
+return:
+ retw.n