Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared')
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/LICENSE.md 30
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/coap-constants.h 194
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/extension/coap_ext.h 20
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/lib_coap.cmake 12
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/SConscript 33
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_alloc.c 794
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc.h 168
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc_internal.h 306
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_hmu.c 91
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_kfc.c 297
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.c 190
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.cmake 19
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.h 55
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/README.md 10
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_platform.c 71
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_thread.c 361
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_time.c 12
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/platform_internal.h 67
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/shared_platform.cmake 16
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_init.c 171
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_internal.h 155
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/shared_platform.cmake 18
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_malloc.c 28
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_thread.c 454
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_time.c 13
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/platform_api_freertos.cmake 8
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/COPYRIGHT 126
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/math.c 1681
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/platform_api_math.cmake 8
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake 8
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c 72
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c 253
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c 1028
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c 680
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c 17
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_init.c 43
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_internal.h 109
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/shared_platform.cmake 18
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_init.c 6
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_internal.h 6
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/shared_platform.cmake 20
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_malloc.c 84
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_memmap.c 51
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_platform.c 252
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_socket.c 228
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_thread.c 233
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/platform_internal.h 115
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/shared_platform.cmake 13
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_init.c 43
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_internal.h 108
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/shared_platform.cmake 18
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_extension.h 1039
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_vmcore.h 145
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_common.h 204
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/platform_internal.h 75
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.c 1117
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.h 266
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.c 532
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.h 61
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_platform.c 197
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.c 91
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.h 35
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_rsrv_mem_mngr.h 95
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.c 31
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.h 57
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.c 1222
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.h 332
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_thread.c 212
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.c 135
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.h 50
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_wamr.edl 158
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/shared_platform.cmake 38
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/file.c 321
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/pthread.c 54
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/signal.c 11
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/socket.c 148
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/time.c 44
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_init.c 43
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_internal.h 106
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/shared_platform.cmake 18
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/nuttx_platform.c 259
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/platform_internal.h 130
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/shared_platform.cmake 14
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/platform_internal.h 79
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_platform.c 81
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_thread.c 432
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_time.c 34
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/shared_platform.cmake 17
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/SConscript 34
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/platform_internal.h 48
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/rtt_platform.c 209
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/shared_platform.cmake 19
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_init.c 43
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_internal.h 102
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/shared_platform.cmake 18
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_init.c 75
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_internal.h 138
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/shared_platform.cmake 19
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_atomic.cpp 22
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_malloc.c 30
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_memmap.c 146
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_socket.c 541
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_thread.c 750
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_time.c 20
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h 149
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake 16
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c 223
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c 576
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c 12
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/SConscript 17
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.c 25
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.h 42
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.c 163
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.h 73
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.c 337
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.h 168
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.c 111
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.h 109
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.c 107
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.h 88
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_platform.h 38
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.c 256
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.h 80
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.c 279
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.h 126
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.c 469
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.h 51
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/shared_utils.cmake 12
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/SConscript 32
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.c 65
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.h 28
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.c 117
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.h 22
-rw-r--r-- fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/shared_uncommon.cmake 11
134 files changed, 22712 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/LICENSE.md b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/LICENSE.md
new file mode 100644
index 000000000..f4b1a054c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/LICENSE.md
@@ -0,0 +1,30 @@
+Copyright (c) (Year), (Name of copyright holder)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/coap-constants.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/coap-constants.h
new file mode 100644
index 000000000..1de2ed9d8
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/er-coap/coap-constants.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2013, Institute for Pervasive Computing, ETH Zurich
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This file is part of the Contiki operating system.
+ */
+
+/**
+ * \file
+ * Collection of constants specified in the CoAP standard.
+ * \author
+ * Matthias Kovatsch <kovatsch@inf.ethz.ch>
+ */
+
+/**
+ * \addtogroup coap
+ * @{
+ */
+
+#ifndef COAP_CONSTANTS_H_
+#define COAP_CONSTANTS_H_
+
+/* clang-format off */
+#define COAP_DEFAULT_PORT 5683
+#define COAP_DEFAULT_SECURE_PORT 5684
+
+#define COAP_DEFAULT_MAX_AGE 60
+#define COAP_RESPONSE_TIMEOUT 3
+#define COAP_RESPONSE_RANDOM_FACTOR 1.5
+#define COAP_MAX_RETRANSMIT 4
+
+#define COAP_HEADER_LEN 4 /* | version:0x03 type:0x0C tkl:0xF0 | code | mid:0x00FF | mid:0xFF00 | */
+#define COAP_TOKEN_LEN 8 /* The maximum number of bytes for the Token */
+#define COAP_ETAG_LEN 8 /* The maximum number of bytes for the ETag */
+
+#define COAP_HEADER_VERSION_MASK 0xC0
+#define COAP_HEADER_VERSION_POSITION 6
+#define COAP_HEADER_TYPE_MASK 0x30
+#define COAP_HEADER_TYPE_POSITION 4
+#define COAP_HEADER_TOKEN_LEN_MASK 0x0F
+#define COAP_HEADER_TOKEN_LEN_POSITION 0
+
+#define COAP_HEADER_OPTION_DELTA_MASK 0xF0
+#define COAP_HEADER_OPTION_SHORT_LENGTH_MASK 0x0F
+/* clang-format on */
+
+/* CoAP message types */
+typedef enum {
+ COAP_TYPE_CON, /* confirmables */
+ COAP_TYPE_NON, /* non-confirmables */
+ COAP_TYPE_ACK, /* acknowledgements */
+ COAP_TYPE_RST /* reset */
+} coap_message_type_t;
+
+/* clang-format off */
+/* CoAP request method codes */
+typedef enum {
+ COAP_GET = 1,
+ COAP_POST, COAP_PUT,
+ COAP_DELETE
+} coap_method_t;
+/* clang-format on */
+
+/* CoAP response codes */
+typedef enum {
+ COAP_NO_ERROR = 0,
+
+ CREATED_2_01 = 65, /* CREATED */
+ DELETED_2_02 = 66, /* DELETED */
+ VALID_2_03 = 67, /* NOT_MODIFIED */
+ CHANGED_2_04 = 68, /* CHANGED */
+ CONTENT_2_05 = 69, /* OK */
+ CONTINUE_2_31 = 95, /* CONTINUE */
+
+ BAD_REQUEST_4_00 = 128, /* BAD_REQUEST */
+ UNAUTHORIZED_4_01 = 129, /* UNAUTHORIZED */
+ BAD_OPTION_4_02 = 130, /* BAD_OPTION */
+ FORBIDDEN_4_03 = 131, /* FORBIDDEN */
+ NOT_FOUND_4_04 = 132, /* NOT_FOUND */
+ METHOD_NOT_ALLOWED_4_05 = 133, /* METHOD_NOT_ALLOWED */
+ NOT_ACCEPTABLE_4_06 = 134, /* NOT_ACCEPTABLE */
+ PRECONDITION_FAILED_4_12 = 140, /* BAD_REQUEST */
+ REQUEST_ENTITY_TOO_LARGE_4_13 = 141, /* REQUEST_ENTITY_TOO_LARGE */
+ UNSUPPORTED_MEDIA_TYPE_4_15 = 143, /* UNSUPPORTED_MEDIA_TYPE */
+
+ INTERNAL_SERVER_ERROR_5_00 = 160, /* INTERNAL_SERVER_ERROR */
+ NOT_IMPLEMENTED_5_01 = 161, /* NOT_IMPLEMENTED */
+ BAD_GATEWAY_5_02 = 162, /* BAD_GATEWAY */
+ SERVICE_UNAVAILABLE_5_03 = 163, /* SERVICE_UNAVAILABLE */
+ GATEWAY_TIMEOUT_5_04 = 164, /* GATEWAY_TIMEOUT */
+ PROXYING_NOT_SUPPORTED_5_05 = 165, /* PROXYING_NOT_SUPPORTED */
+
+ /* Erbium errors */
+ MEMORY_ALLOCATION_ERROR = 192,
+ PACKET_SERIALIZATION_ERROR,
+
+ /* Erbium hooks */
+ MANUAL_RESPONSE,
+ PING_RESPONSE
+} coap_status_t;
+
+/* CoAP header option numbers */
+typedef enum {
+ COAP_OPTION_IF_MATCH = 1, /* 0-8 B */
+ COAP_OPTION_URI_HOST = 3, /* 1-255 B */
+ COAP_OPTION_ETAG = 4, /* 1-8 B */
+ COAP_OPTION_IF_NONE_MATCH = 5, /* 0 B */
+ COAP_OPTION_OBSERVE = 6, /* 0-3 B */
+ COAP_OPTION_URI_PORT = 7, /* 0-2 B */
+ COAP_OPTION_LOCATION_PATH = 8, /* 0-255 B */
+ COAP_OPTION_URI_PATH = 11, /* 0-255 B */
+ COAP_OPTION_CONTENT_FORMAT = 12, /* 0-2 B */
+ COAP_OPTION_MAX_AGE = 14, /* 0-4 B */
+ COAP_OPTION_URI_QUERY = 15, /* 0-255 B */
+ COAP_OPTION_ACCEPT = 17, /* 0-2 B */
+ COAP_OPTION_LOCATION_QUERY = 20, /* 0-255 B */
+ COAP_OPTION_BLOCK2 = 23, /* 1-3 B */
+ COAP_OPTION_BLOCK1 = 27, /* 1-3 B */
+ COAP_OPTION_SIZE2 = 28, /* 0-4 B */
+ COAP_OPTION_PROXY_URI = 35, /* 1-1034 B */
+ COAP_OPTION_PROXY_SCHEME = 39, /* 1-255 B */
+ COAP_OPTION_SIZE1 = 60, /* 0-4 B */
+} coap_option_t;
+
+/* CoAP Content-Formats */
+typedef enum {
+ TEXT_PLAIN = 0,
+ TEXT_XML = 1,
+ TEXT_CSV = 2,
+ TEXT_HTML = 3,
+ IMAGE_GIF = 21,
+ IMAGE_JPEG = 22,
+ IMAGE_PNG = 23,
+ IMAGE_TIFF = 24,
+ AUDIO_RAW = 25,
+ VIDEO_RAW = 26,
+ APPLICATION_LINK_FORMAT = 40,
+ APPLICATION_XML = 41,
+ APPLICATION_OCTET_STREAM = 42,
+ APPLICATION_RDF_XML = 43,
+ APPLICATION_SOAP_XML = 44,
+ APPLICATION_ATOM_XML = 45,
+ APPLICATION_XMPP_XML = 46,
+ APPLICATION_EXI = 47,
+ APPLICATION_FASTINFOSET = 48,
+ APPLICATION_SOAP_FASTINFOSET = 49,
+ APPLICATION_JSON = 50,
+ APPLICATION_X_OBIX_BINARY = 51
+} coap_content_format_t;
+
+/**
+ * Resource flags for allowed methods and special functionalities.
+ */
+typedef enum {
+ NO_FLAGS = 0,
+
+ /* methods to handle */
+ METHOD_GET = (1 << 0),
+ METHOD_POST = (1 << 1),
+ METHOD_PUT = (1 << 2),
+ METHOD_DELETE = (1 << 3),
+
+ /* special flags */
+ HAS_SUB_RESOURCES = (1 << 4),
+ IS_SEPARATE = (1 << 5),
+ IS_OBSERVABLE = (1 << 6),
+ IS_PERIODIC = (1 << 7)
+} coap_resource_flags_t;
+
+#endif /* COAP_CONSTANTS_H_ */
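
A minimal sketch (not part of the WAMR/Erbium sources) of how the mask and position constants above decode the first byte of a CoAP header; the message byte built here is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include "coap-constants.h"

    int main(void)
    {
        /* first header byte: version = 1, type = CON, token length = 4 */
        uint8_t byte0 = (uint8_t)((1 << COAP_HEADER_VERSION_POSITION)
                                  | (COAP_TYPE_CON << COAP_HEADER_TYPE_POSITION) | 4);

        unsigned ver = (byte0 & COAP_HEADER_VERSION_MASK) >> COAP_HEADER_VERSION_POSITION;
        unsigned type = (byte0 & COAP_HEADER_TYPE_MASK) >> COAP_HEADER_TYPE_POSITION;
        unsigned tkl = (byte0 & COAP_HEADER_TOKEN_LEN_MASK) >> COAP_HEADER_TOKEN_LEN_POSITION;

        printf("version=%u type=%u token-length=%u\n", ver, type, tkl);
        return 0;
    }
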
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/extension/coap_ext.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/extension/coap_ext.h
new file mode 100644
index 000000000..f61deac27
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/extension/coap_ext.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef COAP_EXTENSION_COAP_EXT_H_
+#define COAP_EXTENSION_COAP_EXT_H_
+
+#include "coap-constants.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define COAP_EVENT (COAP_DELETE + 2)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* COAP_EXTENSION_COAP_EXT_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/lib_coap.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/lib_coap.cmake
new file mode 100644
index 000000000..8970e5d6c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/coap/lib_coap.cmake
@@ -0,0 +1,12 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (LIB_COAP_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories(${LIB_COAP_DIR}/er-coap)
+include_directories(${LIB_COAP_DIR}/extension)
+
+file (GLOB_RECURSE source_all ${LIB_COAP_DIR}/*.c)
+
+set (LIB_COAP_SOURCE ${source_all})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/SConscript b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/SConscript
new file mode 100644
index 000000000..602d87158
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/SConscript
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 2021, RT-Thread Development Team
+#
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+from building import *
+import os
+
+cwd = GetCurrentDir()
+
+src = Split('''
+''')
+
+
+def addSrcFiles(arr, path):
+ for f in os.listdir(path):
+ fpath = os.path.join(path, f);
+ if os.path.isfile(fpath):
+ ext = os.path.splitext(fpath)[-1]
+ if ext == '.c' or ext == '.cpp':
+ arr += [fpath]
+ elif os.path.isdir(fpath):
+ addSrcFiles(arr, fpath)
+
+
+
+addSrcFiles(src, cwd);
+CPPPATH = [cwd, cwd+'/../include']
+
+group = DefineGroup('iwasm_platform_core', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_alloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_alloc.c
new file mode 100644
index 000000000..5c2a628a2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_alloc.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "ems_gc_internal.h"
+
+static inline bool
+hmu_is_in_heap(void *hmu, gc_uint8 *heap_base_addr, gc_uint8 *heap_end_addr)
+{
+ gc_uint8 *addr = (gc_uint8 *)hmu;
+ return (addr >= heap_base_addr && addr < heap_end_addr) ? true : false;
+}
+
+/**
+ * Remove a node from the tree it belongs to
+ *
+ * @param p the node to remove, can not be NULL, can not be the ROOT node
+ * the node will be removed from the tree, and the left, right and
+ * parent pointers of the node @p will be set to be NULL. Other fields
+ * won't be touched. The tree will be re-organized so that the order
+ * conditions are still satisfied.
+ */
+static bool
+remove_tree_node(gc_heap_t *heap, hmu_tree_node_t *p)
+{
+ hmu_tree_node_t *q = NULL, **slot = NULL, *parent;
+ hmu_tree_node_t *root = heap->kfc_tree_root;
+ gc_uint8 *base_addr = heap->base_addr;
+ gc_uint8 *end_addr = base_addr + heap->current_size;
+
+ bh_assert(p);
+
+ parent = p->parent;
+ if (!parent || p == root /* p can not be the ROOT node */
+ || !hmu_is_in_heap(p, base_addr, end_addr)
+ || (parent != root && !hmu_is_in_heap(parent, base_addr, end_addr))) {
+ goto fail;
+ }
+
+ /* get the slot which holds pointer to node p */
+ if (p == p->parent->right) {
+ /* Don't use `slot = &p->parent->right` to avoid compiler warning */
+ slot = (hmu_tree_node_t **)((uint8 *)p->parent
+ + offsetof(hmu_tree_node_t, right));
+ }
+ else if (p == p->parent->left) {
+ /* p should be a child of its parent */
+ /* Don't use `slot = &p->parent->left` to avoid compiler warning */
+ slot = (hmu_tree_node_t **)((uint8 *)p->parent
+ + offsetof(hmu_tree_node_t, left));
+ }
+ else {
+ goto fail;
+ }
+
+ /**
+ * algorithms used to remove node p
+ * case 1: if p has no left child, replace p with its right child
+ * case 2: if p has no right child, replace p with its left child
+ * case 3: otherwise, find p's predecessor, remove it from the tree
+ * and replace p with it.
+ * use predecessor can keep the left <= root < right condition.
+ */
+
+ if (!p->left) {
+ /* move right child up*/
+ *slot = p->right;
+ if (p->right) {
+ if (!hmu_is_in_heap(p->right, base_addr, end_addr)) {
+ goto fail;
+ }
+ p->right->parent = p->parent;
+ }
+
+ p->left = p->right = p->parent = NULL;
+ return true;
+ }
+
+ if (!p->right) {
+ /* move left child up*/
+ *slot = p->left;
+ if (!hmu_is_in_heap(p->left, base_addr, end_addr)) {
+ goto fail;
+ }
+ /* p->left can never be NULL unless it is corrupted. */
+ p->left->parent = p->parent;
+
+ p->left = p->right = p->parent = NULL;
+ return true;
+ }
+
+ /* both left & right exist, find p's predecessor at first*/
+ q = p->left;
+ if (!hmu_is_in_heap(q, base_addr, end_addr)) {
+ goto fail;
+ }
+ while (q->right) {
+ q = q->right;
+ if (!hmu_is_in_heap(q, base_addr, end_addr)) {
+ goto fail;
+ }
+ }
+
+ /* remove from the tree*/
+ if (!remove_tree_node(heap, q))
+ return false;
+
+ *slot = q;
+ q->parent = p->parent;
+ q->left = p->left;
+ q->right = p->right;
+ if (q->left) {
+ if (!hmu_is_in_heap(q->left, base_addr, end_addr)) {
+ goto fail;
+ }
+ q->left->parent = q;
+ }
+ if (q->right) {
+ if (!hmu_is_in_heap(q->right, base_addr, end_addr)) {
+ goto fail;
+ }
+ q->right->parent = q;
+ }
+
+ p->left = p->right = p->parent = NULL;
+
+ return true;
+fail:
+ heap->is_heap_corrupted = true;
+ return false;
+}
+
+static bool
+unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
+{
+ gc_uint8 *base_addr, *end_addr;
+ gc_size_t size;
+
+ bh_assert(gci_is_heap_valid(heap));
+ bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
+ && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
+
+ if (hmu_get_ut(hmu) != HMU_FC) {
+ heap->is_heap_corrupted = true;
+ return false;
+ }
+
+ base_addr = heap->base_addr;
+ end_addr = base_addr + heap->current_size;
+ size = hmu_get_size(hmu);
+
+ if (HMU_IS_FC_NORMAL(size)) {
+ uint32 node_idx = size >> 3;
+ hmu_normal_node_t *node_prev = NULL, *node_next;
+ hmu_normal_node_t *node = heap->kfc_normal_list[node_idx].next;
+
+ while (node) {
+ if (!hmu_is_in_heap(node, base_addr, end_addr)) {
+ heap->is_heap_corrupted = true;
+ return false;
+ }
+ node_next = get_hmu_normal_node_next(node);
+ if ((hmu_t *)node == hmu) {
+ if (!node_prev) /* list head */
+ heap->kfc_normal_list[node_idx].next = node_next;
+ else
+ set_hmu_normal_node_next(node_prev, node_next);
+ break;
+ }
+ node_prev = node;
+ node = node_next;
+ }
+
+ if (!node) {
+ os_printf("[GC_ERROR]couldn't find the node in the normal list\n");
+ }
+ }
+ else {
+ if (!remove_tree_node(heap, (hmu_tree_node_t *)hmu))
+ return false;
+ }
+ return true;
+}
+
+static void
+hmu_set_free_size(hmu_t *hmu)
+{
+ gc_size_t size;
+ bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);
+
+ size = hmu_get_size(hmu);
+ *((uint32 *)((char *)hmu + size) - 1) = size;
+}
+
+/**
+ * Add free chunk back to KFC
+ *
+ * @param heap should not be NULL and it should be a valid heap
+ * @param hmu should not be NULL and it should be a HMU of length @size inside
+ * @heap hmu should be 8-bytes aligned
+ * @param size should be positive and multiple of 8
+ * hmu with size @size will be added into KFC as a new FC.
+ */
+bool
+gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
+{
+ gc_uint8 *base_addr, *end_addr;
+ hmu_normal_node_t *np = NULL;
+ hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
+ uint32 node_idx;
+
+ bh_assert(gci_is_heap_valid(heap));
+ bh_assert(hmu && (gc_uint8 *)hmu >= heap->base_addr
+ && (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
+ bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
+ bh_assert(size > 0
+ && ((gc_uint8 *)hmu) + size
+ <= heap->base_addr + heap->current_size);
+ bh_assert(!(size & 7));
+
+ base_addr = heap->base_addr;
+ end_addr = base_addr + heap->current_size;
+
+ hmu_set_ut(hmu, HMU_FC);
+ hmu_set_size(hmu, size);
+ hmu_set_free_size(hmu);
+
+ if (HMU_IS_FC_NORMAL(size)) {
+ np = (hmu_normal_node_t *)hmu;
+ if (!hmu_is_in_heap(np, base_addr, end_addr)) {
+ heap->is_heap_corrupted = true;
+ return false;
+ }
+
+ node_idx = size >> 3;
+ set_hmu_normal_node_next(np, heap->kfc_normal_list[node_idx].next);
+ heap->kfc_normal_list[node_idx].next = np;
+ return true;
+ }
+
+ /* big block */
+ node = (hmu_tree_node_t *)hmu;
+ node->size = size;
+ node->left = node->right = node->parent = NULL;
+
+ /* find proper node to link this new node to */
+ root = heap->kfc_tree_root;
+ tp = root;
+ bh_assert(tp->size < size);
+ while (1) {
+ if (tp->size < size) {
+ if (!tp->right) {
+ tp->right = node;
+ node->parent = tp;
+ break;
+ }
+ tp = tp->right;
+ }
+ else { /* tp->size >= size */
+ if (!tp->left) {
+ tp->left = node;
+ node->parent = tp;
+ break;
+ }
+ tp = tp->left;
+ }
+ if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
+ heap->is_heap_corrupted = true;
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Find a proper hmu for required memory size
+ *
+ * @param heap should not be NULL and should be a valid heap
+ * @param size should cover the header and should be 8 bytes aligned
+ * GC will not be performed here.
+ * Heap extension will not be performed here.
+ *
+ * @return hmu allocated if success, which will be aligned to 8 bytes,
+ * NULL otherwise
+ */
+static hmu_t *
+alloc_hmu(gc_heap_t *heap, gc_size_t size)
+{
+ gc_uint8 *base_addr, *end_addr;
+ hmu_normal_list_t *normal_head = NULL;
+ hmu_normal_node_t *p = NULL;
+ uint32 node_idx = 0, init_node_idx = 0;
+ hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
+ hmu_t *next, *rest;
+ uintptr_t tp_ret;
+
+ bh_assert(gci_is_heap_valid(heap));
+ bh_assert(size > 0 && !(size & 7));
+
+ base_addr = heap->base_addr;
+ end_addr = base_addr + heap->current_size;
+
+ if (size < GC_SMALLEST_SIZE)
+ size = GC_SMALLEST_SIZE;
+
+ /* check normal list at first*/
+ if (HMU_IS_FC_NORMAL(size)) {
+ /* find a non-empty slot in normal_node_list with good size*/
+ init_node_idx = (size >> 3);
+ for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
+ node_idx++) {
+ normal_head = heap->kfc_normal_list + node_idx;
+ if (normal_head->next)
+ break;
+ normal_head = NULL;
+ }
+
+ /* found in normal list*/
+ if (normal_head) {
+ bh_assert(node_idx >= init_node_idx);
+
+ p = normal_head->next;
+ if (!hmu_is_in_heap(p, base_addr, end_addr)) {
+ heap->is_heap_corrupted = true;
+ return NULL;
+ }
+ normal_head->next = get_hmu_normal_node_next(p);
+ if (((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) != 0) {
+ heap->is_heap_corrupted = true;
+ return NULL;
+ }
+
+ if ((gc_size_t)node_idx != (uint32)init_node_idx
+ /* with bigger size*/
+ && ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
+ rest = (hmu_t *)(((char *)p) + size);
+ if (!gci_add_fc(heap, rest, (node_idx << 3) - size)) {
+ return NULL;
+ }
+ hmu_mark_pinuse(rest);
+ }
+ else {
+ size = node_idx << 3;
+ next = (hmu_t *)((char *)p + size);
+ if (hmu_is_in_heap(next, base_addr, end_addr))
+ hmu_mark_pinuse(next);
+ }
+
+ heap->total_free_size -= size;
+ if ((heap->current_size - heap->total_free_size)
+ > heap->highmark_size)
+ heap->highmark_size =
+ heap->current_size - heap->total_free_size;
+
+ hmu_set_size((hmu_t *)p, size);
+ return (hmu_t *)p;
+ }
+ }
+
+ /* need to find a node in tree*/
+ root = heap->kfc_tree_root;
+
+ /* find the best node*/
+ bh_assert(root);
+ tp = root->right;
+ while (tp) {
+ if (!hmu_is_in_heap(tp, base_addr, end_addr)) {
+ heap->is_heap_corrupted = true;
+ return NULL;
+ }
+
+ if (tp->size < size) {
+ tp = tp->right;
+ continue;
+ }
+
+ /* record the last node with size equal to or bigger than given size*/
+ last_tp = tp;
+ tp = tp->left;
+ }
+
+ if (last_tp) {
+ bh_assert(last_tp->size >= size);
+
+ /* alloc in last_p*/
+
+ /* remove node last_p from tree*/
+ if (!remove_tree_node(heap, last_tp))
+ return NULL;
+
+ if (last_tp->size >= size + GC_SMALLEST_SIZE) {
+ rest = (hmu_t *)((char *)last_tp + size);
+ if (!gci_add_fc(heap, rest, last_tp->size - size))
+ return NULL;
+ hmu_mark_pinuse(rest);
+ }
+ else {
+ size = last_tp->size;
+ next = (hmu_t *)((char *)last_tp + size);
+ if (hmu_is_in_heap(next, base_addr, end_addr))
+ hmu_mark_pinuse(next);
+ }
+
+ heap->total_free_size -= size;
+ if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
+ heap->highmark_size = heap->current_size - heap->total_free_size;
+
+ hmu_set_size((hmu_t *)last_tp, size);
+ tp_ret = (uintptr_t)last_tp;
+ return (hmu_t *)tp_ret;
+ }
+
+ return NULL;
+}
+
+/**
+ * Find a proper HMU with given size
+ *
+ * @param heap should not be NULL and should be a valid heap
+ * @param size should cover the header and should be 8 bytes aligned
+ *
+ * Note: This function will try several ways to satisfy the allocation request:
+ * 1. Find a proper HMU from the available HMUs.
+ * 2. GC will be triggered if 1 failed.
+ * 3. Find a proper HMU from the available HMUs again.
+ * 4. Return NULL if 3 failed.
+ *
+ * @return hmu allocated if success, which will be aligned to 8 bytes,
+ * NULL otherwise
+ */
+static hmu_t *
+alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
+{
+ bh_assert(gci_is_heap_valid(heap));
+ bh_assert(size > 0 && !(size & 7));
+
+ return alloc_hmu(heap, size);
+}
+
+static unsigned long g_total_malloc = 0;
+static unsigned long g_total_free = 0;
+
+#if BH_ENABLE_GC_VERIFY == 0
+gc_object_t
+gc_alloc_vo(void *vheap, gc_size_t size)
+#else
+gc_object_t
+gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
+#endif
+{
+ gc_heap_t *heap = (gc_heap_t *)vheap;
+ hmu_t *hmu = NULL;
+ gc_object_t ret = (gc_object_t)NULL;
+ gc_size_t tot_size = 0, tot_size_unaligned;
+
+ /* hmu header + prefix + obj + suffix */
+ tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
+ /* aligned size*/
+ tot_size = GC_ALIGN_8(tot_size_unaligned);
+ if (tot_size < size)
+ /* integer overflow */
+ return NULL;
+
+ if (heap->is_heap_corrupted) {
+ os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
+ return NULL;
+ }
+
+ os_mutex_lock(&heap->lock);
+
+ hmu = alloc_hmu_ex(heap, tot_size);
+ if (!hmu)
+ goto finish;
+
+ bh_assert(hmu_get_size(hmu) >= tot_size);
+ /* the total size allocated may be larger than
+ the required size, reset it here */
+ tot_size = hmu_get_size(hmu);
+
+ g_total_malloc += tot_size;
+
+ hmu_set_ut(hmu, HMU_VO);
+ hmu_unfree_vo(hmu);
+
+#if BH_ENABLE_GC_VERIFY != 0
+ hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
+#endif
+
+ ret = hmu_to_obj(hmu);
+ if (tot_size > tot_size_unaligned)
+ /* clear buffer appended by GC_ALIGN_8() */
+ memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);
+
+finish:
+ os_mutex_unlock(&heap->lock);
+ return ret;
+}
+
+#if BH_ENABLE_GC_VERIFY == 0
+gc_object_t
+gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
+#else
+gc_object_t
+gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
+ int line)
+#endif
+{
+ gc_heap_t *heap = (gc_heap_t *)vheap;
+ hmu_t *hmu = NULL, *hmu_old = NULL, *hmu_next;
+ gc_object_t ret = (gc_object_t)NULL, obj_old = (gc_object_t)ptr;
+ gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0, tot_size_next;
+ gc_size_t obj_size, obj_size_old;
+ gc_uint8 *base_addr, *end_addr;
+ hmu_type_t ut;
+
+ /* hmu header + prefix + obj + suffix */
+ tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
+ /* aligned size*/
+ tot_size = GC_ALIGN_8(tot_size_unaligned);
+ if (tot_size < size)
+ /* integer overflow */
+ return NULL;
+
+ if (heap->is_heap_corrupted) {
+ os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
+ return NULL;
+ }
+
+ if (obj_old) {
+ hmu_old = obj_to_hmu(obj_old);
+ tot_size_old = hmu_get_size(hmu_old);
+ if (tot_size <= tot_size_old)
+ /* current node already meets the requirement */
+ return obj_old;
+ }
+
+ base_addr = heap->base_addr;
+ end_addr = base_addr + heap->current_size;
+
+ os_mutex_lock(&heap->lock);
+
+ if (hmu_old) {
+ hmu_next = (hmu_t *)((char *)hmu_old + tot_size_old);
+ if (hmu_is_in_heap(hmu_next, base_addr, end_addr)) {
+ ut = hmu_get_ut(hmu_next);
+ tot_size_next = hmu_get_size(hmu_next);
+ if (ut == HMU_FC && tot_size <= tot_size_old + tot_size_next) {
+ /* current node and next node meets requirement */
+ if (!unlink_hmu(heap, hmu_next)) {
+ os_mutex_unlock(&heap->lock);
+ return NULL;
+ }
+ hmu_set_size(hmu_old, tot_size);
+ memset((char *)hmu_old + tot_size_old, 0,
+ tot_size - tot_size_old);
+#if BH_ENABLE_GC_VERIFY != 0
+ hmu_init_prefix_and_suffix(hmu_old, tot_size, file, line);
+#endif
+ if (tot_size < tot_size_old + tot_size_next) {
+ hmu_next = (hmu_t *)((char *)hmu_old + tot_size);
+ tot_size_next = tot_size_old + tot_size_next - tot_size;
+ if (!gci_add_fc(heap, hmu_next, tot_size_next)) {
+ os_mutex_unlock(&heap->lock);
+ return NULL;
+ }
+ }
+ os_mutex_unlock(&heap->lock);
+ return obj_old;
+ }
+ }
+ }
+
+ hmu = alloc_hmu_ex(heap, tot_size);
+ if (!hmu)
+ goto finish;
+
+ bh_assert(hmu_get_size(hmu) >= tot_size);
+ /* the total size allocated may be larger than
+ the required size, reset it here */
+ tot_size = hmu_get_size(hmu);
+ g_total_malloc += tot_size;
+
+ hmu_set_ut(hmu, HMU_VO);
+ hmu_unfree_vo(hmu);
+
+#if BH_ENABLE_GC_VERIFY != 0
+ hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
+#endif
+
+ ret = hmu_to_obj(hmu);
+
+finish:
+
+ if (ret) {
+ obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
+ memset(ret, 0, obj_size);
+ if (obj_old) {
+ obj_size_old =
+ tot_size_old - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
+ bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
+ }
+ }
+
+ os_mutex_unlock(&heap->lock);
+
+ if (ret && obj_old)
+ gc_free_vo(vheap, obj_old);
+
+ return ret;
+}
+
+/**
+ * Do some checking to see if given pointer is a possible valid heap
+ * @return GC_TRUE if all checking passed, GC_FALSE otherwise
+ */
+int
+gci_is_heap_valid(gc_heap_t *heap)
+{
+ if (!heap)
+ return GC_FALSE;
+ if (heap->heap_id != (gc_handle_t)heap)
+ return GC_FALSE;
+
+ return GC_TRUE;
+}
+
+#if BH_ENABLE_GC_VERIFY == 0
+int
+gc_free_vo(void *vheap, gc_object_t obj)
+#else
+int
+gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
+#endif
+{
+ gc_heap_t *heap = (gc_heap_t *)vheap;
+ gc_uint8 *base_addr, *end_addr;
+ hmu_t *hmu = NULL;
+ hmu_t *prev = NULL;
+ hmu_t *next = NULL;
+ gc_size_t size = 0;
+ hmu_type_t ut;
+ int ret = GC_SUCCESS;
+
+ if (!obj) {
+ return GC_SUCCESS;
+ }
+
+ if (heap->is_heap_corrupted) {
+ os_printf("[GC_ERROR]Heap is corrupted, free memory failed.\n");
+ return GC_ERROR;
+ }
+
+ hmu = obj_to_hmu(obj);
+
+ base_addr = heap->base_addr;
+ end_addr = base_addr + heap->current_size;
+
+ os_mutex_lock(&heap->lock);
+
+ if (hmu_is_in_heap(hmu, base_addr, end_addr)) {
+#if BH_ENABLE_GC_VERIFY != 0
+ hmu_verify(heap, hmu);
+#endif
+ ut = hmu_get_ut(hmu);
+ if (ut == HMU_VO) {
+ if (hmu_is_vo_freed(hmu)) {
+ bh_assert(0);
+ ret = GC_ERROR;
+ goto out;
+ }
+
+ size = hmu_get_size(hmu);
+
+ g_total_free += size;
+
+ heap->total_free_size += size;
+
+ if (!hmu_get_pinuse(hmu)) {
+ prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));
+
+ if (hmu_is_in_heap(prev, base_addr, end_addr)
+ && hmu_get_ut(prev) == HMU_FC) {
+ size += hmu_get_size(prev);
+ hmu = prev;
+ if (!unlink_hmu(heap, prev)) {
+ ret = GC_ERROR;
+ goto out;
+ }
+ }
+ }
+
+ next = (hmu_t *)((char *)hmu + size);
+ if (hmu_is_in_heap(next, base_addr, end_addr)) {
+ if (hmu_get_ut(next) == HMU_FC) {
+ size += hmu_get_size(next);
+ if (!unlink_hmu(heap, next)) {
+ ret = GC_ERROR;
+ goto out;
+ }
+ next = (hmu_t *)((char *)hmu + size);
+ }
+ }
+
+ if (!gci_add_fc(heap, hmu, size)) {
+ ret = GC_ERROR;
+ goto out;
+ }
+
+ if (hmu_is_in_heap(next, base_addr, end_addr)) {
+ hmu_unmark_pinuse(next);
+ }
+ }
+ else {
+ ret = GC_ERROR;
+ goto out;
+ }
+ ret = GC_SUCCESS;
+ goto out;
+ }
+
+out:
+ os_mutex_unlock(&heap->lock);
+ return ret;
+}
+
+void
+gc_dump_heap_stats(gc_heap_t *heap)
+{
+ os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
+ os_printf("total free: %" PRIu32 ", current: %" PRIu32
+ ", highmark: %" PRIu32 "\n",
+ heap->total_free_size, heap->current_size, heap->highmark_size);
+ os_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
+ g_total_malloc, g_total_free, g_total_malloc - g_total_free);
+}
+
+uint32
+gc_get_heap_highmark_size(gc_heap_t *heap)
+{
+ return heap->highmark_size;
+}
+
+void
+gci_dump(gc_heap_t *heap)
+{
+ hmu_t *cur = NULL, *end = NULL;
+ hmu_type_t ut;
+ gc_size_t size;
+ int i = 0, p, mark;
+ char inuse = 'U';
+
+ cur = (hmu_t *)heap->base_addr;
+ end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
+
+ while (cur < end) {
+ ut = hmu_get_ut(cur);
+ size = hmu_get_size(cur);
+ p = hmu_get_pinuse(cur);
+ mark = hmu_is_jo_marked(cur);
+
+ if (ut == HMU_VO)
+ inuse = 'V';
+ else if (ut == HMU_JO)
+ inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
+ else if (ut == HMU_FC)
+ inuse = 'F';
+
+ if (size == 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
+ os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
+ heap->is_heap_corrupted = true;
+ return;
+ }
+
+ os_printf("#%d %08" PRIx32 " %" PRIx32 " %d %d"
+ " %c %" PRId32 "\n",
+ i, (int32)((char *)cur - (char *)heap->base_addr), (int32)ut,
+ p, mark, inuse, (int32)hmu_obj_size(size));
+#if BH_ENABLE_GC_VERIFY != 0
+ if (inuse == 'V') {
+ gc_object_prefix_t *prefix = (gc_object_prefix_t *)(cur + 1);
+ os_printf("#%s:%d\n", prefix->file_name, prefix->line_no);
+ }
+#endif
+
+ cur = (hmu_t *)((char *)cur + size);
+ i++;
+ }
+
+ if (cur != end) {
+ os_printf("[GC_ERROR]Heap is corrupted, heap dump failed.\n");
+ heap->is_heap_corrupted = true;
+ }
+}
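
The allocator above works in 8-byte granules: gc_alloc_vo() pads the requested payload with the HMU header (plus a prefix and suffix when BH_ENABLE_GC_VERIFY is on) and rounds the total up with GC_ALIGN_8(), and small free chunks are filed into kfc_normal_list at slot size >> 3. A standalone arithmetic sketch of that rounding, assuming the non-verify build where the prefix/suffix sizes are zero (the payload size chosen is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* same rounding as GC_ALIGN_8() in ems_gc_internal.h */
    #define ALIGN_8(s) (((uint32_t)(s) + 7) & (uint32_t)~7)

    int main(void)
    {
        uint32_t hmu_size = 4;   /* sizeof(hmu_t): a single 32-bit header word */
        uint32_t payload = 21;   /* requested object size */
        uint32_t tot = ALIGN_8(hmu_size + payload);  /* -> 32 */
        uint32_t node_idx = tot >> 3;                /* -> kfc_normal_list slot 4 */

        printf("total=%u bytes, normal free-list slot=%u\n",
               (unsigned)tot, (unsigned)node_idx);
        return 0;
    }
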
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc.h
new file mode 100644
index 000000000..9a74d0046
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+/**
+ * @file ems_gc.h
+ * @date Wed Aug 3 10:46:38 2011
+ *
+ * @brief This file defines GC modules types and interfaces.
+ */
+
+#ifndef _EMS_GC_H
+#define _EMS_GC_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GC_HEAD_PADDING 4
+
+#define NULL_REF ((gc_object_t)NULL)
+
+#define GC_SUCCESS (0)
+#define GC_ERROR (-1)
+
+#define GC_TRUE (1)
+#define GC_FALSE (0)
+
+#define GC_MAX_HEAP_SIZE (256 * BH_KB)
+
+typedef void *gc_handle_t;
+typedef void *gc_object_t;
+typedef int64 gc_int64;
+typedef uint32 gc_uint32;
+typedef int32 gc_int32;
+typedef uint16 gc_uint16;
+typedef int16 gc_int16;
+typedef uint8 gc_uint8;
+typedef int8 gc_int8;
+typedef uint32 gc_size_t;
+
+typedef enum {
+ GC_STAT_TOTAL = 0,
+ GC_STAT_FREE,
+ GC_STAT_HIGHMARK,
+} GC_STAT_INDEX;
+
+/**
+ * GC initialization from a buffer, which is separated into
+ * two parts: the beginning of the buffer is used to create
+ * the heap structure, and the rest is used to create the
+ * actual pool data
+ *
+ * @param buf the buffer to be initialized to a heap
+ * @param buf_size the size of buffer
+ *
+ * @return gc handle if success, NULL otherwise
+ */
+gc_handle_t
+gc_init_with_pool(char *buf, gc_size_t buf_size);
+
+/**
+ * GC initialization from heap struct buffer and pool buffer
+ *
+ * @param struct_buf the struct buffer to create the heap structure
+ * @param struct_buf_size the size of struct buffer
+ * @param pool_buf the pool buffer to create pool data
+ * @param pool_buf_size the size of the pool buffer
+ *
+ * @return gc handle if success, NULL otherwise
+ */
+gc_handle_t
+gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
+ char *pool_buf, gc_size_t pool_buf_size);
+
+/**
+ * Destroy a heap which was initialized from a buffer
+ *
+ * @param handle handle of the heap to destroy
+ *
+ * @return GC_SUCCESS if success
+ * GC_ERROR for bad parameters or failed system resource freeing.
+ */
+int
+gc_destroy_with_pool(gc_handle_t handle);
+
+/**
+ * Return heap struct size
+ */
+uint32
+gc_get_heap_struct_size(void);
+
+/**
+ * Migrate heap from one pool buf to another pool buf
+ *
+ * @param handle handle of the new heap
+ * @param pool_buf_new the new pool buffer
+ * @param pool_buf_size the size of new pool buffer
+ *
+ * @return GC_SUCCESS if success, GC_ERROR otherwise
+ */
+int
+gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size);
+
+/**
+ * Check whether the heap is corrupted
+ *
+ * @param handle handle of the heap
+ *
+ * @return true if success, false otherwise
+ */
+bool
+gc_is_heap_corrupted(gc_handle_t handle);
+
+/**
+ * Get Heap Stats
+ *
+ * @param stats [out] integer array to save heap stats
+ * @param size [in] the size of stats
+ * @param mmt [in] type of heap, MMT_SHARED or MMT_INSTANCE
+ */
+void *
+gc_heap_stats(void *heap, uint32 *stats, int size);
+
+#if BH_ENABLE_GC_VERIFY == 0
+
+gc_object_t
+gc_alloc_vo(void *heap, gc_size_t size);
+
+gc_object_t
+gc_realloc_vo(void *heap, void *ptr, gc_size_t size);
+
+int
+gc_free_vo(void *heap, gc_object_t obj);
+
+#else /* else of BH_ENABLE_GC_VERIFY */
+
+gc_object_t
+gc_alloc_vo_internal(void *heap, gc_size_t size, const char *file, int line);
+
+gc_object_t
+gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file,
+ int line);
+
+int
+gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
+
+/* clang-format off */
+#define gc_alloc_vo(heap, size) \
+ gc_alloc_vo_internal(heap, size, __FILE__, __LINE__)
+
+#define gc_realloc_vo(heap, ptr, size) \
+ gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__)
+
+#define gc_free_vo(heap, obj) \
+ gc_free_vo_internal(heap, obj, __FILE__, __LINE__)
+/* clang-format on */
+
+#endif /* end of BH_ENABLE_GC_VERIFY */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
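
A minimal usage sketch of the pool-based allocator API declared above (not taken from the WAMR sources; the buffer size and the lack of error reporting are illustrative):

    #include "ems_gc.h"

    static char heap_buf[64 * 1024];

    void heap_example(void)
    {
        gc_handle_t heap = gc_init_with_pool(heap_buf, sizeof(heap_buf));
        if (!heap)
            return;

        gc_object_t obj = gc_alloc_vo(heap, 128);  /* allocate a 128-byte "VO" object */
        if (obj) {
            obj = gc_realloc_vo(heap, obj, 256);   /* may move the data; NULL on failure */
            if (obj)
                gc_free_vo(heap, obj);
        }

        gc_destroy_with_pool(heap);
    }

gc_init_with_struct_and_pool() covers the related case where the heap control structure and the pool come from two separate buffers.
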
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc_internal.h
new file mode 100644
index 000000000..e1ff9d61d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_gc_internal.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _EMS_GC_INTERNAL_H
+#define _EMS_GC_INTERNAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "bh_platform.h"
+#include "ems_gc.h"
+
+/* HMU (heap memory unit) basic block type */
+typedef enum hmu_type_enum {
+ HMU_TYPE_MIN = 0,
+ HMU_TYPE_MAX = 3,
+ HMU_JO = 3,
+ HMU_VO = 2,
+ HMU_FC = 1,
+ HMU_FM = 0
+} hmu_type_t;
+
+typedef struct hmu_struct {
+ gc_uint32 header;
+} hmu_t;
+
+#if BH_ENABLE_GC_VERIFY != 0
+
+#if UINTPTR_MAX > UINT32_MAX
+/* 2 prefix paddings for 64-bit pointer */
+#define GC_OBJECT_PREFIX_PADDING_CNT 2
+#else
+/* 3 prefix paddings for 32-bit pointer */
+#define GC_OBJECT_PREFIX_PADDING_CNT 3
+#endif
+#define GC_OBJECT_SUFFIX_PADDING_CNT 4
+#define GC_OBJECT_PADDING_VALUE (0x12345678)
+
+typedef struct gc_object_prefix {
+ const char *file_name;
+ gc_int32 line_no;
+ gc_int32 size;
+ gc_uint32 padding[GC_OBJECT_PREFIX_PADDING_CNT];
+} gc_object_prefix_t;
+
+typedef struct gc_object_suffix {
+ gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
+} gc_object_suffix_t;
+
+#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
+#define OBJ_SUFFIX_SIZE (sizeof(gc_object_suffix_t))
+
+void
+hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
+ const char *file_name, int line_no);
+
+void
+hmu_verify(void *vheap, hmu_t *hmu);
+
+#define SKIP_OBJ_PREFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_PREFIX_SIZE))
+#define SKIP_OBJ_SUFFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_SUFFIX_SIZE))
+
+#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
+
+#else /* else of BH_ENABLE_GC_VERIFY */
+
+#define OBJ_PREFIX_SIZE 0
+#define OBJ_SUFFIX_SIZE 0
+
+#define SKIP_OBJ_PREFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_PREFIX_SIZE))
+#define SKIP_OBJ_SUFFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_SUFFIX_SIZE))
+
+#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
+
+#endif /* end of BH_ENABLE_GC_VERIFY */
+
+#define hmu_obj_size(s) ((s)-OBJ_EXTRA_SIZE)
+
+#define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7)
+
+#define GC_SMALLEST_SIZE \
+ GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8)
+#define GC_GET_REAL_SIZE(x) \
+ GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \
+ + (((x) > 8) ? (x) : 8))
+
+/**
+ * hmu bit operation
+ */
+
+#define SETBIT(v, offset) (v) |= ((uint32)1 << (offset))
+#define GETBIT(v, offset) ((v) & ((uint32)1 << (offset)) ? 1 : 0)
+#define CLRBIT(v, offset) (v) &= (~((uint32)1 << (offset)))
+
+/* clang-format off */
+#define SETBITS(v, offset, size, value) \
+ do { \
+ (v) &= ~((((uint32)1 << size) - 1) << offset); \
+ (v) |= ((uint32)value << offset); \
+ } while (0)
+#define CLRBITS(v, offset, size) \
+ (v) &= ~((((uint32)1 << size) - 1) << offset)
+#define GETBITS(v, offset, size) \
+ (((v) & (((((uint32)1 << size) - 1) << offset))) >> offset)
+/* clang-format on */
+
+/**
+ * gc object layout definition
+ */
+
+#define HMU_SIZE (sizeof(hmu_t))
+
+#define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1))
+#define obj_to_hmu(obj) ((hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1)
+
+#define HMU_UT_SIZE 2
+#define HMU_UT_OFFSET 30
+
+/* clang-format off */
+#define hmu_get_ut(hmu) \
+ GETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE)
+#define hmu_set_ut(hmu, type) \
+ SETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE, type)
+#define hmu_is_ut_valid(tp) \
+ (tp >= HMU_TYPE_MIN && tp <= HMU_TYPE_MAX)
+/* clang-format on */
+
+/* P in use bit means the previous chunk is in use */
+#define HMU_P_OFFSET 29
+
+#define hmu_mark_pinuse(hmu) SETBIT((hmu)->header, HMU_P_OFFSET)
+#define hmu_unmark_pinuse(hmu) CLRBIT((hmu)->header, HMU_P_OFFSET)
+#define hmu_get_pinuse(hmu) GETBIT((hmu)->header, HMU_P_OFFSET)
+
+#define HMU_JO_VT_SIZE 27
+#define HMU_JO_VT_OFFSET 0
+#define HMU_JO_MB_OFFSET 28
+
+#define hmu_mark_jo(hmu) SETBIT((hmu)->header, HMU_JO_MB_OFFSET)
+#define hmu_unmark_jo(hmu) CLRBIT((hmu)->header, HMU_JO_MB_OFFSET)
+#define hmu_is_jo_marked(hmu) GETBIT((hmu)->header, HMU_JO_MB_OFFSET)
+
+/**
+ * The hmu size is divisible by 8, its lowest 3 bits are 0, so we only
+ * store its higher bits of bit [29..3], and bit [2..0] are not stored.
+ * After that, the maximal heap size can be enlarged from (1<<27) = 128MB
+ * to (1<<27) * 8 = 1GB.
+ */
+#define HMU_SIZE_SIZE 27
+#define HMU_SIZE_OFFSET 0
+
+#define HMU_VO_FB_OFFSET 28
+
+#define hmu_is_vo_freed(hmu) GETBIT((hmu)->header, HMU_VO_FB_OFFSET)
+#define hmu_unfree_vo(hmu) CLRBIT((hmu)->header, HMU_VO_FB_OFFSET)
+
+#define hmu_get_size(hmu) \
+ (GETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE) << 3)
+#define hmu_set_size(hmu, size) \
+ SETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, ((size) >> 3))
+
+/**
+ * HMU free chunk management
+ */
+
+#ifndef HMU_NORMAL_NODE_CNT
+#define HMU_NORMAL_NODE_CNT 32
+#endif
+#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3)
+#define HMU_IS_FC_NORMAL(size) ((size) < HMU_FC_NORMAL_MAX_SIZE)
+#if HMU_FC_NORMAL_MAX_SIZE >= GC_MAX_HEAP_SIZE
+#error "Too small GC_MAX_HEAP_SIZE"
+#endif
+
+typedef struct hmu_normal_node {
+ hmu_t hmu_header;
+ gc_int32 next_offset;
+} hmu_normal_node_t;
+
+typedef struct hmu_normal_list {
+ hmu_normal_node_t *next;
+} hmu_normal_list_t;
+
+static inline hmu_normal_node_t *
+get_hmu_normal_node_next(hmu_normal_node_t *node)
+{
+ return node->next_offset
+ ? (hmu_normal_node_t *)((uint8 *)node + node->next_offset)
+ : NULL;
+}
+
+static inline void
+set_hmu_normal_node_next(hmu_normal_node_t *node, hmu_normal_node_t *next)
+{
+ if (next) {
+ bh_assert((uint8 *)next - (uint8 *)node < INT32_MAX);
+ node->next_offset = (gc_int32)(intptr_t)((uint8 *)next - (uint8 *)node);
+ }
+ else {
+ node->next_offset = 0;
+ }
+}
+
+/**
+ * Define hmu_tree_node as a packed struct, since it is at the 4-byte
+ * aligned address and the size of hmu_head is 4, so in 64-bit target,
+ * the left/right/parent fields will be at 8-byte aligned address,
+ * we can access them directly.
+ */
+#if UINTPTR_MAX == UINT64_MAX
+#if defined(_MSC_VER)
+__pragma(pack(push, 1));
+#define __attr_packed
+#elif defined(__GNUC__) || defined(__clang__)
+#define __attr_packed __attribute__((packed))
+#else
+#error "packed attribute isn't used to define struct hmu_tree_node"
+#endif
+#else /* else of UINTPTR_MAX == UINT64_MAX */
+#define __attr_packed
+#endif
+
+typedef struct hmu_tree_node {
+ hmu_t hmu_header;
+ struct hmu_tree_node *left;
+ struct hmu_tree_node *right;
+ struct hmu_tree_node *parent;
+ gc_size_t size;
+} __attr_packed hmu_tree_node_t;
+
+#if UINTPTR_MAX == UINT64_MAX
+#if defined(_MSC_VER)
+__pragma(pack(pop));
+#endif
+#endif
+
+bh_static_assert(sizeof(hmu_tree_node_t) == 8 + 3 * sizeof(void *));
+bh_static_assert(offsetof(hmu_tree_node_t, left) == 4);
+
+#define ASSERT_TREE_NODE_ALIGNED_ACCESS(tree_node) \
+ do { \
+ bh_assert((((uintptr_t)&tree_node->left) & (sizeof(uintptr_t) - 1)) \
+ == 0); \
+ } while (0)
+
+typedef struct gc_heap_struct {
+ /* for double checking*/
+ gc_handle_t heap_id;
+
+ gc_uint8 *base_addr;
+ gc_size_t current_size;
+
+ korp_mutex lock;
+
+ hmu_normal_list_t kfc_normal_list[HMU_NORMAL_NODE_CNT];
+
+#if UINTPTR_MAX == UINT64_MAX
+ /* make kfc_tree_root_buf 4-byte aligned and not 8-byte aligned,
+ so kfc_tree_root's left/right/parent fields are 8-byte aligned
+ and we can access them directly */
+ uint32 __padding;
+#endif
+ uint8 kfc_tree_root_buf[sizeof(hmu_tree_node_t)];
+ /* point to kfc_tree_root_buf, the order in kfc_tree is:
+ size[left] <= size[cur] < size[right] */
+ hmu_tree_node_t *kfc_tree_root;
+
+    /* whether the heap is corrupted, e.g. the hmu nodes were modified
+       by the user */
+ bool is_heap_corrupted;
+
+ gc_size_t init_size;
+ gc_size_t highmark_size;
+ gc_size_t total_free_size;
+} gc_heap_t;
+
+/**
+ * MISC internal used APIs
+ */
+
+bool
+gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
+
+int
+gci_is_heap_valid(gc_heap_t *heap);
+
+/**
+ * Verify heap integrity
+ */
+void
+gci_verify_heap(gc_heap_t *heap);
+
+/**
+ * Dump heap nodes
+ */
+void
+gci_dump(gc_heap_t *heap);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _EMS_GC_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_hmu.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_hmu.c
new file mode 100644
index 000000000..41745e161
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_hmu.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "ems_gc_internal.h"
+
+#if BH_ENABLE_GC_VERIFY != 0
+
+/**
+ * Set default values for the prefix and suffix
+ * @param hmu should not be NULL and should have been correctly initialized
+ *        (except for the prefix and suffix parts)
+ * @param tot_size is passed explicitly because hmu_get_size cannot be used
+ *        yet. tot_size should not be smaller than OBJ_EXTRA_SIZE.
+ *        For VO, tot_size should equal the object's total size.
+ */
+void
+hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
+ const char *file_name, int line_no)
+{
+ gc_object_prefix_t *prefix = NULL;
+ gc_object_suffix_t *suffix = NULL;
+ gc_uint32 i = 0;
+
+ bh_assert(hmu);
+ bh_assert(hmu_get_ut(hmu) == HMU_JO || hmu_get_ut(hmu) == HMU_VO);
+ bh_assert(tot_size >= OBJ_EXTRA_SIZE);
+ bh_assert(!(tot_size & 7));
+ bh_assert(hmu_get_ut(hmu) != HMU_VO || hmu_get_size(hmu) >= tot_size);
+
+ prefix = (gc_object_prefix_t *)(hmu + 1);
+ suffix =
+ (gc_object_suffix_t *)((gc_uint8 *)hmu + tot_size - OBJ_SUFFIX_SIZE);
+ prefix->file_name = file_name;
+ prefix->line_no = line_no;
+ prefix->size = tot_size;
+
+ for (i = 0; i < GC_OBJECT_PREFIX_PADDING_CNT; i++) {
+ prefix->padding[i] = GC_OBJECT_PADDING_VALUE;
+ }
+
+ for (i = 0; i < GC_OBJECT_SUFFIX_PADDING_CNT; i++) {
+ suffix->padding[i] = GC_OBJECT_PADDING_VALUE;
+ }
+}
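
For reference, a sketch of the object layout these guard words produce; field names follow the code above, while the exact padding counts (GC_OBJECT_*_PADDING_CNT) are defined elsewhere in the allocator headers and are not repeated here:

/*
 *   +-----------------------+  <- hmu
 *   | hmu_t header (4 B)    |
 *   +-----------------------+  <- prefix = (gc_object_prefix_t *)(hmu + 1)
 *   | file_name, line_no,   |
 *   | size, padding[...]    |     each padding element = GC_OBJECT_PADDING_VALUE
 *   +-----------------------+
 *   | user payload          |
 *   +-----------------------+  <- suffix = (gc_uint8 *)hmu + tot_size - OBJ_SUFFIX_SIZE
 *   | padding[...]          |
 *   +-----------------------+
 *
 * hmu_verify() later re-reads both padding arrays and marks the heap as
 * corrupted if any element no longer equals GC_OBJECT_PADDING_VALUE.
 */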
+
+void
+hmu_verify(void *vheap, hmu_t *hmu)
+{
+ gc_heap_t *heap = (gc_heap_t *)vheap;
+ gc_object_prefix_t *prefix = NULL;
+ gc_object_suffix_t *suffix = NULL;
+ gc_uint32 i = 0;
+ hmu_type_t ut;
+ gc_size_t size = 0;
+ int is_padding_ok = 1;
+
+ bh_assert(hmu);
+ ut = hmu_get_ut(hmu);
+ bh_assert(hmu_is_ut_valid(ut));
+
+ prefix = (gc_object_prefix_t *)(hmu + 1);
+ size = prefix->size;
+ suffix = (gc_object_suffix_t *)((gc_uint8 *)hmu + size - OBJ_SUFFIX_SIZE);
+
+ if (ut == HMU_VO || ut == HMU_JO) {
+        /* check padding */
+ for (i = 0; i < GC_OBJECT_PREFIX_PADDING_CNT; i++) {
+ if (prefix->padding[i] != GC_OBJECT_PADDING_VALUE) {
+ is_padding_ok = 0;
+ break;
+ }
+ }
+ for (i = 0; i < GC_OBJECT_SUFFIX_PADDING_CNT; i++) {
+ if (suffix->padding[i] != GC_OBJECT_PADDING_VALUE) {
+ is_padding_ok = 0;
+ break;
+ }
+ }
+
+ if (!is_padding_ok) {
+ os_printf("Invalid padding for object created at %s:%d\n",
+ (prefix->file_name ? prefix->file_name : ""),
+ prefix->line_no);
+ heap->is_heap_corrupted = true;
+ }
+ }
+}
+
+#endif /* end of BH_ENABLE_GC_VERIFY */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_kfc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_kfc.c
new file mode 100644
index 000000000..80d202679
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/ems/ems_kfc.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "ems_gc_internal.h"
+
+static gc_handle_t
+gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
+{
+ hmu_tree_node_t *root = NULL, *q = NULL;
+ int ret;
+
+ memset(heap, 0, sizeof *heap);
+
+ ret = os_mutex_init(&heap->lock);
+ if (ret != BHT_OK) {
+ os_printf("[GC_ERROR]failed to init lock\n");
+ return NULL;
+ }
+
+    /* init all data structures */
+ heap->current_size = heap_max_size;
+ heap->base_addr = (gc_uint8 *)base_addr;
+ heap->heap_id = (gc_handle_t)heap;
+
+ heap->total_free_size = heap->current_size;
+ heap->highmark_size = 0;
+
+ root = heap->kfc_tree_root = (hmu_tree_node_t *)heap->kfc_tree_root_buf;
+ memset(root, 0, sizeof *root);
+ root->size = sizeof *root;
+ hmu_set_ut(&root->hmu_header, HMU_FC);
+ hmu_set_size(&root->hmu_header, sizeof *root);
+
+ q = (hmu_tree_node_t *)heap->base_addr;
+ memset(q, 0, sizeof *q);
+ hmu_set_ut(&q->hmu_header, HMU_FC);
+ hmu_set_size(&q->hmu_header, heap->current_size);
+
+ ASSERT_TREE_NODE_ALIGNED_ACCESS(q);
+ ASSERT_TREE_NODE_ALIGNED_ACCESS(root);
+
+ hmu_mark_pinuse(&q->hmu_header);
+ root->right = q;
+ q->parent = root;
+ q->size = heap->current_size;
+
+ bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE);
+
+ return heap;
+}
+
+gc_handle_t
+gc_init_with_pool(char *buf, gc_size_t buf_size)
+{
+ char *buf_end = buf + buf_size;
+ char *buf_aligned = (char *)(((uintptr_t)buf + 7) & (uintptr_t)~7);
+ char *base_addr = buf_aligned + sizeof(gc_heap_t);
+ gc_heap_t *heap = (gc_heap_t *)buf_aligned;
+ gc_size_t heap_max_size;
+
+ if (buf_size < APP_HEAP_SIZE_MIN) {
+ os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %" PRIu32 "\n",
+ buf_size, (uint32)APP_HEAP_SIZE_MIN);
+ return NULL;
+ }
+
+ base_addr =
+ (char *)(((uintptr_t)base_addr + 7) & (uintptr_t)~7) + GC_HEAD_PADDING;
+ heap_max_size = (uint32)(buf_end - base_addr) & (uint32)~7;
+
+#if WASM_ENABLE_MEMORY_TRACING != 0
+ os_printf("Heap created, total size: %u\n", buf_size);
+ os_printf(" heap struct size: %u\n", sizeof(gc_heap_t));
+ os_printf(" actual heap size: %u\n", heap_max_size);
+ os_printf(" padding bytes: %u\n",
+ buf_size - sizeof(gc_heap_t) - heap_max_size);
+#endif
+ return gc_init_internal(heap, base_addr, heap_max_size);
+}
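
A numeric walk-through of the pointer arithmetic above, with illustrative values only: sizeof(gc_heap_t) is taken as 512 and GC_HEAD_PADDING as 0, both of which are assumptions, since their real values come from the allocator headers and the build configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend the caller handed over a 64 KB buffer at a misaligned address. */
    uintptr_t buf = 0x20000003;
    uint32_t buf_size = 64 * 1024;
    uintptr_t buf_end = buf + buf_size;

    uintptr_t buf_aligned = (buf + 7) & ~(uintptr_t)7;      /* 0x20000008        */
    uintptr_t base_addr = buf_aligned + 512;                /* skip gc_heap_t    */
    base_addr = (base_addr + 7) & ~(uintptr_t)7;            /* re-align the pool */
    uint32_t heap_max_size = (uint32_t)(buf_end - base_addr) & ~7u;

    printf("heap struct at %#lx, pool at %#lx, usable pool %u bytes\n",
           (unsigned long)buf_aligned, (unsigned long)base_addr, heap_max_size);
    return 0;
}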
+
+gc_handle_t
+gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
+ char *pool_buf, gc_size_t pool_buf_size)
+{
+ gc_heap_t *heap = (gc_heap_t *)struct_buf;
+ char *base_addr = pool_buf + GC_HEAD_PADDING;
+ char *pool_buf_end = pool_buf + pool_buf_size;
+ gc_size_t heap_max_size;
+
+ if ((((uintptr_t)struct_buf) & 7) != 0) {
+ os_printf("[GC_ERROR]heap init struct buf not 8-byte aligned\n");
+ return NULL;
+ }
+
+ if (struct_buf_size < sizeof(gc_handle_t)) {
+ os_printf("[GC_ERROR]heap init struct buf size (%" PRIu32 ") < %zu\n",
+ struct_buf_size, sizeof(gc_handle_t));
+ return NULL;
+ }
+
+ if ((((uintptr_t)pool_buf) & 7) != 0) {
+ os_printf("[GC_ERROR]heap init pool buf not 8-byte aligned\n");
+ return NULL;
+ }
+
+ if (pool_buf_size < APP_HEAP_SIZE_MIN) {
+ os_printf("[GC_ERROR]heap init buf size (%" PRIu32 ") < %u\n",
+ pool_buf_size, APP_HEAP_SIZE_MIN);
+ return NULL;
+ }
+
+ heap_max_size = (uint32)(pool_buf_end - base_addr) & (uint32)~7;
+
+#if WASM_ENABLE_MEMORY_TRACING != 0
+ os_printf("Heap created, total size: %u\n",
+ struct_buf_size + pool_buf_size);
+ os_printf(" heap struct size: %u\n", sizeof(gc_heap_t));
+ os_printf(" actual heap size: %u\n", heap_max_size);
+ os_printf(" padding bytes: %u\n", pool_buf_size - heap_max_size);
+#endif
+ return gc_init_internal(heap, base_addr, heap_max_size);
+}
+
+int
+gc_destroy_with_pool(gc_handle_t handle)
+{
+ gc_heap_t *heap = (gc_heap_t *)handle;
+ int ret = GC_SUCCESS;
+
+#if BH_ENABLE_GC_VERIFY != 0
+ hmu_t *cur = (hmu_t *)heap->base_addr;
+ hmu_t *end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
+
+ if (!heap->is_heap_corrupted
+ && (hmu_t *)((char *)cur + hmu_get_size(cur)) != end) {
+ os_printf("Memory leak detected:\n");
+ gci_dump(heap);
+ ret = GC_ERROR;
+ }
+#endif
+
+ os_mutex_destroy(&heap->lock);
+ memset(heap, 0, sizeof(gc_heap_t));
+ return ret;
+}
+
+uint32
+gc_get_heap_struct_size()
+{
+ return sizeof(gc_heap_t);
+}
+
+static void
+adjust_ptr(uint8 **p_ptr, intptr_t offset)
+{
+ if (*p_ptr)
+ *p_ptr = (uint8 *)((intptr_t)(*p_ptr) + offset);
+}
+
+int
+gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size)
+{
+ gc_heap_t *heap = (gc_heap_t *)handle;
+ char *base_addr_new = pool_buf_new + GC_HEAD_PADDING;
+ char *pool_buf_end = pool_buf_new + pool_buf_size;
+ intptr_t offset = (uint8 *)base_addr_new - (uint8 *)heap->base_addr;
+ hmu_t *cur = NULL, *end = NULL;
+ hmu_tree_node_t *tree_node;
+ uint8 **p_left, **p_right, **p_parent;
+ gc_size_t heap_max_size, size;
+
+ if ((((uintptr_t)pool_buf_new) & 7) != 0) {
+ os_printf("[GC_ERROR]heap migrate pool buf not 8-byte aligned\n");
+ return GC_ERROR;
+ }
+
+ heap_max_size = (uint32)(pool_buf_end - base_addr_new) & (uint32)~7;
+
+ if (pool_buf_end < base_addr_new || heap_max_size < heap->current_size) {
+ os_printf("[GC_ERROR]heap migrate invlaid pool buf size\n");
+ return GC_ERROR;
+ }
+
+ if (offset == 0)
+ return 0;
+
+ if (heap->is_heap_corrupted) {
+ os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
+ return GC_ERROR;
+ }
+
+ heap->base_addr = (uint8 *)base_addr_new;
+
+ ASSERT_TREE_NODE_ALIGNED_ACCESS(heap->kfc_tree_root);
+
+ p_left = (uint8 **)((uint8 *)heap->kfc_tree_root
+ + offsetof(hmu_tree_node_t, left));
+ p_right = (uint8 **)((uint8 *)heap->kfc_tree_root
+ + offsetof(hmu_tree_node_t, right));
+ p_parent = (uint8 **)((uint8 *)heap->kfc_tree_root
+ + offsetof(hmu_tree_node_t, parent));
+ adjust_ptr(p_left, offset);
+ adjust_ptr(p_right, offset);
+ adjust_ptr(p_parent, offset);
+
+ cur = (hmu_t *)heap->base_addr;
+ end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
+
+ while (cur < end) {
+ size = hmu_get_size(cur);
+
+ if (size <= 0 || size > (uint32)((uint8 *)end - (uint8 *)cur)) {
+ os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
+ heap->is_heap_corrupted = true;
+ return GC_ERROR;
+ }
+
+ if (hmu_get_ut(cur) == HMU_FC && !HMU_IS_FC_NORMAL(size)) {
+ tree_node = (hmu_tree_node_t *)cur;
+
+ ASSERT_TREE_NODE_ALIGNED_ACCESS(tree_node);
+
+ p_left = (uint8 **)((uint8 *)tree_node
+ + offsetof(hmu_tree_node_t, left));
+ p_right = (uint8 **)((uint8 *)tree_node
+ + offsetof(hmu_tree_node_t, right));
+ p_parent = (uint8 **)((uint8 *)tree_node
+ + offsetof(hmu_tree_node_t, parent));
+ adjust_ptr(p_left, offset);
+ adjust_ptr(p_right, offset);
+ if (tree_node->parent != heap->kfc_tree_root)
+                /* The root node belongs to the heap structure;
+                   it is a fixed part and is never moved. */
+ adjust_ptr(p_parent, offset);
+ }
+ cur = (hmu_t *)((char *)cur + size);
+ }
+
+ if (cur != end) {
+ os_printf("[GC_ERROR]Heap is corrupted, heap migrate failed.\n");
+ heap->is_heap_corrupted = true;
+ return GC_ERROR;
+ }
+
+ return 0;
+}
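
A hedged caller-side sketch of the migration flow. gc_migrate only rebases the pointers it finds inside the heap, so this sketch assumes the caller has already copied the old pool bytes into the new buffer before calling it, which matches how the function walks the new base address; the helper name and the copy step are illustrative, not part of this file:

#include <string.h>

/* Local stand-ins for the allocator's typedefs (gc_handle_t is used as an
   opaque pointer above; gc_size_t is treated as a 32-bit size here). */
typedef void *gc_handle_t;
typedef unsigned int gc_size_t;

int gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size);

static int
move_heap_pool(gc_handle_t heap, char *old_pool, char *new_pool,
               gc_size_t pool_size)
{
    memcpy(new_pool, old_pool, pool_size);        /* relocate the raw bytes  */
    return gc_migrate(heap, new_pool, pool_size); /* then fix tree pointers  */
}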
+
+bool
+gc_is_heap_corrupted(gc_handle_t handle)
+{
+ gc_heap_t *heap = (gc_heap_t *)handle;
+
+ return heap->is_heap_corrupted ? true : false;
+}
+
+#if BH_ENABLE_GC_VERIFY != 0
+void
+gci_verify_heap(gc_heap_t *heap)
+{
+ hmu_t *cur = NULL, *end = NULL;
+
+ bh_assert(heap && gci_is_heap_valid(heap));
+ cur = (hmu_t *)heap->base_addr;
+ end = (hmu_t *)(heap->base_addr + heap->current_size);
+ while (cur < end) {
+ hmu_verify(heap, cur);
+ cur = (hmu_t *)((gc_uint8 *)cur + hmu_get_size(cur));
+ }
+ bh_assert(cur == end);
+}
+#endif
+
+void *
+gc_heap_stats(void *heap_arg, uint32 *stats, int size)
+{
+ int i;
+ gc_heap_t *heap = (gc_heap_t *)heap_arg;
+
+ for (i = 0; i < size; i++) {
+ switch (i) {
+ case GC_STAT_TOTAL:
+ stats[i] = heap->current_size;
+ break;
+ case GC_STAT_FREE:
+ stats[i] = heap->total_free_size;
+ break;
+ case GC_STAT_HIGHMARK:
+ stats[i] = heap->highmark_size;
+ break;
+ default:
+ break;
+ }
+ }
+ return heap;
+}
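
A short usage sketch for the stats query. The GC_STAT_* indices below are assumed to be the first three values of the stats enum in ems_gc.h, which is consistent with the switch above and with mem_allocator_get_alloc_info passing a size of 3 later in this diff:

#include <stdint.h>
#include <stdio.h>

void *gc_heap_stats(void *heap_arg, uint32_t *stats, int size); /* uint32 taken as uint32_t */

enum { GC_STAT_TOTAL = 0, GC_STAT_FREE, GC_STAT_HIGHMARK }; /* assumed order */

static void
print_heap_stats(void *heap)
{
    uint32_t stats[3] = { 0 };

    gc_heap_stats(heap, stats, 3);
    printf("total %u, free %u, highmark %u\n",
           stats[GC_STAT_TOTAL], stats[GC_STAT_FREE], stats[GC_STAT_HIGHMARK]);
}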
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.c
new file mode 100644
index 000000000..f952c1858
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "mem_alloc.h"
+
+#if DEFAULT_MEM_ALLOCATOR == MEM_ALLOCATOR_EMS
+
+#include "ems/ems_gc.h"
+
+mem_allocator_t
+mem_allocator_create(void *mem, uint32_t size)
+{
+ return gc_init_with_pool((char *)mem, size);
+}
+
+mem_allocator_t
+mem_allocator_create_with_struct_and_pool(void *struct_buf,
+ uint32_t struct_buf_size,
+ void *pool_buf,
+ uint32_t pool_buf_size)
+{
+ return gc_init_with_struct_and_pool((char *)struct_buf, struct_buf_size,
+ pool_buf, pool_buf_size);
+}
+
+int
+mem_allocator_destroy(mem_allocator_t allocator)
+{
+ return gc_destroy_with_pool((gc_handle_t)allocator);
+}
+
+uint32
+mem_allocator_get_heap_struct_size()
+{
+ return gc_get_heap_struct_size();
+}
+
+void *
+mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
+{
+ return gc_alloc_vo((gc_handle_t)allocator, size);
+}
+
+void *
+mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size)
+{
+ return gc_realloc_vo((gc_handle_t)allocator, ptr, size);
+}
+
+void
+mem_allocator_free(mem_allocator_t allocator, void *ptr)
+{
+ if (ptr)
+ gc_free_vo((gc_handle_t)allocator, ptr);
+}
+
+int
+mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new,
+ uint32 pool_buf_size)
+{
+ return gc_migrate((gc_handle_t)allocator, pool_buf_new, pool_buf_size);
+}
+
+bool
+mem_allocator_is_heap_corrupted(mem_allocator_t allocator)
+{
+ return gc_is_heap_corrupted((gc_handle_t)allocator);
+}
+
+bool
+mem_allocator_get_alloc_info(mem_allocator_t allocator, void *mem_alloc_info)
+{
+ gc_heap_stats((gc_handle_t)allocator, mem_alloc_info, 3);
+ return true;
+}
+
+#else /* else of DEFAULT_MEM_ALLOCATOR */
+
+#include "tlsf/tlsf.h"
+
+typedef struct mem_allocator_tlsf {
+ tlsf_t tlsf;
+ korp_mutex lock;
+} mem_allocator_tlsf;
+
+mem_allocator_t
+mem_allocator_create(void *mem, uint32_t size)
+{
+ mem_allocator_tlsf *allocator_tlsf;
+ tlsf_t tlsf;
+ char *mem_aligned = (char *)(((uintptr_t)mem + 3) & ~3);
+
+ if (size < 1024) {
+ printf("Create mem allocator failed: pool size must be "
+ "at least 1024 bytes.\n");
+ return NULL;
+ }
+
+ size -= mem_aligned - (char *)mem;
+ mem = (void *)mem_aligned;
+
+ tlsf = tlsf_create_with_pool(mem, size);
+ if (!tlsf) {
+ printf("Create mem allocator failed: tlsf_create_with_pool failed.\n");
+ return NULL;
+ }
+
+ allocator_tlsf = tlsf_malloc(tlsf, sizeof(mem_allocator_tlsf));
+ if (!allocator_tlsf) {
+ printf("Create mem allocator failed: tlsf_malloc failed.\n");
+ tlsf_destroy(tlsf);
+ return NULL;
+ }
+
+ allocator_tlsf->tlsf = tlsf;
+
+ if (os_mutex_init(&allocator_tlsf->lock)) {
+ printf("Create mem allocator failed: tlsf_malloc failed.\n");
+ tlsf_free(tlsf, allocator_tlsf);
+ tlsf_destroy(tlsf);
+ return NULL;
+ }
+
+ return allocator_tlsf;
+}
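
The 4-byte alignment trim at the top of this function in a concrete case, as a standalone arithmetic check with an illustrative address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t mem = 0x1000000D; /* caller's (misaligned) pool start */
    uint32_t size = 4096;

    uintptr_t mem_aligned = (mem + 3) & ~(uintptr_t)3; /* 0x10000010      */
    size -= (uint32_t)(mem_aligned - mem);             /* 4096 - 3 = 4093 */

    assert(mem_aligned == 0x10000010 && size == 4093);
    return 0;
}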
+
+void
+mem_allocator_destroy(mem_allocator_t allocator)
+{
+ mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+ tlsf_t tlsf = allocator_tlsf->tlsf;
+
+ os_mutex_destroy(&allocator_tlsf->lock);
+ tlsf_free(tlsf, allocator_tlsf);
+ tlsf_destroy(tlsf);
+}
+
+void *
+mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
+{
+ void *ret;
+ mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+
+ if (size == 0)
+        /* tlsf doesn't allow allocating 0 bytes */
+ size = 1;
+
+ os_mutex_lock(&allocator_tlsf->lock);
+ ret = tlsf_malloc(allocator_tlsf->tlsf, size);
+ os_mutex_unlock(&allocator_tlsf->lock);
+ return ret;
+}
+
+void *
+mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size)
+{
+ void *ret;
+ mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+
+ if (size == 0)
+        /* tlsf doesn't allow allocating 0 bytes */
+ size = 1;
+
+ os_mutex_lock(&allocator_tlsf->lock);
+ ret = tlsf_realloc(allocator_tlsf->tlsf, ptr, size);
+ os_mutex_unlock(&allocator_tlsf->lock);
+ return ret;
+}
+
+void
+mem_allocator_free(mem_allocator_t allocator, void *ptr)
+{
+ if (ptr) {
+ mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+ os_mutex_lock(&allocator_tlsf->lock);
+ tlsf_free(allocator_tlsf->tlsf, ptr);
+ os_mutex_unlock(&allocator_tlsf->lock);
+ }
+}
+
+int
+mem_allocator_migrate(mem_allocator_t allocator, mem_allocator_t allocator_old)
+{
+ return tlsf_migrate((mem_allocator_tlsf *)allocator,
+ (mem_allocator_tlsf *)allocator_old);
+}
+
+#endif /* end of DEFAULT_MEM_ALLOCATOR */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.cmake
new file mode 100644
index 000000000..c0b4157f4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.cmake
@@ -0,0 +1,19 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+
+set (MEM_ALLOC_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories(${MEM_ALLOC_DIR})
+
+if (WAMR_BUILD_GC_VERIFY EQUAL 1)
+ add_definitions (-DBH_ENABLE_GC_VERIFY=1)
+endif ()
+
+file (GLOB_RECURSE source_all
+ ${MEM_ALLOC_DIR}/ems/*.c
+ ${MEM_ALLOC_DIR}/tlsf/*.c
+ ${MEM_ALLOC_DIR}/mem_alloc.c)
+
+set (MEM_ALLOC_SHARED_SOURCE ${source_all})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.h
new file mode 100644
index 000000000..1f35b2792
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/mem-alloc/mem_alloc.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef __MEM_ALLOC_H
+#define __MEM_ALLOC_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void *mem_allocator_t;
+
+mem_allocator_t
+mem_allocator_create(void *mem, uint32_t size);
+
+mem_allocator_t
+mem_allocator_create_with_struct_and_pool(void *struct_buf,
+ uint32_t struct_buf_size,
+ void *pool_buf,
+ uint32_t pool_buf_size);
+
+int
+mem_allocator_destroy(mem_allocator_t allocator);
+
+uint32
+mem_allocator_get_heap_struct_size(void);
+
+void *
+mem_allocator_malloc(mem_allocator_t allocator, uint32_t size);
+
+void *
+mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size);
+
+void
+mem_allocator_free(mem_allocator_t allocator, void *ptr);
+
+int
+mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new,
+ uint32 pool_buf_size);
+
+bool
+mem_allocator_is_heap_corrupted(mem_allocator_t allocator);
+
+bool
+mem_allocator_get_alloc_info(mem_allocator_t allocator, void *mem_alloc_info);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef __MEM_ALLOC_H */
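
A minimal end-to-end usage sketch of this interface. The pool size, the static buffer and the error handling are illustrative; only the mem_allocator_* names come from the header above:

#include <stdint.h>
#include "mem_alloc.h"

static char heap_pool[64 * 1024]; /* static pool handed to the allocator */

int main(void)
{
    mem_allocator_t allocator =
        mem_allocator_create(heap_pool, (uint32_t)sizeof(heap_pool));
    if (!allocator)
        return 1;

    void *p = mem_allocator_malloc(allocator, 128);
    if (p) {
        void *q = mem_allocator_realloc(allocator, p, 256); /* may move the block */
        mem_allocator_free(allocator, q ? q : p);
    }

    mem_allocator_destroy(allocator);
    return 0;
}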
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/README.md b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/README.md
new file mode 100644
index 000000000..de6f1cc68
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/README.md
@@ -0,0 +1,10 @@
+This folder contains the platform abstraction layer for multiple platforms. To support a new platform, create a new folder here and implement all the APIs defined in the [`include`](./include) folder.
+
+
+
+Refer to [port_wamr.md](../../../doc/port_wamr.md) for how to port WAMR to a target platform.
+
+
+
+
+
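
As a hedged illustration of what implementing those APIs looks like, the smallest useful surface resembles the AliOS port added later in this diff: platform init/destroy plus the memory shims declared in platform_api_vmcore.h. The folder name and the libc-backed bodies below are placeholders, not a working port:

/* my_new_platform/my_new_platform.c -- skeleton only */
#include <stdlib.h>
#include "platform_api_vmcore.h"

int
bh_platform_init()
{
    return 0;
}

void
bh_platform_destroy()
{}

void *
os_malloc(unsigned size)
{
    return malloc(size);
}

void *
os_realloc(void *ptr, unsigned size)
{
    return realloc(ptr, size);
}

void
os_free(void *ptr)
{
    free(ptr);
}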
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_platform.c
new file mode 100644
index 000000000..c9f5f17e6
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_platform.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+int
+os_thread_sys_init();
+
+void
+os_thread_sys_destroy();
+
+int
+bh_platform_init()
+{
+ return os_thread_sys_init();
+}
+
+void
+bh_platform_destroy()
+{
+ os_thread_sys_destroy();
+}
+
+void *
+os_malloc(unsigned size)
+{
+ return NULL;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return NULL;
+}
+
+void
+os_free(void *ptr)
+{}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ if ((uint64)size >= UINT32_MAX)
+ return NULL;
+ return BH_MALLOC((uint32)size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+    BH_FREE(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush()
+{}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_thread.c
new file mode 100644
index 000000000..0efd2f394
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_thread.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* clang-format off */
+#define bh_assert(v) do { \
+ if (!(v)) { \
+ printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
+ #v, __FILE__, __LINE__); \
+ aos_reboot(); \
+ while (1); \
+ } \
+} while (0)
+/* clang-format on */
+
+struct os_thread_data;
+typedef struct os_thread_wait_node {
+ aos_sem_t sem;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+typedef struct os_thread_data {
+ /* Thread body */
+ aos_task_t thread;
+ /* Thread start routine */
+ thread_start_routine_t start_routine;
+ /* Thread start routine argument */
+ void *arg;
+ /* Thread local root */
+ void *tlr;
+ /* Wait node of current thread */
+ os_thread_wait_node wait_node;
+ /* Lock for waiting list */
+ aos_mutex_t wait_list_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+} os_thread_data;
+
+static bool is_thread_sys_inited = false;
+
+/* Thread data of supervisor thread */
+static os_thread_data supervisor_thread_data;
+
+/* Thread data key */
+static aos_task_key_t thread_data_key;
+
+/* Thread name index */
+static int thread_name_index;
+
+int
+os_thread_sys_init()
+{
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+ if (aos_task_key_create(&thread_data_key) != 0)
+ return BHT_ERROR;
+
+ /* Initialize supervisor thread data */
+ memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
+
+ if (aos_sem_new(&supervisor_thread_data.wait_node.sem, 1) != 0) {
+ aos_task_key_delete(thread_data_key);
+ return BHT_ERROR;
+ }
+
+ if (aos_task_setspecific(thread_data_key, &supervisor_thread_data)) {
+ aos_sem_free(&supervisor_thread_data.wait_node.sem);
+ aos_task_key_delete(thread_data_key);
+ return BHT_ERROR;
+ }
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+}
+
+void
+os_thread_sys_destroy()
+{
+ if (is_thread_sys_inited) {
+ aos_task_key_delete(thread_data_key);
+ aos_sem_free(&supervisor_thread_data.wait_node.sem);
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ return aos_task_getspecific(thread_data_key);
+}
+
+static void
+os_thread_cleanup(void)
+{
+ os_thread_data *thread_data = thread_data_current();
+ os_thread_wait_list thread_wait_list;
+ aos_mutex_t *wait_list_lock;
+ aos_sem_t *wait_node_sem;
+
+ bh_assert(thread_data != NULL);
+ wait_list_lock = &thread_data->wait_list_lock;
+ thread_wait_list = thread_data->thread_wait_list;
+ wait_node_sem = &thread_data->wait_node.sem;
+
+    /* Free the thread data first */
+ BH_FREE(thread_data);
+
+ aos_mutex_lock(wait_list_lock, AOS_WAIT_FOREVER);
+ if (thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ aos_sem_signal(&head->sem);
+ head = next;
+ }
+ }
+ aos_mutex_unlock(wait_list_lock);
+
+ /* Free sem and lock */
+ aos_sem_free(wait_node_sem);
+ aos_mutex_free(wait_list_lock);
+}
+
+static void
+os_thread_wrapper(void *arg)
+{
+ os_thread_data *thread_data = arg;
+
+ /* Set thread custom data */
+ if (!aos_task_setspecific(thread_data_key, thread_data))
+ thread_data->start_routine(thread_data->arg);
+
+ os_thread_cleanup();
+}
+
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(p_tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ os_thread_data *thread_data;
+ char thread_name[32];
+
+ if (!p_tid || !stack_size)
+ return BHT_ERROR;
+
+ /* Create and initialize thread data */
+ if (!(thread_data = BH_MALLOC(sizeof(os_thread_data))))
+ return BHT_ERROR;
+
+ memset(thread_data, 0, sizeof(os_thread_data));
+
+ thread_data->start_routine = start;
+ thread_data->arg = arg;
+
+ if (aos_sem_new(&thread_data->wait_node.sem, 1) != 0)
+ goto fail1;
+
+ if (aos_mutex_new(&thread_data->wait_list_lock))
+ goto fail2;
+
+ snprintf(thread_name, sizeof(thread_name), "%s%d", "wasm-thread-",
+ ++thread_name_index);
+
+ /* Create the thread */
+ if (aos_task_new_ext((aos_task_t *)thread_data, thread_name,
+ os_thread_wrapper, thread_data, stack_size, prio))
+ goto fail3;
+
+ aos_msleep(10);
+ *p_tid = (korp_tid)thread_data;
+ return BHT_OK;
+
+fail3:
+ aos_mutex_free(&thread_data->wait_list_lock);
+fail2:
+ aos_sem_free(&thread_data->wait_node.sem);
+fail1:
+ BH_FREE(thread_data);
+ return BHT_ERROR;
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)aos_task_getspecific(thread_data_key);
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ (void)value_ptr;
+ os_thread_data *thread_data, *curr_thread_data;
+
+ /* Get thread data of current thread */
+ curr_thread_data = thread_data_current();
+ curr_thread_data->wait_node.next = NULL;
+
+ /* Get thread data */
+ thread_data = (os_thread_data *)thread;
+
+ aos_mutex_lock(&thread_data->wait_list_lock, AOS_WAIT_FOREVER);
+ if (!thread_data->thread_wait_list)
+ thread_data->thread_wait_list = &curr_thread_data->wait_node;
+ else {
+ /* Add to end of waiting list */
+ os_thread_wait_node *p = thread_data->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = &curr_thread_data->wait_node;
+ }
+ aos_mutex_unlock(&thread_data->wait_list_lock);
+
+    /* Wait on the sem */
+ aos_sem_wait(&curr_thread_data->wait_node.sem, AOS_WAIT_FOREVER);
+
+ return BHT_OK;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ return aos_mutex_new(mutex) == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ aos_mutex_free(mutex);
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ return aos_mutex_lock(mutex, AOS_WAIT_FOREVER);
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ return aos_mutex_unlock(mutex);
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ if (aos_mutex_new(&cond->wait_list_lock) != 0)
+ return BHT_ERROR;
+
+ cond->thread_wait_list = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ aos_mutex_free(&cond->wait_list_lock);
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed,
+ uint32 mills)
+{
+ os_thread_wait_node *node = &thread_data_current()->wait_node;
+
+ node->next = NULL;
+
+ aos_mutex_lock(&cond->wait_list_lock, AOS_WAIT_FOREVER);
+ if (!cond->thread_wait_list)
+ cond->thread_wait_list = node;
+ else {
+ /* Add to end of wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ aos_mutex_unlock(&cond->wait_list_lock);
+
+ /* Unlock mutex, wait sem and lock mutex again */
+ aos_mutex_unlock(mutex);
+ aos_sem_wait(&node->sem, timed ? mills : AOS_WAIT_FOREVER);
+ aos_mutex_lock(mutex, AOS_WAIT_FOREVER);
+
+ /* Remove wait node from wait list */
+ aos_mutex_lock(&cond->wait_list_lock, AOS_WAIT_FOREVER);
+ if (cond->thread_wait_list == node)
+ cond->thread_wait_list = node->next;
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+ }
+ aos_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ if (useconds == BHT_WAIT_FOREVER) {
+ return os_cond_wait_internal(cond, mutex, false, 0);
+ }
+ else {
+ uint64 mills_64 = useconds / 1000;
+ uint32 mills;
+
+ if (mills_64 < (uint64)(UINT32_MAX - 1)) {
+            mills = (uint32)mills_64;
+ }
+ else {
+ mills = UINT32_MAX - 1;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+ return os_cond_wait_internal(cond, mutex, true, mills);
+ }
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ aos_mutex_lock(&cond->wait_list_lock, AOS_WAIT_FOREVER);
+ if (cond->thread_wait_list)
+ aos_sem_signal(&cond->thread_wait_list->sem);
+ aos_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+ /* TODO: get alios stack boundary */
+ return NULL;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_time.c
new file mode 100644
index 000000000..549252738
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/alios_time.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ return (uint64)aos_now_ms() * 1000;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/platform_internal.h
new file mode 100644
index 000000000..f6a4ba11e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/platform_internal.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <aos/kernel.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef BH_PLATFORM_ALIOS_THINGS
+#define BH_PLATFORM_ALIOS_THINGS
+#endif
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 30
+
+typedef aos_task_t korp_thread;
+typedef korp_thread *korp_tid;
+typedef aos_task_t *aos_tid_t;
+typedef aos_mutex_t korp_mutex;
+typedef aos_sem_t korp_sem;
+
+struct os_thread_wait_node;
+typedef struct os_thread_wait_node *os_thread_wait_list;
+typedef struct korp_cond {
+ aos_mutex_t wait_list_lock;
+ os_thread_wait_list thread_wait_list;
+} korp_cond;
+
+#define os_printf printf
+#define os_vprintf vprintf
+
+/* clang-format off */
+/* math functions which are not provided by the os */
+double sqrt(double x);
+double floor(double x);
+double ceil(double x);
+double fmin(double x, double y);
+double fmax(double x, double y);
+double rint(double x);
+double fabs(double x);
+double trunc(double x);
+float sqrtf(float x);
+float floorf(float x);
+float ceilf(float x);
+float fminf(float x, float y);
+float fmaxf(float x, float y);
+float rintf(float x);
+float fabsf(float x);
+float truncf(float x);
+int signbit(double x);
+int isnan(double x);
+/* clang-format on */
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/shared_platform.cmake
new file mode 100644
index 000000000..a3aaddd4a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/alios/shared_platform.cmake
@@ -0,0 +1,16 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_ALIOS_THINGS)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/math/platform_api_math.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_init.c
new file mode 100644
index 000000000..1e7cf4447
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_init.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#define API_NOT_SUPPORT_ERROR(API, VERSION) \
+ __android_log_print(ANDROID_LOG_ERROR, "wasm_runtime::", \
+ "%s() is only supported when __ANDROID_API__ >= %s.", \
+ #API, #VERSION);
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ va_start(ap, fmt);
+ ret = __android_log_vprint(ANDROID_LOG_INFO, "wasm_runtime::", fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *fmt, va_list ap)
+{
+ return __android_log_vprint(ANDROID_LOG_INFO, "wasm_runtime::", fmt, ap);
+}
+
+#if __ANDROID_API__ < 19
+
+int
+futimens(int __dir_fd, const struct timespec __times[2])
+{
+ API_NOT_SUPPORT_ERROR(futimens, 19);
+ return -1;
+}
+
+#endif
+
+#if __ANDROID_API__ < 21
+
+int
+posix_fallocate(int __fd, off_t __offset, off_t __length)
+{
+ API_NOT_SUPPORT_ERROR(posix_fallocate, 21);
+ return -1;
+}
+
+int
+posix_fadvise(int fd, off_t offset, off_t len, int advice)
+{
+ API_NOT_SUPPORT_ERROR(posix_fadvise, 21);
+ return -1;
+}
+
+int
+linkat(int __old_dir_fd, const char *__old_path, int __new_dir_fd,
+ const char *__new_path, int __flags)
+{
+ API_NOT_SUPPORT_ERROR(linkat, 21);
+ return -1;
+}
+
+int
+symlinkat(const char *__old_path, int __new_dir_fd, const char *__new_path)
+{
+ API_NOT_SUPPORT_ERROR(symlinkat, 21);
+ return -1;
+}
+
+ssize_t
+readlinkat(int __dir_fd, const char *__path, char *__buf, size_t __buf_size)
+{
+ API_NOT_SUPPORT_ERROR(readlinkat, 21);
+ return -1;
+}
+
+int
+accept4(int __fd, struct sockaddr *__addr, socklen_t *__addr_length,
+ int __flags)
+{
+ API_NOT_SUPPORT_ERROR(accept4, 21);
+ return -1;
+}
+
+int
+dup3(int oldfd, int newfd, int cloexec)
+{
+ API_NOT_SUPPORT_ERROR(dup3, 21);
+ return -1;
+}
+
+int
+pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
+{
+ API_NOT_SUPPORT_ERROR(pthread_condattr_setclock, 21);
+ return -1;
+}
+
+int
+epoll_create1(int flags)
+{
+ API_NOT_SUPPORT_ERROR(epoll_create1, 21);
+ return -1;
+}
+
+int
+epoll_pwait(int epfd, struct epoll_event *events, int maxevents, int timeout,
+ const sigset_t *sigmask)
+{
+ API_NOT_SUPPORT_ERROR(epoll_pwait, 21);
+ return -1;
+}
+
+int
+inotify_init1(int flags)
+{
+ API_NOT_SUPPORT_ERROR(inotify_init1, 21);
+ return -1;
+}
+
+#endif
+
+#if __ANDROID_API__ < 23
+
+long
+telldir(DIR *__dir)
+{
+ API_NOT_SUPPORT_ERROR(telldir, 23);
+ return -1;
+}
+
+void
+seekdir(DIR *__dir, long __location)
+{
+ API_NOT_SUPPORT_ERROR(seekdir, 23);
+}
+
+#endif
+
+#if __ANDROID_API__ < 24
+
+ssize_t
+preadv(int __fd, const struct iovec *__iov, int __count, off_t __offset)
+{
+ API_NOT_SUPPORT_ERROR(preadv, 24);
+ return -1;
+}
+
+ssize_t
+pwritev(int __fd, const struct iovec *__iov, int __count, off_t __offset)
+{
+ API_NOT_SUPPORT_ERROR(pwritev, 24);
+ return -1;
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_internal.h
new file mode 100644
index 000000000..521fa0c55
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/platform_internal.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <pthread.h>
+#include <signal.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sched.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+#include <android/log.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_ANDROID
+#define BH_PLATFORM_ANDROID
+#endif
+
+/* Stack size of the applet threads' native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define os_thread_local_attribute __thread
+
+#define bh_socket_t int
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
+ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \
+ || defined(BUILD_TARGET_RISCV64_LP64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+#define os_alloca alloca
+
+#define os_getpagesize getpagesize
+
+typedef void (*os_signal_handler)(void *sig_addr);
+
+int
+os_thread_signal_init(os_signal_handler handler);
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+void
+os_signal_unmask();
+
+void
+os_sigreturn();
+#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+typedef long int __syscall_slong_t;
+
+#if __ANDROID_API__ < 19
+
+int
+futimens(int __dir_fd, const struct timespec __times[2]);
+
+#endif
+
+#if __ANDROID_API__ < 21
+
+int
+posix_fallocate(int __fd, off_t __offset, off_t __length);
+
+int
+posix_fadvise(int fd, off_t offset, off_t len, int advice);
+
+int
+linkat(int __old_dir_fd, const char *__old_path, int __new_dir_fd,
+ const char *__new_path, int __flags);
+
+int
+symlinkat(const char *__old_path, int __new_dir_fd, const char *__new_path);
+
+ssize_t
+readlinkat(int __dir_fd, const char *__path, char *__buf, size_t __buf_size);
+
+#endif
+
+#if __ANDROID_API__ < 23
+
+long
+telldir(DIR *__dir);
+
+void
+seekdir(DIR *__dir, long __location);
+
+#endif
+
+#if __ANDROID_API__ < 24
+
+ssize_t
+preadv(int __fd, const struct iovec *__iov, int __count, off_t __offset);
+
+ssize_t
+pwritev(int __fd, const struct iovec *__iov, int __count, off_t __offset);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/shared_platform.cmake
new file mode 100644
index 000000000..13beb8e77
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/android/shared_platform.cmake
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_ANDROID)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_POSIX_SOURCE})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_malloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_malloc.c
new file mode 100644
index 000000000..e47a8cce1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_malloc.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+void *
+os_malloc(unsigned size)
+{
+ return NULL;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return NULL;
+}
+
+void
+os_free(void *ptr)
+{}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_thread.c
new file mode 100644
index 000000000..9f68bc8f9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_thread.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* clang-format off */
+#define bh_assert(v) do { \
+ if (!(v)) { \
+ int _count = 1; \
+ os_printf("\nASSERTION FAILED: %s, at %s, line %d\n",\
+ #v, __FILE__, __LINE__); \
+        /* divide by 0 to make it abort */                 \
+ os_printf("%d\n", _count / (_count - 1)); \
+ while (1); \
+ } \
+} while (0)
+/* clang-format on */
+
+struct os_thread_data;
+typedef struct os_thread_wait_node {
+ /* Binary semaphore */
+ SemaphoreHandle_t sem;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+typedef struct os_thread_data {
+ /* Next thread data */
+ struct os_thread_data *next;
+ /* Thread handle */
+ TaskHandle_t handle;
+ /* Thread start routine */
+ thread_start_routine_t start_routine;
+ /* Thread start routine argument */
+ void *arg;
+ /* Thread local root */
+ void *tlr;
+ /* Wait node of current thread */
+ os_thread_wait_node wait_node;
+ /* Lock for waiting list */
+ SemaphoreHandle_t wait_list_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+} os_thread_data;
+
+static bool is_thread_sys_inited = false;
+
+/* Lock for thread data list */
+static SemaphoreHandle_t thread_data_lock;
+
+/* Thread data list */
+static os_thread_data *thread_data_list = NULL;
+/* Thread data of supervisor thread */
+static os_thread_data supervisor_thread_data;
+
+/* Thread name index */
+static int thread_name_index;
+
+static void
+thread_data_list_add(os_thread_data *thread_data)
+{
+ xSemaphoreTake(thread_data_lock, portMAX_DELAY);
+ if (!thread_data_list)
+ thread_data_list = thread_data;
+ else {
+ /* If already in list, just return */
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p == thread_data) {
+ xSemaphoreGive(thread_data_lock);
+ return;
+ }
+ p = p->next;
+ }
+
+ /* Set as head of list */
+ thread_data->next = thread_data_list;
+ thread_data_list = thread_data;
+ }
+ xSemaphoreGive(thread_data_lock);
+}
+
+static void
+thread_data_list_remove(os_thread_data *thread_data)
+{
+ xSemaphoreTake(thread_data_lock, portMAX_DELAY);
+ if (thread_data_list) {
+ if (thread_data_list == thread_data)
+ thread_data_list = thread_data_list->next;
+ else {
+ /* Search and remove it from list */
+ os_thread_data *p = thread_data_list;
+ while (p && p->next != thread_data)
+ p = p->next;
+ if (p && p->next == thread_data)
+ p->next = p->next->next;
+ }
+ }
+ xSemaphoreGive(thread_data_lock);
+}
+
+static os_thread_data *
+thread_data_list_lookup(TaskHandle_t handle)
+{
+ xSemaphoreTake(thread_data_lock, portMAX_DELAY);
+ if (thread_data_list) {
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p->handle == handle) {
+ /* Found */
+ xSemaphoreGive(thread_data_lock);
+ return p;
+ }
+ p = p->next;
+ }
+ }
+ xSemaphoreGive(thread_data_lock);
+ return NULL;
+}
+
+int
+os_thread_sys_init()
+{
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+ if (!(thread_data_lock = xSemaphoreCreateMutex()))
+ return BHT_ERROR;
+
+ /* Initialize supervisor thread data */
+ memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
+
+ if (!(supervisor_thread_data.wait_node.sem = xSemaphoreCreateBinary())) {
+ vSemaphoreDelete(thread_data_lock);
+ return BHT_ERROR;
+ }
+
+ supervisor_thread_data.handle = xTaskGetCurrentTaskHandle();
+ /* Set as head of thread data list */
+ thread_data_list = &supervisor_thread_data;
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+}
+
+void
+os_thread_sys_destroy()
+{
+ if (is_thread_sys_inited) {
+ vSemaphoreDelete(supervisor_thread_data.wait_node.sem);
+ vSemaphoreDelete(thread_data_lock);
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ TaskHandle_t handle = xTaskGetCurrentTaskHandle();
+ return thread_data_list_lookup(handle);
+}
+
+static void
+os_thread_cleanup(void)
+{
+ os_thread_data *thread_data = thread_data_current();
+ os_thread_wait_list thread_wait_list;
+ SemaphoreHandle_t wait_list_lock;
+ SemaphoreHandle_t wait_node_sem;
+
+ bh_assert(thread_data != NULL);
+ wait_list_lock = thread_data->wait_list_lock;
+ thread_wait_list = thread_data->thread_wait_list;
+ wait_node_sem = thread_data->wait_node.sem;
+
+ xSemaphoreTake(wait_list_lock, portMAX_DELAY);
+ if (thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ xSemaphoreGive(head->sem);
+ head = next;
+ }
+ }
+ xSemaphoreGive(wait_list_lock);
+
+ /* Free sem and lock */
+ vSemaphoreDelete(wait_node_sem);
+ vSemaphoreDelete(wait_list_lock);
+
+ thread_data_list_remove(thread_data);
+ BH_FREE(thread_data);
+}
+
+static void
+os_thread_wrapper(void *arg)
+{
+ os_thread_data *thread_data = arg;
+
+ thread_data->handle = xTaskGetCurrentTaskHandle();
+ thread_data_list_add(thread_data);
+
+ thread_data->start_routine(thread_data->arg);
+ os_thread_cleanup();
+ vTaskDelete(NULL);
+}
+
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(p_tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ os_thread_data *thread_data;
+ char thread_name[32];
+
+ if (!p_tid || !stack_size)
+ return BHT_ERROR;
+
+ /* Create and initialize thread data */
+ if (!(thread_data = BH_MALLOC(sizeof(os_thread_data))))
+ return BHT_ERROR;
+
+ memset(thread_data, 0, sizeof(os_thread_data));
+
+ thread_data->start_routine = start;
+ thread_data->arg = arg;
+
+ if (!(thread_data->wait_node.sem = xSemaphoreCreateBinary()))
+ goto fail1;
+
+ if (!(thread_data->wait_list_lock = xSemaphoreCreateMutex()))
+ goto fail2;
+
+ snprintf(thread_name, sizeof(thread_name), "%s%d", "wasm-thread-",
+ ++thread_name_index);
+
+ /* Create the thread */
+ if (pdPASS
+ != xTaskCreate(os_thread_wrapper, thread_name, stack_size / 4,
+ thread_data, prio, &thread_data->handle))
+ goto fail3;
+
+ thread_data_list_add(thread_data);
+ *p_tid = thread_data->handle;
+ return BHT_OK;
+
+fail3:
+ vSemaphoreDelete(thread_data->wait_list_lock);
+fail2:
+ vSemaphoreDelete(thread_data->wait_node.sem);
+fail1:
+ BH_FREE(thread_data);
+ return BHT_ERROR;
+}
+
+korp_tid
+os_self_thread()
+{
+ return xTaskGetCurrentTaskHandle();
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ os_thread_data *thread_data, *curr_thread_data;
+ TaskHandle_t handle = thread;
+
+ (void)value_ptr;
+
+ /* Get thread data of current thread */
+ curr_thread_data = thread_data_current();
+ curr_thread_data->wait_node.next = NULL;
+
+ /* Get thread data */
+ thread_data = thread_data_list_lookup(handle);
+
+ xSemaphoreTake(thread_data->wait_list_lock, portMAX_DELAY);
+ if (!thread_data->thread_wait_list)
+ thread_data->thread_wait_list = &curr_thread_data->wait_node;
+ else {
+ /* Add to end of waiting list */
+ os_thread_wait_node *p = thread_data->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = &curr_thread_data->wait_node;
+ }
+ xSemaphoreGive(thread_data->wait_list_lock);
+
+    /* Wait on the sem */
+ xSemaphoreTake(curr_thread_data->wait_node.sem, portMAX_DELAY);
+ return BHT_OK;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ SemaphoreHandle_t semaphore;
+
+ if (!(semaphore = xSemaphoreCreateMutex()))
+ return BHT_ERROR;
+ mutex->sem = semaphore;
+ mutex->is_recursive = false;
+ return BHT_OK;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ SemaphoreHandle_t semaphore;
+
+ if (!(semaphore = xSemaphoreCreateRecursiveMutex()))
+ return BHT_ERROR;
+ mutex->sem = semaphore;
+ mutex->is_recursive = true;
+ return BHT_OK;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ vSemaphoreDelete(mutex->sem);
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ int ret = -1;
+
+ if (!mutex->is_recursive)
+ ret = xSemaphoreTake(mutex->sem, portMAX_DELAY);
+ else
+ ret = xSemaphoreTakeRecursive(mutex->sem, portMAX_DELAY);
+ return ret == pdPASS ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ int ret = -1;
+
+ if (!mutex->is_recursive)
+ ret = xSemaphoreGive(mutex->sem);
+ else
+ ret = xSemaphoreGiveRecursive(mutex->sem);
+ return ret == pdPASS ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ if (!(cond->wait_list_lock = xSemaphoreCreateMutex()))
+ return BHT_ERROR;
+
+ cond->thread_wait_list = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ vSemaphoreDelete(cond->wait_list_lock);
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
+{
+ os_thread_wait_node *node = &thread_data_current()->wait_node;
+
+ node->next = NULL;
+
+ xSemaphoreTake(cond->wait_list_lock, portMAX_DELAY);
+ if (!cond->thread_wait_list)
+ cond->thread_wait_list = node;
+ else {
+ /* Add to end of wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ xSemaphoreGive(cond->wait_list_lock);
+
+ /* Unlock mutex, wait sem and lock mutex again */
+ os_mutex_unlock(mutex);
+ xSemaphoreTake(node->sem, timed ? mills / portTICK_RATE_MS : portMAX_DELAY);
+ os_mutex_lock(mutex);
+
+ /* Remove wait node from wait list */
+ xSemaphoreTake(cond->wait_list_lock, portMAX_DELAY);
+ if (cond->thread_wait_list == node)
+ cond->thread_wait_list = node->next;
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+ }
+ xSemaphoreGive(cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ if (useconds == BHT_WAIT_FOREVER) {
+ return os_cond_wait_internal(cond, mutex, false, 0);
+ }
+ else {
+ uint64 mills_64 = useconds / 1000;
+ int32 mills;
+
+ if (mills_64 < (uint64)INT32_MAX) {
+ mills = (int32)mills_64;
+ }
+ else {
+ mills = INT32_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+ return os_cond_wait_internal(cond, mutex, true, mills);
+ }
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ xSemaphoreTake(cond->wait_list_lock, portMAX_DELAY);
+ if (cond->thread_wait_list)
+ xSemaphoreGive(cond->thread_wait_list->sem);
+ xSemaphoreGive(cond->wait_list_lock);
+
+ return BHT_OK;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_time.c
new file mode 100644
index 000000000..4497d8c6c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/freertos_time.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ TickType_t ticks = xTaskGetTickCount();
+ return (uint64)1000 * 1000 / configTICK_RATE_HZ * ticks;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/platform_api_freertos.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/platform_api_freertos.cmake
new file mode 100644
index 000000000..ebfc19d78
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/freertos/platform_api_freertos.cmake
@@ -0,0 +1,8 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_COMMON_FREERTOS_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+file (GLOB_RECURSE source_all ${PLATFORM_COMMON_FREERTOS_DIR}/*.c)
+
+set (PLATFORM_COMMON_FREERTOS_SOURCE ${source_all} )
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/COPYRIGHT b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/COPYRIGHT
new file mode 100644
index 000000000..a0e1c83a9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/COPYRIGHT
@@ -0,0 +1,126 @@
+# $FreeBSD$
+# @(#)COPYRIGHT 8.2 (Berkeley) 3/21/94
+
+The compilation of software known as FreeBSD is distributed under the
+following terms:
+
+Copyright (c) 1992-2019 The FreeBSD Project.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+The 4.4BSD and 4.4BSD-Lite software is distributed under the following
+terms:
+
+All of the documentation and software included in the 4.4BSD and 4.4BSD-Lite
+Releases is copyrighted by The Regents of the University of California.
+
+Copyright 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994
+ The Regents of the University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+ must display the following acknowledgement:
+This product includes software developed by the University of
+California, Berkeley and its contributors.
+4. Neither the name of the University nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+The Institute of Electrical and Electronics Engineers and the American
+National Standards Committee X3, on Information Processing Systems have
+given us permission to reprint portions of their documentation.
+
+In the following statement, the phrase ``this text'' refers to portions
+of the system documentation.
+
+Portions of this text are reprinted and reproduced in electronic form in
+the second BSD Networking Software Release, from IEEE Std 1003.1-1988, IEEE
+Standard Portable Operating System Interface for Computer Environments
+(POSIX), copyright C 1988 by the Institute of Electrical and Electronics
+Engineers, Inc. In the event of any discrepancy between these versions
+and the original IEEE Standard, the original IEEE Standard is the referee
+document.
+
+In the following statement, the phrase ``This material'' refers to portions
+of the system documentation.
+
+This material is reproduced with permission from American National
+Standards Committee X3, on Information Processing Systems. Computer and
+Business Equipment Manufacturers Association (CBEMA), 311 First St., NW,
+Suite 500, Washington, DC 20001-2178. The developmental work of
+Programming Language C was completed by the X3J11 Technical Committee.
+
+The views and conclusions contained in the software and documentation are
+those of the authors and should not be interpreted as representing official
+policies, either expressed or implied, of the Regents of the University
+of California.
+
+
+NOTE: The copyright of UC Berkeley's Berkeley Software Distribution ("BSD")
+source has been updated. The copyright addendum may be found at
+ftp://ftp.cs.berkeley.edu/pub/4bsd/README.Impt.License.Change and is
+included below.
+
+July 22, 1999
+
+To All Licensees, Distributors of Any Version of BSD:
+
+As you know, certain of the Berkeley Software Distribution ("BSD") source
+code files require that further distributions of products containing all or
+portions of the software, acknowledge within their advertising materials
+that such products contain software developed by UC Berkeley and its
+contributors.
+
+Specifically, the provision reads:
+
+" * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors."
+
+Effective immediately, licensees and distributors are no longer required to
+include the acknowledgement within advertising materials. Accordingly, the
+foregoing paragraph of those BSD Unix files containing it is hereby deleted
+in its entirety.
+
+William Hoskins
+Director, Office of Technology Licensing
+University of California, Berkeley
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/math.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/math.c
new file mode 100644
index 000000000..2ba9f4d28
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/math.c
@@ -0,0 +1,1681 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "platform_common.h"
+
+#define __FDLIBM_STDC__
+
+#ifndef FLT_EVAL_METHOD
+#define FLT_EVAL_METHOD 0
+#endif
+
+typedef uint32_t u_int32_t;
+typedef uint64_t u_int64_t;
+
+typedef union u32double_tag {
+ int *pint;
+ double *pdouble;
+} U32DOUBLE;
+
+static inline int *
+pdouble2pint(double *pdouble)
+{
+ U32DOUBLE u;
+ u.pdouble = pdouble;
+ return u.pint;
+}
+
+typedef union {
+ double value;
+ struct {
+ u_int32_t lsw;
+ u_int32_t msw;
+ } parts;
+ struct {
+ u_int64_t w;
+ } xparts;
+} ieee_double_shape_type_little;
+
+typedef union {
+ double value;
+ struct {
+ u_int32_t msw;
+ u_int32_t lsw;
+ } parts;
+ struct {
+ u_int64_t w;
+ } xparts;
+} ieee_double_shape_type_big;
+
+typedef union {
+ double d;
+ struct {
+ unsigned int manl : 32;
+ unsigned int manh : 20;
+ unsigned int exp : 11;
+ unsigned int sign : 1;
+ } bits;
+} IEEEd2bits_L;
+
+typedef union {
+ double d;
+ struct {
+ unsigned int sign : 1;
+ unsigned int exp : 11;
+ unsigned int manh : 20;
+ unsigned int manl : 32;
+ } bits;
+} IEEEd2bits_B;
+
+typedef union {
+ float f;
+ struct {
+ unsigned int man : 23;
+ unsigned int exp : 8;
+ unsigned int sign : 1;
+ } bits;
+} IEEEf2bits_L;
+
+typedef union {
+ float f;
+ struct {
+ unsigned int sign : 1;
+ unsigned int exp : 8;
+ unsigned int man : 23;
+ } bits;
+} IEEEf2bits_B;
+
+static union {
+ int a;
+ char b;
+} __ue = { .a = 1 };
+
+#define is_little_endian() (__ue.b == 1)
+
+#define __HIL(x) *(1 + pdouble2pint(&x))
+#define __LOL(x) *(pdouble2pint(&x))
+#define __HIB(x) *(pdouble2pint(&x))
+#define __LOB(x) *(1 + pdouble2pint(&x))
+
+/* Get two 32 bit ints from a double. */
+
+#define EXTRACT_WORDS_L(ix0, ix1, d) \
+ do { \
+ ieee_double_shape_type_little ew_u; \
+ ew_u.value = (d); \
+ (ix0) = ew_u.parts.msw; \
+ (ix1) = ew_u.parts.lsw; \
+ } while (0)
+
+/* Set a double from two 32 bit ints. */
+
+#define INSERT_WORDS_L(d, ix0, ix1) \
+ do { \
+ ieee_double_shape_type_little iw_u; \
+ iw_u.parts.msw = (ix0); \
+ iw_u.parts.lsw = (ix1); \
+ (d) = iw_u.value; \
+ } while (0)
+
+/* Get two 32 bit ints from a double. */
+
+#define EXTRACT_WORDS_B(ix0, ix1, d) \
+ do { \
+ ieee_double_shape_type_big ew_u; \
+ ew_u.value = (d); \
+ (ix0) = ew_u.parts.msw; \
+ (ix1) = ew_u.parts.lsw; \
+ } while (0)
+
+/* Set a double from two 32 bit ints. */
+
+#define INSERT_WORDS_B(d, ix0, ix1) \
+ do { \
+ ieee_double_shape_type_big iw_u; \
+ iw_u.parts.msw = (ix0); \
+ iw_u.parts.lsw = (ix1); \
+ (d) = iw_u.value; \
+ } while (0)
+
+/* Get the more significant 32 bit int from a double. */
+#define GET_HIGH_WORD_L(i, d) \
+ do { \
+ ieee_double_shape_type_little gh_u; \
+ gh_u.value = (d); \
+ (i) = gh_u.parts.msw; \
+ } while (0)
+
+/* Get the more significant 32 bit int from a double. */
+#define GET_HIGH_WORD_B(i, d) \
+ do { \
+ ieee_double_shape_type_big gh_u; \
+ gh_u.value = (d); \
+ (i) = gh_u.parts.msw; \
+ } while (0)
+
+/* Set the more significant 32 bits of a double from an int. */
+#define SET_HIGH_WORD_L(d, v) \
+ do { \
+ ieee_double_shape_type_little sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.msw = (v); \
+ (d) = sh_u.value; \
+ } while (0)
+
+/* Set the more significant 32 bits of a double from an int. */
+#define SET_HIGH_WORD_B(d, v) \
+ do { \
+ ieee_double_shape_type_big sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.msw = (v); \
+ (d) = sh_u.value; \
+ } while (0)
+
+/* Set the less significant 32 bits of a double from an int. */
+#define SET_LOW_WORD_L(d, v) \
+ do { \
+ ieee_double_shape_type_little sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.lsw = (v); \
+ (d) = sh_u.value; \
+ } while (0)
+
+/* Set the less significant 32 bits of a double from an int. */
+#define SET_LOW_WORD_B(d, v) \
+ do { \
+ ieee_double_shape_type_big sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.lsw = (v); \
+ (d) = sh_u.value; \
+ } while (0)
+
+/* Get the less significant 32 bit int from a double. */
+#define GET_LOW_WORD_L(i, d) \
+ do { \
+ ieee_double_shape_type_little gl_u; \
+ gl_u.value = (d); \
+ (i) = gl_u.parts.lsw; \
+ } while (0)
+
+/* Get the less significant 32 bit int from a double. */
+#define GET_LOW_WORD_B(i, d) \
+ do { \
+ ieee_double_shape_type_big gl_u; \
+ gl_u.value = (d); \
+ (i) = gl_u.parts.lsw; \
+ } while (0)
+
+/*
+ * A union which permits us to convert between a float and a 32 bit
+ * int.
+ */
+typedef union {
+ float value;
+ /* FIXME: Assumes 32 bit int. */
+ unsigned int word;
+} ieee_float_shape_type;
+
+/* Get a 32 bit int from a float. */
+#define GET_FLOAT_WORD(i, d) \
+ do { \
+ ieee_float_shape_type gf_u; \
+ gf_u.value = (d); \
+ (i) = gf_u.word; \
+ } while (0)
+
+/* Set a float from a 32 bit int. */
+#define SET_FLOAT_WORD(d, i) \
+ do { \
+ ieee_float_shape_type sf_u; \
+ sf_u.word = (i); \
+ (d) = sf_u.value; \
+ } while (0)
+
+/* Macro wrappers. */
+#define EXTRACT_WORDS(ix0, ix1, d) \
+ do { \
+ if (is_little_endian()) \
+ EXTRACT_WORDS_L(ix0, ix1, d); \
+ else \
+ EXTRACT_WORDS_B(ix0, ix1, d); \
+ } while (0)
+
+#define INSERT_WORDS(d, ix0, ix1) \
+ do { \
+ if (is_little_endian()) \
+ INSERT_WORDS_L(d, ix0, ix1); \
+ else \
+ INSERT_WORDS_B(d, ix0, ix1); \
+ } while (0)
+
+#define GET_HIGH_WORD(i, d) \
+ do { \
+ if (is_little_endian()) \
+ GET_HIGH_WORD_L(i, d); \
+ else \
+ GET_HIGH_WORD_B(i, d); \
+ } while (0)
+
+#define SET_HIGH_WORD(d, v) \
+ do { \
+ if (is_little_endian()) \
+ SET_HIGH_WORD_L(d, v); \
+ else \
+ SET_HIGH_WORD_B(d, v); \
+ } while (0)
+
+#define GET_LOW_WORD(d, v) \
+ do { \
+ if (is_little_endian()) \
+ GET_LOW_WORD_L(d, v); \
+ else \
+ GET_LOW_WORD_B(d, v); \
+ } while (0)
+
+#define SET_LOW_WORD(d, v) \
+ do { \
+ if (is_little_endian()) \
+ SET_LOW_WORD_L(d, v); \
+ else \
+ SET_LOW_WORD_B(d, v); \
+ } while (0)
+
+#define __HI(x) (is_little_endian() ? __HIL(x) : __HIB(x))
+
+#define __LO(x) (is_little_endian() ? __LOL(x) : __LOB(x))
+
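
The macros above pick between the little- and big-endian word layouts at run time via the one-byte probe union. A standalone sketch of the same idea (assuming IEEE-754 doubles stored with the same byte order as integers): for 1.0 the two 32-bit halves come out as 0x3FF00000 (most significant) and 0x00000000 (least significant) once the correct half is selected:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    union { int a; char b; } probe = { .a = 1 };
    int little = (probe.b == 1);     /* same probe as is_little_endian() */

    double d = 1.0;
    uint32_t w[2], msw, lsw;
    memcpy(w, &d, sizeof(w));        /* raw 32-bit halves in storage order */
    msw = little ? w[1] : w[0];      /* the selection EXTRACT_WORDS makes */
    lsw = little ? w[0] : w[1];
    printf("msw=0x%08X lsw=0x%08X\n", (unsigned)msw, (unsigned)lsw);
    return 0;
}
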
+/*
+ * Attempt to get strict C99 semantics for assignment with non-C99 compilers.
+ */
+#if FLT_EVAL_METHOD == 0 || __GNUC__ == 0
+#define STRICT_ASSIGN(type, lval, rval) ((lval) = (rval))
+#else
+#define STRICT_ASSIGN(type, lval, rval) \
+ do { \
+ volatile type __lval; \
+ \
+ if (sizeof(type) >= sizeof(long double)) \
+ (lval) = (rval); \
+ else { \
+ __lval = (rval); \
+ (lval) = __lval; \
+ } \
+ } while (0)
+#endif
+
+#ifdef __FDLIBM_STDC__
+static const double huge = 1.0e300;
+#else
+static double huge = 1.0e300;
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ tiny = 1.0e-300;
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ one = 1.00000000000000000000e+00; /* 0x3FF00000, 0x00000000 */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ TWO52[2] = {
+ 4.50359962737049600000e+15, /* 0x43300000, 0x00000000 */
+ -4.50359962737049600000e+15, /* 0xC3300000, 0x00000000 */
+ };
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ atanhi[] = {
+ 4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+ 7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+ 9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+ 1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+ };
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ atanlo[] = {
+ 2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+ 3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+ 1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+ 6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+ };
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ aT[] = {
+ 3.33333333333329318027e-01, /* 0x3FD55555, 0x5555550D */
+ -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+ 1.42857142725034663711e-01, /* 0x3FC24924, 0x920083FF */
+ -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+ 9.09088713343650656196e-02, /* 0x3FB745CD, 0xC54C206E */
+ -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+ 6.66107313738753120669e-02, /* 0x3FB10D66, 0xA0D03D51 */
+ -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+ 4.97687799461593236017e-02, /* 0x3FA97B4B, 0x24760DEB */
+ -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+ 1.62858201153657823623e-02, /* 0x3F90AD3A, 0xE322DA11 */
+ };
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ zero = 0.0,
+ pi_o_4 = 7.8539816339744827900E-01, /* 0x3FE921FB, 0x54442D18 */
+ pi_o_2 = 1.5707963267948965580E+00, /* 0x3FF921FB, 0x54442D18 */
+ pi = 3.1415926535897931160E+00, /* 0x400921FB, 0x54442D18 */
+ pi_lo = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+bp[] = {1.0, 1.5,},
+dp_h[] = { 0.0, 5.84962487220764160156e-01,}, /* 0x3FE2B803, 0x40000000 */
+dp_l[] = { 0.0, 1.35003920212974897128e-08,}, /* 0x3E4CFDEB, 0x43CFD006 */
+two = 2.0,
+two53 = 9007199254740992.0, /* 0x43400000, 0x00000000 */
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+ /* poly coefs for (3/2)*(log(x)-2s-2/3*s**3 */
+L1 = 5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
+L2 = 4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
+L3 = 3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
+L4 = 2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
+L5 = 2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
+L6 = 2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
+P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
+lg2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+lg2_h = 6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
+lg2_l = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
+ovt = 8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
+cp = 9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
+cp_h = 9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
+cp_l = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
+ivln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
+ivln2_h = 1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
+ivln2_l = 1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
+
+static double
+freebsd_floor(double x);
+static double
+freebsd_ceil(double x);
+static double
+freebsd_fabs(double x);
+static double
+freebsd_rint(double x);
+static int
+freebsd_isnan(double x);
+static double
+freebsd_atan(double x);
+static double
+freebsd_atan2(double y, double x);
+
+static double
+freebsd_atan(double x)
+{
+ double w, s1, s2, z;
+ int32_t ix, hx, id;
+
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7fffffff;
+ if (ix >= 0x44100000) { /* if |x| >= 2^66 */
+ u_int32_t low;
+ GET_LOW_WORD(low, x);
+ if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
+ return x + x; /* NaN */
+ if (hx > 0)
+ return atanhi[3] + *(volatile double *)&atanlo[3];
+ else
+ return -atanhi[3] - *(volatile double *)&atanlo[3];
+ }
+ if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
+ if (ix < 0x3e400000) { /* |x| < 2^-27 */
+ if (huge + x > one)
+ return x; /* raise inexact */
+ }
+ id = -1;
+ }
+ else {
+ x = freebsd_fabs(x);
+ if (ix < 0x3ff30000) { /* |x| < 1.1875 */
+ if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ id = 0;
+ x = (2.0 * x - one) / (2.0 + x);
+ }
+ else { /* 11/16<=|x|< 19/16 */
+ id = 1;
+ x = (x - one) / (x + one);
+ }
+ }
+ else {
+ if (ix < 0x40038000) { /* |x| < 2.4375 */
+ id = 2;
+ x = (x - 1.5) / (one + 1.5 * x);
+ }
+ else { /* 2.4375 <= |x| < 2^66 */
+ id = 3;
+ x = -1.0 / x;
+ }
+ }
+ }
+ /* end of argument reduction */
+ z = x * x;
+ w = z * z;
+ /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+ s1 = z
+ * (aT[0]
+ + w
+ * (aT[2]
+ + w * (aT[4] + w * (aT[6] + w * (aT[8] + w * aT[10])))));
+ s2 = w * (aT[1] + w * (aT[3] + w * (aT[5] + w * (aT[7] + w * aT[9]))));
+ if (id < 0)
+ return x - x * (s1 + s2);
+ else {
+ z = atanhi[id] - ((x * (s1 + s2) - atanlo[id]) - x);
+ return (hx < 0) ? -z : z;
+ }
+}
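
The argument reduction in freebsd_atan() above relies on the identity atan(x) = atan(c) + atan((x - c) / (1 + c*x)) for a fixed pivot c (0.5, 1.0 or 1.5), so the polynomial only ever sees a small argument. A standalone numeric check of that identity against the host libm, using c = 1.5 as in the id == 2 branch (compile with -lm; illustrative only):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 2.0, c = 1.5;
    double reduced = (x - c) / (1.0 + c * x);   /* what the polynomial sees */
    printf("atan(x)           = %.17g\n", atan(x));
    printf("atan(c) + atan(r) = %.17g\n", atan(c) + atan(reduced));
    return 0;
}
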
+
+static double
+freebsd_atan2(double y, double x)
+{
+ double z;
+ int32_t k, m, hx, hy, ix, iy;
+ u_int32_t lx, ly;
+
+ EXTRACT_WORDS(hx, lx, x);
+ ix = hx & 0x7fffffff;
+ EXTRACT_WORDS(hy, ly, y);
+ iy = hy & 0x7fffffff;
+ if (((ix | ((lx | -lx) >> 31)) > 0x7ff00000)
+ || ((iy | ((ly | -ly) >> 31)) > 0x7ff00000)) /* x or y is NaN */
+ return x + y;
+ if (hx == 0x3ff00000 && lx == 0)
+ return freebsd_atan(y); /* x=1.0 */
+ m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */
+
+ /* when y = 0 */
+ if ((iy | ly) == 0) {
+ switch (m) {
+ case 0:
+ case 1:
+ return y; /* atan(+-0,+anything)=+-0 */
+ case 2:
+ return pi + tiny; /* atan(+0,-anything) = pi */
+ case 3:
+ default:
+ return -pi - tiny; /* atan(-0,-anything) =-pi */
+ }
+ }
+ /* when x = 0 */
+ if ((ix | lx) == 0)
+ return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+ /* when x is INF */
+ if (ix == 0x7ff00000) {
+ if (iy == 0x7ff00000) {
+ switch (m) {
+ case 0:
+ return pi_o_4 + tiny; /* atan(+INF,+INF) */
+ case 1:
+ return -pi_o_4 - tiny; /* atan(-INF,+INF) */
+ case 2:
+ return 3.0 * pi_o_4 + tiny; /*atan(+INF,-INF)*/
+ case 3:
+ default:
+ return -3.0 * pi_o_4 - tiny; /*atan(-INF,-INF)*/
+ }
+ }
+ else {
+ switch (m) {
+ case 0:
+ return zero; /* atan(+...,+INF) */
+ case 1:
+ return -zero; /* atan(-...,+INF) */
+ case 2:
+ return pi + tiny; /* atan(+...,-INF) */
+ case 3:
+ default:
+ return -pi - tiny; /* atan(-...,-INF) */
+ }
+ }
+ }
+ /* when y is INF */
+ if (iy == 0x7ff00000)
+ return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+ /* compute y/x */
+ k = (iy - ix) >> 20;
+ if (k > 60) { /* |y/x| > 2**60 */
+ z = pi_o_2 + 0.5 * pi_lo;
+ m &= 1;
+ }
+ else if (hx < 0 && k < -60)
+ z = 0.0; /* 0 > |y|/x > -2**-60 */
+ else
+ z = freebsd_atan(fabs(y / x)); /* safe to do y/x */
+ switch (m) {
+ case 0:
+ return z; /* atan(+,+) */
+ case 1:
+ return -z; /* atan(-,+) */
+ case 2:
+ return pi - (z - pi_lo); /* atan(+,-) */
+ default: /* case 3 */
+ return (z - pi_lo) - pi; /* atan(-,-) */
+ }
+}
+
+#ifndef BH_HAS_SQRTF
+static float
+freebsd_sqrtf(float x)
+{
+ float z;
+ int32_t sign = (int)0x80000000;
+ int32_t ix, s, q, m, t, i;
+ u_int32_t r;
+
+ GET_FLOAT_WORD(ix, x);
+
+ /* take care of Inf and NaN */
+ if ((ix & 0x7f800000) == 0x7f800000) {
+ return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf
+ sqrt(-inf)=sNaN */
+ }
+ /* take care of zero */
+ if (ix <= 0) {
+ if ((ix & (~sign)) == 0)
+ return x; /* sqrt(+-0) = +-0 */
+ else if (ix < 0)
+ return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+ }
+ /* normalize x */
+ m = (ix >> 23);
+ if (m == 0) { /* subnormal x */
+ for (i = 0; (ix & 0x00800000) == 0; i++)
+ ix <<= 1;
+ m -= i - 1;
+ }
+ m -= 127; /* unbias exponent */
+ ix = (ix & 0x007fffff) | 0x00800000;
+ if (m & 1) /* odd m, double x to make it even */
+ ix += ix;
+ m >>= 1; /* m = [m/2] */
+
+ /* generate sqrt(x) bit by bit */
+ ix += ix;
+ q = s = 0; /* q = sqrt(x) */
+ r = 0x01000000; /* r = moving bit from right to left */
+
+ while (r != 0) {
+ t = s + r;
+ if (t <= ix) {
+ s = t + r;
+ ix -= t;
+ q += r;
+ }
+ ix += ix;
+ r >>= 1;
+ }
+
+ /* use floating add to find out rounding direction */
+ if (ix != 0) {
+ z = one - tiny; /* trigger inexact flag */
+ if (z >= one) {
+ z = one + tiny;
+ if (z > one)
+ q += 2;
+ else
+ q += (q & 1);
+ }
+ }
+ ix = (q >> 1) + 0x3f000000;
+ ix += (m << 23);
+ SET_FLOAT_WORD(z, ix);
+ return z;
+}
+#endif /* end of BH_HAS_SQRTF */
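
freebsd_sqrtf() above extracts the mantissa and then produces the root one bit at a time, most significant bit first. The same digit-by-digit idea in its common integer form, as a sketch (not the exact formulation used above):

#include <stdint.h>
#include <stdio.h>

/* floor(sqrt(n)) computed one result bit at a time, most significant first */
static uint32_t isqrt32(uint32_t n)
{
    uint32_t res = 0;
    uint32_t bit = 1u << 30;               /* highest power of four in 32 bits */

    while (bit > n)
        bit >>= 2;

    while (bit != 0) {
        if (n >= res + bit) {              /* candidate bit fits the remainder */
            n -= res + bit;
            res = (res >> 1) + bit;
        }
        else {
            res >>= 1;
        }
        bit >>= 2;
    }
    return res;
}

int main(void)
{
    printf("isqrt(2147395600) = %u\n", isqrt32(2147395600u)); /* 46340 */
    return 0;
}
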
+
+#ifndef BH_HAS_SQRT
+static double
+freebsd_sqrt(double x) /* wrapper sqrt */
+{
+ double z;
+ int32_t sign = (int)0x80000000;
+ int32_t ix0, s0, q, m, t, i;
+ u_int32_t r, t1, s1, ix1, q1;
+
+ EXTRACT_WORDS(ix0, ix1, x);
+
+ /* take care of Inf and NaN */
+ if ((ix0 & 0x7ff00000) == 0x7ff00000) {
+ return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf
+ sqrt(-inf)=sNaN */
+ }
+ /* take care of zero */
+ if (ix0 <= 0) {
+ if (((ix0 & (~sign)) | ix1) == 0)
+ return x; /* sqrt(+-0) = +-0 */
+ else if (ix0 < 0)
+ return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+ }
+ /* normalize x */
+ m = (ix0 >> 20);
+ if (m == 0) { /* subnormal x */
+ while (ix0 == 0) {
+ m -= 21;
+ ix0 |= (ix1 >> 11);
+ ix1 <<= 21;
+ }
+ for (i = 0; (ix0 & 0x00100000) == 0; i++)
+ ix0 <<= 1;
+ m -= i - 1;
+ ix0 |= (ix1 >> (32 - i));
+ ix1 <<= i;
+ }
+ m -= 1023; /* unbias exponent */
+ ix0 = (ix0 & 0x000fffff) | 0x00100000;
+ if (m & 1) { /* odd m, double x to make it even */
+ ix0 += ix0 + ((ix1 & sign) >> 31);
+ ix1 += ix1;
+ }
+ m >>= 1; /* m = [m/2] */
+
+ /* generate sqrt(x) bit by bit */
+ ix0 += ix0 + ((ix1 & sign) >> 31);
+ ix1 += ix1;
+ q = q1 = s0 = s1 = 0; /* [q,q1] = sqrt(x) */
+ r = 0x00200000; /* r = moving bit from right to left */
+
+ while (r != 0) {
+ t = s0 + r;
+ if (t <= ix0) {
+ s0 = t + r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 += ix0 + ((ix1 & sign) >> 31);
+ ix1 += ix1;
+ r >>= 1;
+ }
+
+ r = sign;
+ while (r != 0) {
+ t1 = s1 + r;
+ t = s0;
+ if ((t < ix0) || ((t == ix0) && (t1 <= ix1))) {
+ s1 = t1 + r;
+ if (((t1 & sign) == sign) && (s1 & sign) == 0)
+ s0 += 1;
+ ix0 -= t;
+ if (ix1 < t1)
+ ix0 -= 1;
+ ix1 -= t1;
+ q1 += r;
+ }
+ ix0 += ix0 + ((ix1 & sign) >> 31);
+ ix1 += ix1;
+ r >>= 1;
+ }
+
+ /* use floating add to find out rounding direction */
+ if ((ix0 | ix1) != 0) {
+ z = one - tiny; /* trigger inexact flag */
+ if (z >= one) {
+ z = one + tiny;
+ if (q1 == (u_int32_t)0xffffffff) {
+ q1 = 0;
+ q += 1;
+ }
+ else if (z > one) {
+ if (q1 == (u_int32_t)0xfffffffe)
+ q += 1;
+ q1 += 2;
+ }
+ else
+ q1 += (q1 & 1);
+ }
+ }
+ ix0 = (q >> 1) + 0x3fe00000;
+ ix1 = q1 >> 1;
+ if ((q & 1) == 1)
+ ix1 |= sign;
+ ix0 += (m << 20);
+
+ INSERT_WORDS(z, ix0, ix1);
+
+ return z;
+}
+#endif /* end of BH_HAS_SQRT */
+
+static double
+freebsd_floor(double x)
+{
+ int32_t i0, i1, j0;
+ u_int32_t i, j;
+
+ EXTRACT_WORDS(i0, i1, x);
+
+ j0 = ((i0 >> 20) & 0x7ff) - 0x3ff;
+ if (j0 < 20) {
+ if (j0 < 0) { /* raise inexact if x != 0 */
+ if (huge + x > 0.0) { /* return 0*sign(x) if |x|<1 */
+ if (i0 >= 0) {
+ i0 = i1 = 0;
+ }
+ else if (((i0 & 0x7fffffff) | i1) != 0) {
+ i0 = 0xbff00000;
+ i1 = 0;
+ }
+ }
+ }
+ else {
+ i = (0x000fffff) >> j0;
+ if (((i0 & i) | i1) == 0)
+ return x; /* x is integral */
+ if (huge + x > 0.0) { /* raise inexact flag */
+ if (i0 < 0)
+ i0 += (0x00100000) >> j0;
+ i0 &= (~i);
+ i1 = 0;
+ }
+ }
+ }
+ else if (j0 > 51) {
+ if (j0 == 0x400)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ else {
+ i = ((u_int32_t)(0xffffffff)) >> (j0 - 20);
+ if ((i1 & i) == 0)
+ return x; /* x is integral */
+ if (huge + x > 0.0) { /* raise inexact flag */
+ if (i0 < 0) {
+ if (j0 == 20)
+ i0 += 1;
+ else {
+ j = i1 + (1 << (52 - j0));
+ if (j < i1)
+ i0 += 1; /* got a carry */
+ i1 = j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+
+ INSERT_WORDS(x, i0, i1);
+
+ return x;
+}
+
+static double
+freebsd_ceil(double x)
+{
+ int32_t i0, i1, j0;
+ u_int32_t i, j;
+ EXTRACT_WORDS(i0, i1, x);
+ j0 = ((i0 >> 20) & 0x7ff) - 0x3ff;
+ if (j0 < 20) {
+ if (j0 < 0) { /* raise inexact if x != 0 */
+ if (huge + x > 0.0) { /* return 0*sign(x) if |x|<1 */
+ if (i0 < 0) {
+ i0 = 0x80000000;
+ i1 = 0;
+ }
+ else if ((i0 | i1) != 0) {
+ i0 = 0x3ff00000;
+ i1 = 0;
+ }
+ }
+ }
+ else {
+ i = (0x000fffff) >> j0;
+ if (((i0 & i) | i1) == 0)
+ return x; /* x is integral */
+ if (huge + x > 0.0) { /* raise inexact flag */
+ if (i0 > 0)
+ i0 += (0x00100000) >> j0;
+ i0 &= (~i);
+ i1 = 0;
+ }
+ }
+ }
+ else if (j0 > 51) {
+ if (j0 == 0x400)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ else {
+ i = ((u_int32_t)(0xffffffff)) >> (j0 - 20);
+ if ((i1 & i) == 0)
+ return x; /* x is integral */
+ if (huge + x > 0.0) { /* raise inexact flag */
+ if (i0 > 0) {
+ if (j0 == 20)
+ i0 += 1;
+ else {
+ j = i1 + (1 << (52 - j0));
+ if (j < i1)
+ i0 += 1; /* got a carry */
+ i1 = j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+ INSERT_WORDS(x, i0, i1);
+ return x;
+}
+
+static double
+freebsd_rint(double x)
+{
+ int32_t i0, j0, sx;
+ u_int32_t i, i1;
+ double w, t;
+ EXTRACT_WORDS(i0, i1, x);
+ sx = (i0 >> 31) & 1;
+ j0 = ((i0 >> 20) & 0x7ff) - 0x3ff;
+ if (j0 < 20) {
+ if (j0 < 0) {
+ if (((i0 & 0x7fffffff) | i1) == 0)
+ return x;
+ i1 |= (i0 & 0x0fffff);
+ i0 &= 0xfffe0000;
+ i0 |= ((i1 | -i1) >> 12) & 0x80000;
+ SET_HIGH_WORD(x, i0);
+ STRICT_ASSIGN(double, w, TWO52[sx] + x);
+ t = w - TWO52[sx];
+ GET_HIGH_WORD(i0, t);
+ SET_HIGH_WORD(t, (i0 & 0x7fffffff) | (sx << 31));
+ return t;
+ }
+ else {
+ i = (0x000fffff) >> j0;
+ if (((i0 & i) | i1) == 0)
+ return x; /* x is integral */
+ i >>= 1;
+ if (((i0 & i) | i1) != 0) {
+ /*
+ * Some bit is set after the 0.5 bit. To avoid the
+ * possibility of errors from double rounding in
+ * w = TWO52[sx]+x, adjust the 0.25 bit to a lower
+ * guard bit. We do this for all j0<=51. The
+ * adjustment is trickiest for j0==18 and j0==19
+ * since then it spans the word boundary.
+ */
+ if (j0 == 19)
+ i1 = 0x40000000;
+ else if (j0 == 18)
+ i1 = 0x80000000;
+ else
+ i0 = (i0 & (~i)) | ((0x20000) >> j0);
+ }
+ }
+ }
+ else if (j0 > 51) {
+ if (j0 == 0x400)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ else {
+ i = ((u_int32_t)(0xffffffff)) >> (j0 - 20);
+ if ((i1 & i) == 0)
+ return x; /* x is integral */
+ i >>= 1;
+ if ((i1 & i) != 0)
+ i1 = (i1 & (~i)) | ((0x40000000) >> (j0 - 20));
+ }
+ INSERT_WORDS(x, i0, i1);
+ STRICT_ASSIGN(double, w, TWO52[sx] + x);
+ return w - TWO52[sx];
+}
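
freebsd_rint() builds on the classic 2^52 trick: for |x| < 2^52, evaluating (x + 2^52) - 2^52 in double precision rounds x to an integer in the current rounding mode; the word fiddling above only guards against double rounding near halfway cases. A standalone sketch, assuming IEEE-754 doubles and round-to-nearest-even (the volatile qualifiers, like STRICT_ASSIGN above, keep extended-precision evaluation from spoiling the trick):

#include <stdio.h>

int main(void)
{
    volatile double two52 = 4503599627370496.0; /* 2^52 */
    volatile double x = 2.5;
    double r = (x + two52) - two52;             /* 2.0: the tie rounds to even */
    printf("rint-like(2.5) = %.1f\n", r);
    return 0;
}
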
+
+static int
+freebsd_isnan(double d)
+{
+ if (is_little_endian()) {
+ IEEEd2bits_L u;
+ u.d = d;
+ return (u.bits.exp == 2047 && (u.bits.manl != 0 || u.bits.manh != 0));
+ }
+ else {
+ IEEEd2bits_B u;
+ u.d = d;
+ return (u.bits.exp == 2047 && (u.bits.manl != 0 || u.bits.manh != 0));
+ }
+}
+
+static float
+freebsd_fabsf(float x)
+{
+ u_int32_t ix;
+ GET_FLOAT_WORD(ix, x);
+ SET_FLOAT_WORD(x, ix & 0x7fffffff);
+ return x;
+}
+
+static double
+freebsd_fabs(double x)
+{
+ u_int32_t high;
+ GET_HIGH_WORD(high, x);
+ SET_HIGH_WORD(x, high & 0x7fffffff);
+ return x;
+}
+
+static const float huge_f = 1.0e30F;
+
+static const float TWO23[2] = {
+ 8.3886080000e+06, /* 0x4b000000 */
+ -8.3886080000e+06, /* 0xcb000000 */
+};
+
+static float
+freebsd_truncf(float x)
+{
+ int32_t i0, j0;
+ u_int32_t i;
+ GET_FLOAT_WORD(i0, x);
+ j0 = ((i0 >> 23) & 0xff) - 0x7f;
+ if (j0 < 23) {
+ if (j0 < 0) { /* raise inexact if x != 0 */
+ if (huge_f + x > 0.0F) /* |x|<1, so return 0*sign(x) */
+ i0 &= 0x80000000;
+ }
+ else {
+ i = (0x007fffff) >> j0;
+ if ((i0 & i) == 0)
+ return x; /* x is integral */
+ if (huge_f + x > 0.0F) /* raise inexact flag */
+ i0 &= (~i);
+ }
+ }
+ else {
+ if (j0 == 0x80)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ SET_FLOAT_WORD(x, i0);
+ return x;
+}
+
+static float
+freebsd_rintf(float x)
+{
+ int32_t i0, j0, sx;
+ float w, t;
+ GET_FLOAT_WORD(i0, x);
+ sx = (i0 >> 31) & 1;
+ j0 = ((i0 >> 23) & 0xff) - 0x7f;
+ if (j0 < 23) {
+ if (j0 < 0) {
+ if ((i0 & 0x7fffffff) == 0)
+ return x;
+ STRICT_ASSIGN(float, w, TWO23[sx] + x);
+ t = w - TWO23[sx];
+ GET_FLOAT_WORD(i0, t);
+ SET_FLOAT_WORD(t, (i0 & 0x7fffffff) | (sx << 31));
+ return t;
+ }
+ STRICT_ASSIGN(float, w, TWO23[sx] + x);
+ return w - TWO23[sx];
+ }
+ if (j0 == 0x80)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+}
+
+static float
+freebsd_ceilf(float x)
+{
+ int32_t i0, j0;
+ u_int32_t i;
+
+ GET_FLOAT_WORD(i0, x);
+ j0 = ((i0 >> 23) & 0xff) - 0x7f;
+ if (j0 < 23) {
+ if (j0 < 0) { /* raise inexact if x != 0 */
+ if (huge_f + x > (float)0.0) { /* return 0*sign(x) if |x|<1 */
+ if (i0 < 0) {
+ i0 = 0x80000000;
+ }
+ else if (i0 != 0) {
+ i0 = 0x3f800000;
+ }
+ }
+ }
+ else {
+ i = (0x007fffff) >> j0;
+ if ((i0 & i) == 0)
+ return x; /* x is integral */
+ if (huge_f + x > (float)0.0) { /* raise inexact flag */
+ if (i0 > 0)
+ i0 += (0x00800000) >> j0;
+ i0 &= (~i);
+ }
+ }
+ }
+ else {
+ if (j0 == 0x80)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ SET_FLOAT_WORD(x, i0);
+ return x;
+}
+
+static float
+freebsd_floorf(float x)
+{
+ int32_t i0, j0;
+ u_int32_t i;
+ GET_FLOAT_WORD(i0, x);
+ j0 = ((i0 >> 23) & 0xff) - 0x7f;
+ if (j0 < 23) {
+ if (j0 < 0) { /* raise inexact if x != 0 */
+ if (huge_f + x > (float)0.0) { /* return 0*sign(x) if |x|<1 */
+ if (i0 >= 0) {
+ i0 = 0;
+ }
+ else if ((i0 & 0x7fffffff) != 0) {
+ i0 = 0xbf800000;
+ }
+ }
+ }
+ else {
+ i = (0x007fffff) >> j0;
+ if ((i0 & i) == 0)
+ return x; /* x is integral */
+ if (huge_f + x > (float)0.0) { /* raise inexact flag */
+ if (i0 < 0)
+ i0 += (0x00800000) >> j0;
+ i0 &= (~i);
+ }
+ }
+ }
+ else {
+ if (j0 == 0x80)
+ return x + x; /* inf or NaN */
+ else
+ return x; /* x is integral */
+ }
+ SET_FLOAT_WORD(x, i0);
+ return x;
+}
+
+static float
+freebsd_fminf(float x, float y)
+{
+ if (is_little_endian()) {
+ IEEEf2bits_L u[2] = { 0 };
+
+ u[0].f = x;
+ u[1].f = y;
+
+ /* Check for NaNs to avoid raising spurious exceptions. */
+ if (u[0].bits.exp == 255 && u[0].bits.man != 0)
+ return (y);
+ if (u[1].bits.exp == 255 && u[1].bits.man != 0)
+ return (x);
+
+ /* Handle comparisons of signed zeroes. */
+ if (u[0].bits.sign != u[1].bits.sign)
+ return (u[u[1].bits.sign].f);
+ }
+ else {
+ IEEEf2bits_B u[2] = { 0 };
+
+ u[0].f = x;
+ u[1].f = y;
+
+ /* Check for NaNs to avoid raising spurious exceptions. */
+ if (u[0].bits.exp == 255 && u[0].bits.man != 0)
+ return (y);
+ if (u[1].bits.exp == 255 && u[1].bits.man != 0)
+ return (x);
+
+ /* Handle comparisons of signed zeroes. */
+ if (u[0].bits.sign != u[1].bits.sign)
+ return (u[u[1].bits.sign].f);
+ }
+
+ return (x < y ? x : y);
+}
+
+static float
+freebsd_fmaxf(float x, float y)
+{
+ if (is_little_endian()) {
+ IEEEf2bits_L u[2] = { 0 };
+
+ u[0].f = x;
+ u[1].f = y;
+
+ /* Check for NaNs to avoid raising spurious exceptions. */
+ if (u[0].bits.exp == 255 && u[0].bits.man != 0)
+ return (y);
+ if (u[1].bits.exp == 255 && u[1].bits.man != 0)
+ return (x);
+
+ /* Handle comparisons of signed zeroes. */
+ if (u[0].bits.sign != u[1].bits.sign)
+ return (u[u[0].bits.sign].f);
+ }
+ else {
+ IEEEf2bits_B u[2] = { 0 };
+
+ u[0].f = x;
+ u[1].f = y;
+
+ /* Check for NaNs to avoid raising spurious exceptions. */
+ if (u[0].bits.exp == 255 && u[0].bits.man != 0)
+ return (y);
+ if (u[1].bits.exp == 255 && u[1].bits.man != 0)
+ return (x);
+
+ /* Handle comparisons of signed zeroes. */
+ if (u[0].bits.sign != u[1].bits.sign)
+ return (u[u[0].bits.sign].f);
+ }
+
+ return (x > y ? x : y);
+}
+
+static double
+freebsd_copysign(double x, double y)
+{
+ u_int32_t hx, hy;
+ GET_HIGH_WORD(hx, x);
+ GET_HIGH_WORD(hy, y);
+ SET_HIGH_WORD(x, (hx & 0x7fffffff) | (hy & 0x80000000));
+ return x;
+}
+
+static double
+freebsd_scalbn(double x, int n)
+{
+ int32_t k, hx, lx;
+ EXTRACT_WORDS(hx, lx, x);
+ k = (hx & 0x7ff00000) >> 20; /* extract exponent */
+ if (k == 0) { /* 0 or subnormal x */
+ if ((lx | (hx & 0x7fffffff)) == 0)
+ return x; /* +-0 */
+ x *= two54;
+ GET_HIGH_WORD(hx, x);
+ k = ((hx & 0x7ff00000) >> 20) - 54;
+ if (n < -50000)
+ return tiny * x; /*underflow*/
+ }
+ if (k == 0x7ff)
+ return x + x; /* NaN or Inf */
+ k = k + n;
+ if (k > 0x7fe)
+ return huge * freebsd_copysign(huge, x); /* overflow */
+ if (k > 0) /* normal result */
+ {
+ SET_HIGH_WORD(x, (hx & 0x800fffff) | (k << 20));
+ return x;
+ }
+ if (k <= -54) {
+ if (n > 50000) /* in case integer overflow in n+k */
+ return huge * freebsd_copysign(huge, x); /*overflow*/
+ else
+ return tiny * freebsd_copysign(tiny, x); /*underflow*/
+ }
+ k += 54; /* subnormal result */
+ SET_HIGH_WORD(x, (hx & 0x800fffff) | (k << 20));
+ return x * twom54;
+}
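
For results that stay in the normal range, freebsd_scalbn() amounts to adding n to the 11-bit biased exponent field that SET_HIGH_WORD exposes; most of the code above handles the overflow, underflow and subnormal cases around that. A standalone sketch of just the core step (assumes IEEE-754 doubles, normal-range inputs and outputs only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double scale_by_pow2(double x, int n) /* no overflow/subnormal handling */
{
    uint64_t bits;
    memcpy(&bits, &x, sizeof(bits));
    bits += (uint64_t)n << 52;               /* exponent field starts at bit 52 */
    memcpy(&x, &bits, sizeof(x));
    return x;
}

int main(void)
{
    printf("%g %g\n", scale_by_pow2(3.0, 2), scale_by_pow2(3.0, -1)); /* 12 1.5 */
    return 0;
}
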
+
+static double
+freebsd_pow(double x, double y)
+{
+ double z, ax, z_h, z_l, p_h, p_l;
+ double y1, t1, t2, r, s, t, u, v, w;
+ int32_t i, j, k, yisint, n;
+ int32_t hx, hy, ix, iy;
+ u_int32_t lx, ly;
+
+ EXTRACT_WORDS(hx, lx, x);
+ EXTRACT_WORDS(hy, ly, y);
+ ix = hx & 0x7fffffff;
+ iy = hy & 0x7fffffff;
+
+ /* y==zero: x**0 = 1 */
+ if ((iy | ly) == 0)
+ return one;
+
+ /* x==1: 1**y = 1, even if y is NaN */
+ if (hx == 0x3ff00000 && lx == 0)
+ return one;
+
+ /* y!=zero: result is NaN if either arg is NaN */
+ if (ix > 0x7ff00000 || ((ix == 0x7ff00000) && (lx != 0)) || iy > 0x7ff00000
+ || ((iy == 0x7ff00000) && (ly != 0)))
+ return (x + 0.0) + (y + 0.0);
+
+ /* determine if y is an odd int when x < 0
+ * yisint = 0 ... y is not an integer
+ * yisint = 1 ... y is an odd int
+ * yisint = 2 ... y is an even int
+ */
+ yisint = 0;
+ if (hx < 0) {
+ if (iy >= 0x43400000)
+ yisint = 2; /* even integer y */
+ else if (iy >= 0x3ff00000) {
+ k = (iy >> 20) - 0x3ff; /* exponent */
+ if (k > 20) {
+ j = ly >> (52 - k);
+ if ((j << (52 - k)) == ly)
+ yisint = 2 - (j & 1);
+ }
+ else if (ly == 0) {
+ j = iy >> (20 - k);
+ if ((j << (20 - k)) == iy)
+ yisint = 2 - (j & 1);
+ }
+ }
+ }
+
+ /* special value of y */
+ if (ly == 0) {
+ if (iy == 0x7ff00000) { /* y is +-inf */
+ if (((ix - 0x3ff00000) | lx) == 0)
+ return one; /* (-1)**+-inf is NaN */
+ else if (ix >= 0x3ff00000) /* (|x|>1)**+-inf = inf,0 */
+ return (hy >= 0) ? y : zero;
+ else /* (|x|<1)**-,+inf = inf,0 */
+ return (hy < 0) ? -y : zero;
+ }
+ if (iy == 0x3ff00000) { /* y is +-1 */
+ if (hy < 0)
+ return one / x;
+ else
+ return x;
+ }
+ if (hy == 0x40000000)
+ return x * x; /* y is 2 */
+ if (hy == 0x40080000)
+ return x * x * x; /* y is 3 */
+ if (hy == 0x40100000) { /* y is 4 */
+ u = x * x;
+ return u * u;
+ }
+ if (hy == 0x3fe00000) { /* y is 0.5 */
+ if (hx >= 0) /* x >= +0 */
+ return sqrt(x);
+ }
+ }
+
+ ax = fabs(x);
+ /* special value of x */
+ if (lx == 0) {
+ if (ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000) {
+ z = ax; /*x is +-0,+-inf,+-1*/
+ if (hy < 0)
+ z = one / z; /* z = (1/|x|) */
+ if (hx < 0) {
+ if (((ix - 0x3ff00000) | yisint) == 0) {
+ z = (z - z) / (z - z); /* (-1)**non-int is NaN */
+ }
+ else if (yisint == 1)
+ z = -z; /* (x<0)**odd = -(|x|**odd) */
+ }
+ return z;
+ }
+ }
+
+ /* CYGNUS LOCAL + fdlibm-5.3 fix: This used to be
+ n = (hx>>31)+1;
+ but ANSI C says a right shift of a signed negative quantity is
+ implementation defined. */
+ n = ((u_int32_t)hx >> 31) - 1;
+
+ /* (x<0)**(non-int) is NaN */
+ if ((n | yisint) == 0)
+ return (x - x) / (x - x);
+
+ s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
+ if ((n | (yisint - 1)) == 0)
+ s = -one; /* (-ve)**(odd int) */
+
+ /* |y| is huge */
+ if (iy > 0x41e00000) { /* if |y| > 2**31 */
+ if (iy > 0x43f00000) { /* if |y| > 2**64, must o/uflow */
+ if (ix <= 0x3fefffff)
+ return (hy < 0) ? huge * huge : tiny * tiny;
+ if (ix >= 0x3ff00000)
+ return (hy > 0) ? huge * huge : tiny * tiny;
+ }
+ /* over/underflow if x is not close to one */
+ if (ix < 0x3fefffff)
+ return (hy < 0) ? s * huge * huge : s * tiny * tiny;
+ if (ix > 0x3ff00000)
+ return (hy > 0) ? s * huge * huge : s * tiny * tiny;
+ /* now |1-x| is tiny <= 2**-20, suffice to compute
+ log(x) by x-x^2/2+x^3/3-x^4/4 */
+ t = ax - one; /* t has 20 trailing zeros */
+ w = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25));
+ u = ivln2_h * t; /* ivln2_h has 21 sig. bits */
+ v = t * ivln2_l - w * ivln2;
+ t1 = u + v;
+ SET_LOW_WORD(t1, 0);
+ t2 = v - (t1 - u);
+ }
+ else {
+ double ss, s2, s_h, s_l, t_h, t_l;
+ n = 0;
+ /* take care subnormal number */
+ if (ix < 0x00100000) {
+ ax *= two53;
+ n -= 53;
+ GET_HIGH_WORD(ix, ax);
+ }
+ n += ((ix) >> 20) - 0x3ff;
+ j = ix & 0x000fffff;
+ /* determine interval */
+ ix = j | 0x3ff00000; /* normalize ix */
+ if (j <= 0x3988E)
+ k = 0; /* |x|<sqrt(3/2) */
+ else if (j < 0xBB67A)
+ k = 1; /* |x|<sqrt(3) */
+ else {
+ k = 0;
+ n += 1;
+ ix -= 0x00100000;
+ }
+ SET_HIGH_WORD(ax, ix);
+
+ /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+ u = ax - bp[k]; /* bp[0]=1.0, bp[1]=1.5 */
+ v = one / (ax + bp[k]);
+ ss = u * v;
+ s_h = ss;
+ SET_LOW_WORD(s_h, 0);
+ /* t_h=ax+bp[k] High */
+ t_h = zero;
+ SET_HIGH_WORD(t_h, ((ix >> 1) | 0x20000000) + 0x00080000 + (k << 18));
+ t_l = ax - (t_h - bp[k]);
+ s_l = v * ((u - s_h * t_h) - s_h * t_l);
+ /* compute log(ax) */
+ s2 = ss * ss;
+ r = s2 * s2
+ * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6)))));
+ r += s_l * (s_h + ss);
+ s2 = s_h * s_h;
+ t_h = 3.0 + s2 + r;
+ SET_LOW_WORD(t_h, 0);
+ t_l = r - ((t_h - 3.0) - s2);
+ /* u+v = ss*(1+...) */
+ u = s_h * t_h;
+ v = s_l * t_h + t_l * ss;
+ /* 2/(3log2)*(ss+...) */
+ p_h = u + v;
+ SET_LOW_WORD(p_h, 0);
+ p_l = v - (p_h - u);
+ z_h = cp_h * p_h; /* cp_h+cp_l = 2/(3*log2) */
+ z_l = cp_l * p_h + p_l * cp + dp_l[k];
+ /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+ t = (double)n;
+ t1 = (((z_h + z_l) + dp_h[k]) + t);
+ SET_LOW_WORD(t1, 0);
+ t2 = z_l - (((t1 - t) - dp_h[k]) - z_h);
+ }
+
+ /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+ y1 = y;
+ SET_LOW_WORD(y1, 0);
+ p_l = (y - y1) * t1 + y * t2;
+ p_h = y1 * t1;
+ z = p_l + p_h;
+ EXTRACT_WORDS(j, i, z);
+ if (j >= 0x40900000) { /* z >= 1024 */
+ if (((j - 0x40900000) | i) != 0) /* if z > 1024 */
+ return s * huge * huge; /* overflow */
+ else {
+ if (p_l + ovt > z - p_h)
+ return s * huge * huge; /* overflow */
+ }
+ }
+ else if ((j & 0x7fffffff) >= 0x4090cc00) { /* z <= -1075 */
+ if (((j - 0xc090cc00) | i) != 0) /* z < -1075 */
+ return s * tiny * tiny; /* underflow */
+ else {
+ if (p_l <= z - p_h)
+ return s * tiny * tiny; /* underflow */
+ }
+ }
+ /*
+ * compute 2**(p_h+p_l)
+ */
+ i = j & 0x7fffffff;
+ k = (i >> 20) - 0x3ff;
+ n = 0;
+ if (i > 0x3fe00000) { /* if |z| > 0.5, set n = [z+0.5] */
+ n = j + (0x00100000 >> (k + 1));
+ k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */
+ t = zero;
+ SET_HIGH_WORD(t, n & ~(0x000fffff >> k));
+ n = ((n & 0x000fffff) | 0x00100000) >> (20 - k);
+ if (j < 0)
+ n = -n;
+ p_h -= t;
+ }
+ t = p_l + p_h;
+ SET_LOW_WORD(t, 0);
+ u = t * lg2_h;
+ v = (p_l - (t - p_h)) * lg2 + t * lg2_l;
+ z = u + v;
+ w = v - (z - u);
+ t = z * z;
+ t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+ r = (z * t1) / (t1 - two) - (w + z * w);
+ z = one - (r - z);
+ GET_HIGH_WORD(j, z);
+ j += (n << 20);
+ if ((j >> 20) <= 0)
+ z = freebsd_scalbn(z, n); /* subnormal output */
+ else
+ SET_HIGH_WORD(z, j);
+ return s * z;
+}
+
+double
+atan(double x)
+{
+ return freebsd_atan(x);
+}
+
+double
+atan2(double y, double x)
+{
+ return freebsd_atan2(y, x);
+}
+
+#ifndef BH_HAS_SQRT
+double
+sqrt(double x)
+{
+ return freebsd_sqrt(x);
+}
+#endif
+
+double
+floor(double x)
+{
+ return freebsd_floor(x);
+}
+
+double
+ceil(double x)
+{
+ return freebsd_ceil(x);
+}
+
+double
+fmin(double x, double y)
+{
+ return x < y ? x : y;
+}
+
+double
+fmax(double x, double y)
+{
+ return x > y ? x : y;
+}
+
+double
+rint(double x)
+{
+ return freebsd_rint(x);
+}
+
+double
+fabs(double x)
+{
+ return freebsd_fabs(x);
+}
+
+int
+isnan(double x)
+{
+ return freebsd_isnan(x);
+}
+
+double
+trunc(double x)
+{
+ return (x > 0) ? freebsd_floor(x) : freebsd_ceil(x);
+}
+
+int
+signbit(double x)
+{
+ return ((__HI(x) & 0x80000000) >> 31);
+}
+
+float
+fabsf(float x)
+{
+ return freebsd_fabsf(x);
+}
+
+float
+truncf(float x)
+{
+ return freebsd_truncf(x);
+}
+
+float
+rintf(float x)
+{
+ return freebsd_rintf(x);
+}
+
+float
+ceilf(float x)
+{
+ return freebsd_ceilf(x);
+}
+
+float
+floorf(float x)
+{
+ return freebsd_floorf(x);
+}
+
+float
+fminf(float x, float y)
+{
+ return freebsd_fminf(x, y);
+}
+
+float
+fmaxf(float x, float y)
+{
+ return freebsd_fmaxf(x, y);
+}
+
+#ifndef BH_HAS_SQRTF
+float
+sqrtf(float x)
+{
+ return freebsd_sqrtf(x);
+}
+#endif
+
+double
+pow(double x, double y)
+{
+ return freebsd_pow(x, y);
+}
+
+double
+scalbn(double x, int n)
+{
+ return freebsd_scalbn(x, n);
+}
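
The wrappers at the end of math.c export the standard libm names (atan, atan2, pow, sqrt, rint, trunc, the float variants, and so on), so the rest of the runtime calls them exactly as it would call libm; on platforms without a usable libm this file appears to be what satisfies those references. A standalone sketch of such a caller, here resolved by the host libm (link with -lm; illustrative only):

#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("pow(2, 10)  = %g\n", pow(2.0, 10.0));   /* 1024 */
    printf("atan2(1, 1) = %g\n", atan2(1.0, 1.0));  /* ~0.785398 */
    printf("trunc(-2.7) = %g\n", trunc(-2.7));      /* -2 */
    return 0;
}
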
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/platform_api_math.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/platform_api_math.cmake
new file mode 100644
index 000000000..09c74bfc5
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/math/platform_api_math.cmake
@@ -0,0 +1,8 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_COMMON_MATH_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+file (GLOB_RECURSE source_all ${PLATFORM_COMMON_MATH_DIR}/*.c)
+
+set (PLATFORM_COMMON_MATH_SOURCE ${source_all} )
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake
new file mode 100644
index 000000000..4abefff1e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake
@@ -0,0 +1,8 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_COMMON_POSIX_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+file (GLOB_RECURSE source_all ${PLATFORM_COMMON_POSIX_DIR}/*.c)
+
+set (PLATFORM_COMMON_POSIX_SOURCE ${source_all} )
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c
new file mode 100644
index 000000000..912998ee0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+void *
+os_malloc(unsigned size)
+{
+ return malloc(size);
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return realloc(ptr, size);
+}
+
+void
+os_free(void *ptr)
+{
+ free(ptr);
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ int ret = -1;
+ FILE *f;
+ char line[128] = { 0 };
+ unsigned int out_idx = 0;
+
+ if (!out || !size)
+ goto quit;
+
+ f = fopen("/proc/self/status", "r");
+ if (!f) {
+ perror("fopen failed: ");
+ goto quit;
+ }
+
+ memset(out, 0, size);
+
+ while (fgets(line, sizeof(line), f)) {
+#if WASM_ENABLE_MEMORY_PROFILING != 0
+ if (strncmp(line, "Vm", 2) == 0 || strncmp(line, "Rss", 3) == 0) {
+#else
+ if (strncmp(line, "VmRSS", 5) == 0
+ || strncmp(line, "RssAnon", 7) == 0) {
+#endif
+ size_t line_len = strlen(line);
+ if (line_len >= size - 1 - out_idx)
+ goto close_file;
+
+ /* copy the line without its null-terminating byte */
+ memcpy(out + out_idx, line, line_len);
+ out_idx += line_len;
+ }
+ }
+
+ if (ferror(f)) {
+ perror("fgets failed: ");
+ goto close_file;
+ }
+
+ ret = 0;
+close_file:
+ fclose(f);
+quit:
+ return ret;
+}
\ No newline at end of file
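
os_dumps_proc_mem_info() above copies the resident-set lines of /proc/self/status into the caller's buffer. A standalone, Linux-only sketch of the same filtering, showing the kind of lines that match in the default (non-profiling) build, e.g. "VmRSS:  1234 kB" and "RssAnon:  567 kB":

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[128];
    FILE *f = fopen("/proc/self/status", "r");

    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f)) {
        /* same prefixes the default build of the function above matches */
        if (strncmp(line, "VmRSS", 5) == 0 || strncmp(line, "RssAnon", 7) == 0)
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}
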
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c
new file mode 100644
index 000000000..2dfbee453
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+#ifndef BH_ENABLE_TRACE_MMAP
+#define BH_ENABLE_TRACE_MMAP 0
+#endif
+
+#if BH_ENABLE_TRACE_MMAP != 0
+static size_t total_size_mmapped = 0;
+static size_t total_size_munmapped = 0;
+#endif
+
+#define HUGE_PAGE_SIZE (2 * 1024 * 1024)
+
+#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
+static inline uintptr_t
+round_up(uintptr_t v, uintptr_t b)
+{
+ uintptr_t m = b - 1;
+ return (v + m) & ~m;
+}
+
+static inline uintptr_t
+round_down(uintptr_t v, uintptr_t b)
+{
+ uintptr_t m = b - 1;
+ return v & ~m;
+}
+#endif
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ int map_prot = PROT_NONE;
+ int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ uint64 request_size, page_size;
+ uint8 *addr = MAP_FAILED;
+ uint32 i;
+
+ page_size = (uint64)getpagesize();
+ request_size = (size + page_size - 1) & ~(page_size - 1);
+
+#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
+ /* huge page isn't supported on MacOS and NuttX */
+ if (request_size >= HUGE_PAGE_SIZE)
+ /* apply one extra huge page */
+ request_size += HUGE_PAGE_SIZE;
+#endif
+
+ if ((size_t)request_size < size)
+ /* integer overflow */
+ return NULL;
+
+ if (request_size > 16 * (uint64)UINT32_MAX)
+ /* at most 64 G (16 * 4 G) is allowed */
+ return NULL;
+
+ if (prot & MMAP_PROT_READ)
+ map_prot |= PROT_READ;
+
+ if (prot & MMAP_PROT_WRITE)
+ map_prot |= PROT_WRITE;
+
+ if (prot & MMAP_PROT_EXEC)
+ map_prot |= PROT_EXEC;
+
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+#ifndef __APPLE__
+ if (flags & MMAP_MAP_32BIT)
+ map_flags |= MAP_32BIT;
+#endif
+#endif
+
+ if (flags & MMAP_MAP_FIXED)
+ map_flags |= MAP_FIXED;
+
+#if defined(BUILD_TARGET_RISCV64_LP64D) || defined(BUILD_TARGET_RISCV64_LP64)
+ /* As AOT relocation in RISCV64 may require that the code/data mapped
+ * is in range 0 to 2GB, we try to map the memory with hint address
+ * (mmap's first argument) to meet the requirement.
+ */
+ if (!hint && !(flags & MMAP_MAP_FIXED) && (flags & MMAP_MAP_32BIT)) {
+ uint8 *stack_addr = (uint8 *)&map_prot;
+ uint8 *text_addr = (uint8 *)os_mmap;
+ /* hint address begins with 1MB */
+ static uint8 *hint_addr = (uint8 *)(uintptr_t)BH_MB;
+
+ if ((hint_addr - text_addr >= 0 && hint_addr - text_addr < 100 * BH_MB)
+ || (text_addr - hint_addr >= 0
+ && text_addr - hint_addr < 100 * BH_MB)) {
+ /* hint address is possibly in text section, skip it */
+ hint_addr += 100 * BH_MB;
+ }
+
+ if ((hint_addr - stack_addr >= 0 && hint_addr - stack_addr < 8 * BH_MB)
+ || (stack_addr - hint_addr >= 0
+ && stack_addr - hint_addr < 8 * BH_MB)) {
+ /* hint address is possibly in native stack area, skip it */
+ hint_addr += 8 * BH_MB;
+ }
+
+ /* try 10 times, step with 1MB each time */
+ for (i = 0; i < 10 && hint_addr < (uint8 *)(uintptr_t)(2ULL * BH_GB);
+ i++) {
+ addr = mmap(hint_addr, request_size, map_prot, map_flags, -1, 0);
+ if (addr != MAP_FAILED) {
+ if (addr > (uint8 *)(uintptr_t)(2ULL * BH_GB)) {
+ /* unmap and try again if the mapped address doesn't
+ * meet the requirement */
+ os_munmap(addr, request_size);
+ }
+ else {
+ /* success, reset next hint address */
+ hint_addr += request_size;
+ break;
+ }
+ }
+ hint_addr += BH_MB;
+ }
+ }
+#endif /* end of BUILD_TARGET_RISCV64_LP64D || BUILD_TARGET_RISCV64_LP64 */
+
+ /* memory hasn't been mapped yet, or the previous mapping attempt failed */
+ if (addr == MAP_FAILED) {
+ /* try 5 times */
+ for (i = 0; i < 5; i++) {
+ addr = mmap(hint, request_size, map_prot, map_flags, -1, 0);
+ if (addr != MAP_FAILED)
+ break;
+ }
+ }
+
+ if (addr == MAP_FAILED) {
+#if BH_ENABLE_TRACE_MMAP != 0
+ os_printf("mmap failed\n");
+#endif
+ return NULL;
+ }
+
+#if BH_ENABLE_TRACE_MMAP != 0
+ total_size_mmapped += request_size;
+ os_printf("mmap return: %p with size: %zu, total_size_mmapped: %zu, "
+ "total_size_munmapped: %zu\n",
+ addr, request_size, total_size_mmapped, total_size_munmapped);
+#endif
+
+#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
+ /* huge page isn't supported on MacOS and NuttX */
+ if (request_size > HUGE_PAGE_SIZE) {
+ uintptr_t huge_start, huge_end;
+ size_t prefix_size = 0, suffix_size = HUGE_PAGE_SIZE;
+
+ huge_start = round_up((uintptr_t)addr, HUGE_PAGE_SIZE);
+
+ if (huge_start > (uintptr_t)addr) {
+ prefix_size += huge_start - (uintptr_t)addr;
+ suffix_size -= huge_start - (uintptr_t)addr;
+ }
+
+ /* unmap one extra huge page */
+
+ if (prefix_size > 0) {
+ munmap(addr, prefix_size);
+#if BH_ENABLE_TRACE_MMAP != 0
+ total_size_munmapped += prefix_size;
+ os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
+ "total_size_munmapped: %zu\n",
+ addr, prefix_size, total_size_mmapped,
+ total_size_munmapped);
+#endif
+ }
+ if (suffix_size > 0) {
+ munmap(addr + request_size - suffix_size, suffix_size);
+#if BH_ENABLE_TRACE_MMAP != 0
+ total_size_munmapped += suffix_size;
+ os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
+ "total_size_munmapped: %zu\n",
+ addr + request_size - suffix_size, suffix_size,
+ total_size_mmapped, total_size_munmapped);
+#endif
+ }
+
+ addr = (uint8 *)huge_start;
+ request_size -= HUGE_PAGE_SIZE;
+
+ huge_end = round_down(huge_start + request_size, HUGE_PAGE_SIZE);
+ if (huge_end > huge_start) {
+ int ret = madvise((void *)huge_start, huge_end - huge_start,
+ MADV_HUGEPAGE);
+ if (ret) {
+#if BH_ENABLE_TRACE_MMAP != 0
+ os_printf(
+ "warning: madvise(%p, %lu) huge page failed, return %d\n",
+ (void *)huge_start, huge_end - huge_start, ret);
+#endif
+ }
+ }
+ }
+#endif /* end of !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE) */
+
+ return addr;
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ uint64 page_size = (uint64)getpagesize();
+ uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (addr) {
+ if (munmap(addr, request_size)) {
+ os_printf("os_munmap error addr:%p, size:0x%" PRIx64 ", errno:%d\n",
+ addr, request_size, errno);
+ return;
+ }
+#if BH_ENABLE_TRACE_MMAP != 0
+ total_size_munmapped += request_size;
+ os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
+ "total_size_munmapped: %zu\n",
+ addr, request_size, total_size_mmapped, total_size_munmapped);
+#endif
+ }
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ int map_prot = PROT_NONE;
+ uint64 page_size = (uint64)getpagesize();
+ uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return 0;
+
+ if (prot & MMAP_PROT_READ)
+ map_prot |= PROT_READ;
+
+ if (prot & MMAP_PROT_WRITE)
+ map_prot |= PROT_WRITE;
+
+ if (prot & MMAP_PROT_EXEC)
+ map_prot |= PROT_EXEC;
+
+ return mprotect(addr, request_size, map_prot);
+}
+
+void
+os_dcache_flush(void)
+{}
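
The huge-page branch of os_mmap() above over-allocates one extra 2 MiB so that a 2 MiB-aligned start is guaranteed to exist inside the mapping, then unmaps the unaligned prefix and the leftover suffix before advising MADV_HUGEPAGE on the aligned middle. A standalone sketch of just that alignment arithmetic, using a made-up 4 KiB-aligned mmap result for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HUGE_PAGE_SIZE (2u * 1024 * 1024)

static uintptr_t
round_up(uintptr_t v, uintptr_t b)
{
    return (v + b - 1) & ~(b - 1);
}

int main(void)
{
    uintptr_t addr = (uintptr_t)0x40001000u;     /* hypothetical mmap result */
    size_t request = 6u * 1024 * 1024;           /* page-rounded request: 6 MiB */
    size_t mapped = request + HUGE_PAGE_SIZE;    /* 8 MiB actually mapped */

    uintptr_t huge_start = round_up(addr, HUGE_PAGE_SIZE);
    size_t prefix = (size_t)(huge_start - addr); /* unmapped again before huge_start */
    size_t suffix = HUGE_PAGE_SIZE - prefix;     /* unmapped again at the tail */

    printf("prefix=%zu suffix=%zu usable=%zu (== request)\n",
           prefix, suffix, mapped - prefix - suffix);
    return 0;
}
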
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c
new file mode 100644
index 000000000..e33781d7d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+static bool
+textual_addr_to_sockaddr(const char *textual, int port, struct sockaddr *out,
+ socklen_t *out_len)
+{
+ struct sockaddr_in *v4;
+#ifdef IPPROTO_IPV6
+ struct sockaddr_in6 *v6;
+#endif
+
+ assert(textual);
+
+ v4 = (struct sockaddr_in *)out;
+ if (inet_pton(AF_INET, textual, &v4->sin_addr.s_addr) == 1) {
+ v4->sin_family = AF_INET;
+ v4->sin_port = htons(port);
+ *out_len = sizeof(struct sockaddr_in);
+ return true;
+ }
+
+#ifdef IPPROTO_IPV6
+ v6 = (struct sockaddr_in6 *)out;
+ if (inet_pton(AF_INET6, textual, &v6->sin6_addr.s6_addr) == 1) {
+ v6->sin6_family = AF_INET6;
+ v6->sin6_port = htons(port);
+ *out_len = sizeof(struct sockaddr_in6);
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+static int
+sockaddr_to_bh_sockaddr(const struct sockaddr *sockaddr,
+ bh_sockaddr_t *bh_sockaddr)
+{
+ switch (sockaddr->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
+
+ bh_sockaddr->port = ntohs(addr->sin_port);
+ bh_sockaddr->addr_bufer.ipv4 = ntohl(addr->sin_addr.s_addr);
+ bh_sockaddr->is_ipv4 = true;
+ return BHT_OK;
+ }
+#ifdef IPPROTO_IPV6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)sockaddr;
+ size_t i;
+
+ bh_sockaddr->port = ntohs(addr->sin6_port);
+
+ for (i = 0; i < sizeof(bh_sockaddr->addr_bufer.ipv6)
+ / sizeof(bh_sockaddr->addr_bufer.ipv6[0]);
+ i++) {
+ uint16 part_addr = addr->sin6_addr.s6_addr[i * 2]
+ | (addr->sin6_addr.s6_addr[i * 2 + 1] << 8);
+ bh_sockaddr->addr_bufer.ipv6[i] = ntohs(part_addr);
+ }
+
+ bh_sockaddr->is_ipv4 = false;
+ return BHT_OK;
+ }
+#endif
+ default:
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+ }
+}
+
+static void
+bh_sockaddr_to_sockaddr(const bh_sockaddr_t *bh_sockaddr,
+ struct sockaddr_storage *sockaddr, socklen_t *socklen)
+{
+ if (bh_sockaddr->is_ipv4) {
+ struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
+ addr->sin_port = htons(bh_sockaddr->port);
+ addr->sin_family = AF_INET;
+ addr->sin_addr.s_addr = htonl(bh_sockaddr->addr_bufer.ipv4);
+ *socklen = sizeof(*addr);
+ }
+#ifdef IPPROTO_IPV6
+ else {
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)sockaddr;
+ size_t i;
+ addr->sin6_port = htons(bh_sockaddr->port);
+ addr->sin6_family = AF_INET6;
+
+ for (i = 0; i < sizeof(bh_sockaddr->addr_bufer.ipv6)
+ / sizeof(bh_sockaddr->addr_bufer.ipv6[0]);
+ i++) {
+ uint16 part_addr = htons(bh_sockaddr->addr_bufer.ipv6[i]);
+ addr->sin6_addr.s6_addr[i * 2] = 0xff & part_addr;
+ addr->sin6_addr.s6_addr[i * 2 + 1] = (0xff00 & part_addr) >> 8;
+ }
+
+ *socklen = sizeof(*addr);
+ }
+#endif
+}
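
A standalone sketch of the inet_pton-based conversion the helpers above rely on, turning "127.0.0.1" plus port 8080 into a struct sockaddr_in (illustrative only; the patch's textual_addr_to_sockaddr also handles the IPv6 case):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct sockaddr_in v4;
    memset(&v4, 0, sizeof(v4));

    if (inet_pton(AF_INET, "127.0.0.1", &v4.sin_addr) == 1) {
        v4.sin_family = AF_INET;
        v4.sin_port = htons(8080);               /* network byte order */
        printf("addr=0x%08X port=%u\n",
               (unsigned)ntohl(v4.sin_addr.s_addr),
               (unsigned)ntohs(v4.sin_port));    /* 0x7F000001 8080 */
    }
    return 0;
}
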
+
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp)
+{
+ int af = is_ipv4 ? AF_INET : AF_INET6;
+
+ if (!sock) {
+ return BHT_ERROR;
+ }
+
+ if (is_tcp) {
+ *sock = socket(af, SOCK_STREAM, IPPROTO_TCP);
+ }
+ else {
+ *sock = socket(af, SOCK_DGRAM, 0);
+ }
+
+ return (*sock == -1) ? BHT_ERROR : BHT_OK;
+}
+
+int
+os_socket_bind(bh_socket_t socket, const char *host, int *port)
+{
+ struct sockaddr_storage addr = { 0 };
+ struct linger ling;
+ socklen_t socklen;
+ int ret;
+
+ assert(host);
+ assert(port);
+
+ ling.l_onoff = 1;
+ ling.l_linger = 0;
+
+ if (!textual_addr_to_sockaddr(host, *port, (struct sockaddr *)&addr,
+ &socklen)) {
+ goto fail;
+ }
+
+ ret = fcntl(socket, F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = setsockopt(socket, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = bind(socket, (struct sockaddr *)&addr, socklen);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ socklen = sizeof(addr);
+ if (getsockname(socket, (void *)&addr, &socklen) == -1) {
+ goto fail;
+ }
+
+ if (addr.ss_family == AF_INET) {
+ *port = ntohs(((struct sockaddr_in *)&addr)->sin_port);
+ }
+ else {
+#ifdef IPPROTO_IPV6
+ *port = ntohs(((struct sockaddr_in6 *)&addr)->sin6_port);
+#else
+ goto fail;
+#endif
+ }
+
+ return BHT_OK;
+
+fail:
+ return BHT_ERROR;
+}
+
+int
+os_socket_settimeout(bh_socket_t socket, uint64 timeout_us)
+{
+ struct timeval tv;
+ tv.tv_sec = timeout_us / 1000000UL;
+ tv.tv_usec = timeout_us % 1000000UL;
+
+ if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv,
+ sizeof(tv))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_listen(bh_socket_t socket, int max_client)
+{
+ if (listen(socket, max_client) != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
+ unsigned int *addrlen)
+{
+ *sock = accept(server_sock, addr, addrlen);
+
+ if (*sock < 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_connect(bh_socket_t socket, const char *addr, int port)
+{
+ struct sockaddr_storage addr_in = { 0 };
+ socklen_t addr_len;
+ int ret = 0;
+
+ if (!textual_addr_to_sockaddr(addr, port, (struct sockaddr *)&addr_in,
+ &addr_len)) {
+ return BHT_ERROR;
+ }
+
+ ret = connect(socket, (struct sockaddr *)&addr_in, addr_len);
+ if (ret == -1) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_recv(bh_socket_t socket, void *buf, unsigned int len)
+{
+ return recv(socket, buf, len, 0);
+}
+
+int
+os_socket_recv_from(bh_socket_t socket, void *buf, unsigned int len, int flags,
+ bh_sockaddr_t *src_addr)
+{
+ struct sockaddr_storage sock_addr = { 0 };
+ socklen_t socklen = sizeof(sock_addr);
+ int ret;
+
+ ret = recvfrom(socket, buf, len, flags, (struct sockaddr *)&sock_addr,
+ &socklen);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (src_addr && socklen > 0) {
+ if (sockaddr_to_bh_sockaddr((struct sockaddr *)&sock_addr, src_addr)
+ == BHT_ERROR) {
+ return -1;
+ }
+ }
+
+ return ret;
+}
+
+int
+os_socket_send(bh_socket_t socket, const void *buf, unsigned int len)
+{
+ return send(socket, buf, len, 0);
+}
+
+int
+os_socket_send_to(bh_socket_t socket, const void *buf, unsigned int len,
+ int flags, const bh_sockaddr_t *dest_addr)
+{
+ struct sockaddr_storage sock_addr = { 0 };
+ socklen_t socklen = 0;
+
+ bh_sockaddr_to_sockaddr(dest_addr, &sock_addr, &socklen);
+
+ return sendto(socket, buf, len, flags, (const struct sockaddr *)&sock_addr,
+ socklen);
+}
+
+int
+os_socket_close(bh_socket_t socket)
+{
+ close(socket);
+ return BHT_OK;
+}
+
+int
+os_socket_shutdown(bh_socket_t socket)
+{
+    shutdown(socket, SHUT_RDWR);
+ return BHT_OK;
+}
+
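+/*
+ * Parse a textual address into the host-byte-order form used by
+ * bh_ip_addr_buffer_t; for example "127.0.0.1" yields out->ipv4 == 0x7F000001.
+ */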
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out)
+{
+ if (!cp)
+ return BHT_ERROR;
+
+ if (is_ipv4) {
+ if (inet_pton(AF_INET, cp, &out->ipv4) != 1) {
+ return BHT_ERROR;
+ }
+ /* Note: ntohl(INADDR_NONE) == INADDR_NONE */
+ out->ipv4 = ntohl(out->ipv4);
+ }
+ else {
+#ifdef IPPROTO_IPV6
+ if (inet_pton(AF_INET6, cp, out->ipv6) != 1) {
+ return BHT_ERROR;
+ }
+ for (int i = 0; i < 8; i++) {
+ out->ipv6[i] = ntohs(out->ipv6[i]);
+ }
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+
+ return BHT_OK;
+}
+
+static int
+getaddrinfo_error_to_errno(int error)
+{
+ switch (error) {
+ case EAI_AGAIN:
+ return EAGAIN;
+ case EAI_FAIL:
+ return EFAULT;
+ case EAI_MEMORY:
+ return ENOMEM;
+ case EAI_SYSTEM:
+ return errno;
+ default:
+ return EINVAL;
+ }
+}
+
+static int
+is_addrinfo_supported(struct addrinfo *info)
+{
+ return
+ // Allow only IPv4 and IPv6
+ (info->ai_family == AF_INET || info->ai_family == AF_INET6)
+ // Allow only UDP and TCP
+ && (info->ai_socktype == SOCK_DGRAM || info->ai_socktype == SOCK_STREAM)
+ && (info->ai_protocol == IPPROTO_TCP
+ || info->ai_protocol == IPPROTO_UDP);
+}
+
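+/*
+ * Resolve host/service with getaddrinfo(), filling at most addr_info_size
+ * entries of addr_info with supported (IPv4/IPv6, TCP/UDP) results and
+ * reporting the total number of results via *max_info_size so a caller can
+ * retry with a larger buffer.
+ */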
+int
+os_socket_addr_resolve(const char *host, const char *service,
+ uint8_t *hint_is_tcp, uint8_t *hint_is_ipv4,
+ bh_addr_info_t *addr_info, size_t addr_info_size,
+ size_t *max_info_size)
+{
+ struct addrinfo hints = { 0 }, *res, *result;
+ int hints_enabled = hint_is_tcp || hint_is_ipv4;
+ int ret;
+ size_t pos = 0;
+
+ if (hints_enabled) {
+ if (hint_is_ipv4) {
+ hints.ai_family = *hint_is_ipv4 ? AF_INET : AF_INET6;
+ }
+ if (hint_is_tcp) {
+ hints.ai_socktype = *hint_is_tcp ? SOCK_STREAM : SOCK_DGRAM;
+ }
+ }
+
+ ret = getaddrinfo(host, strlen(service) == 0 ? NULL : service,
+ hints_enabled ? &hints : NULL, &result);
+ if (ret != BHT_OK) {
+ errno = getaddrinfo_error_to_errno(ret);
+ return BHT_ERROR;
+ }
+
+ res = result;
+ while (res) {
+ if (addr_info_size > pos) {
+ if (!is_addrinfo_supported(res)) {
+ res = res->ai_next;
+ continue;
+ }
+
+ ret =
+ sockaddr_to_bh_sockaddr(res->ai_addr, &addr_info[pos].sockaddr);
+
+ if (ret == BHT_ERROR) {
+ freeaddrinfo(result);
+ return BHT_ERROR;
+ }
+
+ addr_info[pos].is_tcp = res->ai_socktype == SOCK_STREAM;
+ }
+
+ pos++;
+ res = res->ai_next;
+ }
+
+ *max_info_size = pos;
+ freeaddrinfo(result);
+
+ return BHT_OK;
+}
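+
+/*
+ * Illustrative sketch only (kept disabled): a typical caller passes a
+ * fixed-size array and uses the reported total to decide whether a larger
+ * buffer is needed. The function name below is hypothetical.
+ */
+#if 0
+static int
+example_resolve_http_host(const char *host)
+{
+    bh_addr_info_t infos[4];
+    size_t total = 0;
+    uint8_t want_tcp = 1, want_ipv4 = 1;
+
+    if (os_socket_addr_resolve(host, "80", &want_tcp, &want_ipv4, infos,
+                               sizeof(infos) / sizeof(infos[0]), &total)
+        != BHT_OK) {
+        return BHT_ERROR;
+    }
+    /* At most 4 entries of infos are filled; total holds the result count */
+    return BHT_OK;
+}
+#endif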
+
+static int
+os_socket_setbooloption(bh_socket_t socket, int level, int optname,
+ bool is_enabled)
+{
+ int option = (int)is_enabled;
+ if (setsockopt(socket, level, optname, &option, sizeof(option)) != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+static int
+os_socket_getbooloption(bh_socket_t socket, int level, int optname,
+ bool *is_enabled)
+{
+ assert(is_enabled);
+
+ int optval;
+ socklen_t optval_size = sizeof(optval);
+ if (getsockopt(socket, level, optname, &optval, &optval_size) != 0) {
+ return BHT_ERROR;
+ }
+ *is_enabled = (bool)optval;
+ return BHT_OK;
+}
+
+int
+os_socket_set_send_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ int buf_size_int = (int)bufsiz;
+ if (setsockopt(socket, SOL_SOCKET, SO_SNDBUF, &buf_size_int,
+ sizeof(buf_size_int))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_get_send_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ assert(bufsiz);
+
+ int buf_size_int;
+ socklen_t bufsiz_len = sizeof(buf_size_int);
+ if (getsockopt(socket, SOL_SOCKET, SO_SNDBUF, &buf_size_int, &bufsiz_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *bufsiz = (size_t)buf_size_int;
+
+ return BHT_OK;
+}
+
+int
+os_socket_set_recv_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ int buf_size_int = (int)bufsiz;
+ if (setsockopt(socket, SOL_SOCKET, SO_RCVBUF, &buf_size_int,
+ sizeof(buf_size_int))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_get_recv_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ assert(bufsiz);
+
+ int buf_size_int;
+ socklen_t bufsiz_len = sizeof(buf_size_int);
+ if (getsockopt(socket, SOL_SOCKET, SO_RCVBUF, &buf_size_int, &bufsiz_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *bufsiz = (size_t)buf_size_int;
+
+ return BHT_OK;
+}
+
+int
+os_socket_set_keep_alive(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
+ is_enabled);
+}
+
+int
+os_socket_get_keep_alive(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
+ is_enabled);
+}
+
+int
+os_socket_set_reuse_addr(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
+ is_enabled);
+}
+
+int
+os_socket_get_reuse_addr(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
+ is_enabled);
+}
+
+int
+os_socket_set_reuse_port(bh_socket_t socket, bool is_enabled)
+{
+#if defined(SO_REUSEPORT) /* NuttX doesn't have SO_REUSEPORT */
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
+ is_enabled);
+#else
+ errno = ENOTSUP;
+ return BHT_ERROR;
+#endif /* defined(SO_REUSEPORT) */
+}
+
+int
+os_socket_get_reuse_port(bh_socket_t socket, bool *is_enabled)
+{
+#if defined(SO_REUSEPORT) /* NuttX doesn't have SO_REUSEPORT */
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
+ is_enabled);
+#else
+ errno = ENOTSUP;
+ return BHT_ERROR;
+#endif /* defined(SO_REUSEPORT) */
+}
+
+int
+os_socket_set_linger(bh_socket_t socket, bool is_enabled, int linger_s)
+{
+ struct linger linger_opts = { .l_onoff = (int)is_enabled,
+ .l_linger = linger_s };
+ if (setsockopt(socket, SOL_SOCKET, SO_LINGER, &linger_opts,
+ sizeof(linger_opts))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_get_linger(bh_socket_t socket, bool *is_enabled, int *linger_s)
+{
+ assert(is_enabled);
+ assert(linger_s);
+
+ struct linger linger_opts;
+ socklen_t linger_opts_len = sizeof(linger_opts);
+ if (getsockopt(socket, SOL_SOCKET, SO_LINGER, &linger_opts,
+ &linger_opts_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *linger_s = linger_opts.l_linger;
+ *is_enabled = (bool)linger_opts.l_onoff;
+ return BHT_OK;
+}
+
+int
+os_socket_set_tcp_no_delay(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
+ is_enabled);
+}
+
+int
+os_socket_get_tcp_no_delay(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
+ is_enabled);
+}
+
+int
+os_socket_set_tcp_quick_ack(bh_socket_t socket, bool is_enabled)
+{
+#ifdef TCP_QUICKACK
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
+ is_enabled);
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_get_tcp_quick_ack(bh_socket_t socket, bool *is_enabled)
+{
+#ifdef TCP_QUICKACK
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
+ is_enabled);
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_set_tcp_keep_idle(bh_socket_t socket, uint32 time_s)
+{
+ int time_s_int = (int)time_s;
+#ifdef TCP_KEEPIDLE
+ if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPIDLE, &time_s_int,
+ sizeof(time_s_int))
+ != 0) {
+ return BHT_ERROR;
+ }
+ return BHT_OK;
+#elif defined(TCP_KEEPALIVE)
+ if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPALIVE, &time_s_int,
+ sizeof(time_s_int))
+ != 0) {
+ return BHT_ERROR;
+ }
+ return BHT_OK;
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_get_tcp_keep_idle(bh_socket_t socket, uint32 *time_s)
+{
+ assert(time_s);
+ int time_s_int;
+ socklen_t time_s_len = sizeof(time_s_int);
+#ifdef TCP_KEEPIDLE
+ if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPIDLE, &time_s_int, &time_s_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *time_s = (uint32)time_s_int;
+ return BHT_OK;
+#elif defined(TCP_KEEPALIVE)
+ if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPALIVE, &time_s_int, &time_s_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *time_s = (uint32)time_s_int;
+ return BHT_OK;
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_set_tcp_keep_intvl(bh_socket_t socket, uint32 time_s)
+{
+ int time_s_int = (int)time_s;
+#ifdef TCP_KEEPINTVL
+ if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPINTVL, &time_s_int,
+ sizeof(time_s_int))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_get_tcp_keep_intvl(bh_socket_t socket, uint32 *time_s)
+{
+#ifdef TCP_KEEPINTVL
+ assert(time_s);
+ int time_s_int;
+ socklen_t time_s_len = sizeof(time_s_int);
+ if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPINTVL, &time_s_int, &time_s_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+ *time_s = (uint32)time_s_int;
+ return BHT_OK;
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_set_tcp_fastopen_connect(bh_socket_t socket, bool is_enabled)
+{
+#ifdef TCP_FASTOPEN_CONNECT
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
+ is_enabled);
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_get_tcp_fastopen_connect(bh_socket_t socket, bool *is_enabled)
+{
+#ifdef TCP_FASTOPEN_CONNECT
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
+ is_enabled);
+#else
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_set_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool is_enabled)
+{
+ if (ipv6) {
+#ifdef IPPROTO_IPV6
+ return os_socket_setbooloption(socket, IPPROTO_IPV6,
+ IPV6_MULTICAST_LOOP, is_enabled);
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+ else {
+ return os_socket_setbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
+ is_enabled);
+ }
+}
+
+int
+os_socket_get_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool *is_enabled)
+{
+ if (ipv6) {
+#ifdef IPPROTO_IPV6
+ return os_socket_getbooloption(socket, IPPROTO_IPV6,
+ IPV6_MULTICAST_LOOP, is_enabled);
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+ else {
+ return os_socket_getbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
+ is_enabled);
+ }
+}
+
+int
+os_socket_set_ip_add_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ assert(imr_multiaddr);
+ if (is_ipv6) {
+#ifdef IPPROTO_IPV6
+ struct ipv6_mreq mreq;
+ for (int i = 0; i < 8; i++) {
+ ((uint16_t *)mreq.ipv6mr_multiaddr.s6_addr)[i] =
+ imr_multiaddr->ipv6[i];
+ }
+ mreq.ipv6mr_interface = imr_interface;
+ if (setsockopt(socket, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq,
+ sizeof(mreq))
+ != 0) {
+ return BHT_ERROR;
+ }
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+ else {
+ struct ip_mreq mreq;
+ mreq.imr_multiaddr.s_addr = imr_multiaddr->ipv4;
+ mreq.imr_interface.s_addr = imr_interface;
+ if (setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq,
+ sizeof(mreq))
+ != 0) {
+ return BHT_ERROR;
+ }
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_set_ip_drop_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ assert(imr_multiaddr);
+ if (is_ipv6) {
+#ifdef IPPROTO_IPV6
+ struct ipv6_mreq mreq;
+ for (int i = 0; i < 8; i++) {
+ ((uint16_t *)mreq.ipv6mr_multiaddr.s6_addr)[i] =
+ imr_multiaddr->ipv6[i];
+ }
+ mreq.ipv6mr_interface = imr_interface;
+ if (setsockopt(socket, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq,
+ sizeof(mreq))
+ != 0) {
+ return BHT_ERROR;
+ }
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+ else {
+ struct ip_mreq mreq;
+ mreq.imr_multiaddr.s_addr = imr_multiaddr->ipv4;
+ mreq.imr_interface.s_addr = imr_interface;
+ if (setsockopt(socket, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq,
+ sizeof(mreq))
+ != 0) {
+ return BHT_ERROR;
+ }
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_set_ip_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ if (setsockopt(socket, IPPROTO_IP, IP_TTL, &ttl_s, sizeof(ttl_s)) != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_get_ip_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+    /* The socket option value is an int; convert it to the caller's uint8_t */
+    int ttl = 0;
+    socklen_t opt_len = sizeof(ttl);
+    if (getsockopt(socket, IPPROTO_IP, IP_TTL, &ttl, &opt_len) != 0) {
+        return BHT_ERROR;
+    }
+    *ttl_s = (uint8_t)ttl;
+
+    return BHT_OK;
+}
+
+int
+os_socket_set_ip_multicast_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ if (setsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL, &ttl_s, sizeof(ttl_s))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_get_ip_multicast_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+    /* The socket option value is an int; convert it to the caller's uint8_t */
+    int ttl = 0;
+    socklen_t opt_len = sizeof(ttl);
+    if (getsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, &opt_len)
+        != 0) {
+        return BHT_ERROR;
+    }
+    *ttl_s = (uint8_t)ttl;
+
+    return BHT_OK;
+}
+
+int
+os_socket_set_ipv6_only(bh_socket_t socket, bool is_enabled)
+{
+#ifdef IPPROTO_IPV6
+ return os_socket_setbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
+ is_enabled);
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_get_ipv6_only(bh_socket_t socket, bool *is_enabled)
+{
+#ifdef IPPROTO_IPV6
+ return os_socket_getbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
+ is_enabled);
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+}
+
+int
+os_socket_set_broadcast(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_BROADCAST,
+ is_enabled);
+}
+
+int
+os_socket_get_broadcast(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_BROADCAST,
+ is_enabled);
+}
+
+int
+os_socket_set_send_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ struct timeval tv;
+ tv.tv_sec = timeout_us / 1000000UL;
+ tv.tv_usec = timeout_us % 1000000UL;
+ if (setsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) != 0) {
+ return BHT_ERROR;
+ }
+ return BHT_OK;
+}
+
+int
+os_socket_get_send_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ struct timeval tv;
+ socklen_t tv_len = sizeof(tv);
+ if (getsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, &tv, &tv_len) != 0) {
+ return BHT_ERROR;
+ }
+ *timeout_us = (tv.tv_sec * 1000000UL) + tv.tv_usec;
+ return BHT_OK;
+}
+
+int
+os_socket_set_recv_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ struct timeval tv;
+ tv.tv_sec = timeout_us / 1000000UL;
+ tv.tv_usec = timeout_us % 1000000UL;
+ if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) != 0) {
+ return BHT_ERROR;
+ }
+ return BHT_OK;
+}
+
+int
+os_socket_get_recv_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ struct timeval tv;
+ socklen_t tv_len = sizeof(tv);
+ if (getsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, &tv, &tv_len) != 0) {
+ return BHT_ERROR;
+ }
+ *timeout_us = (tv.tv_sec * 1000000UL) + tv.tv_usec;
+ return BHT_OK;
+}
+
+int
+os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_storage addr_storage = { 0 };
+ socklen_t addr_len = sizeof(addr_storage);
+ int ret;
+
+ ret = getsockname(socket, (struct sockaddr *)&addr_storage, &addr_len);
+
+ if (ret != BHT_OK) {
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr_storage, sockaddr);
+}
+
+int
+os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_storage addr_storage = { 0 };
+ socklen_t addr_len = sizeof(addr_storage);
+ int ret;
+
+ ret = getpeername(socket, (struct sockaddr *)&addr_storage, &addr_len);
+
+ if (ret != BHT_OK) {
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr_storage, sockaddr);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c
new file mode 100644
index 000000000..5e814c418
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+typedef struct {
+ thread_start_routine_t start;
+ void *arg;
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ os_signal_handler signal_handler;
+#endif
+} thread_wrapper_arg;
+
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+/* The signal handler passed to os_thread_signal_init() */
+static os_thread_local_attribute os_signal_handler signal_handler;
+#endif
+
+static void *
+os_thread_wrapper(void *arg)
+{
+ thread_wrapper_arg *targ = arg;
+ thread_start_routine_t start_func = targ->start;
+ void *thread_arg = targ->arg;
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ os_signal_handler handler = targ->signal_handler;
+#endif
+
+#if 0
+ os_printf("THREAD CREATED %jx\n", (uintmax_t)(uintptr_t)pthread_self());
+#endif
+ BH_FREE(targ);
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ if (os_thread_signal_init(handler) != 0)
+ return NULL;
+#endif
+ start_func(thread_arg);
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ os_thread_signal_destroy();
+#endif
+ return NULL;
+}
+
+int
+os_thread_create_with_prio(korp_tid *tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ pthread_attr_t tattr;
+ thread_wrapper_arg *targ;
+
+ assert(stack_size > 0);
+ assert(tid);
+ assert(start);
+
+ pthread_attr_init(&tattr);
+ pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
+ if (pthread_attr_setstacksize(&tattr, stack_size) != 0) {
+ os_printf("Invalid thread stack size %u. "
+ "Min stack size on Linux = %u\n",
+ stack_size, (unsigned int)PTHREAD_STACK_MIN);
+ pthread_attr_destroy(&tattr);
+ return BHT_ERROR;
+ }
+
+ targ = (thread_wrapper_arg *)BH_MALLOC(sizeof(*targ));
+ if (!targ) {
+ pthread_attr_destroy(&tattr);
+ return BHT_ERROR;
+ }
+
+ targ->start = start;
+ targ->arg = arg;
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ targ->signal_handler = signal_handler;
+#endif
+
+ if (pthread_create(tid, &tattr, os_thread_wrapper, targ) != 0) {
+ pthread_attr_destroy(&tattr);
+ BH_FREE(targ);
+ return BHT_ERROR;
+ }
+
+ pthread_attr_destroy(&tattr);
+ return BHT_OK;
+}
+
+int
+os_thread_create(korp_tid *tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)pthread_self();
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ return pthread_mutex_init(mutex, NULL) == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ int ret;
+
+ pthread_mutexattr_t mattr;
+
+ assert(mutex);
+ ret = pthread_mutexattr_init(&mattr);
+ if (ret)
+ return BHT_ERROR;
+
+ pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+ ret = pthread_mutex_init(mutex, &mattr);
+ pthread_mutexattr_destroy(&mattr);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_destroy(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_lock(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_unlock(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_init(cond, NULL) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_destroy(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ assert(cond);
+ assert(mutex);
+
+ if (pthread_cond_wait(cond, mutex) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+korp_sem *
+os_sem_open(const char *name, int oflags, int mode, int val)
+{
+ return sem_open(name, oflags, mode, val);
+}
+
+int
+os_sem_close(korp_sem *sem)
+{
+ return sem_close(sem);
+}
+
+int
+os_sem_wait(korp_sem *sem)
+{
+ return sem_wait(sem);
+}
+
+int
+os_sem_trywait(korp_sem *sem)
+{
+ return sem_trywait(sem);
+}
+
+int
+os_sem_post(korp_sem *sem)
+{
+ return sem_post(sem);
+}
+
+int
+os_sem_getvalue(korp_sem *sem, int *sval)
+{
+#if defined(__APPLE__)
+ /*
+ * macOS doesn't have working sem_getvalue.
+ * It's marked as deprecated in the system header.
+ * Mock it up here to avoid compile-time deprecation warnings.
+ */
+ errno = ENOSYS;
+ return -1;
+#else
+ return sem_getvalue(sem, sval);
+#endif
+}
+
+int
+os_sem_unlink(const char *name)
+{
+ return sem_unlink(name);
+}
+
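+/*
+ * Convert a relative timeout in microseconds into an absolute timespec based
+ * on the current wall-clock time, saturating on overflow. For example, with
+ * gettimeofday() returning { 100 s, 400000 us }, a 1500000 us timeout yields
+ * { tv_sec = 101, tv_nsec = 900000000 }.
+ */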
+static void
+msec_nsec_to_abstime(struct timespec *ts, uint64 usec)
+{
+ struct timeval tv;
+ time_t tv_sec_new;
+ long int tv_nsec_new;
+
+ gettimeofday(&tv, NULL);
+
+ tv_sec_new = (time_t)(tv.tv_sec + usec / 1000000);
+ if (tv_sec_new >= tv.tv_sec) {
+ ts->tv_sec = tv_sec_new;
+ }
+ else {
+ /* integer overflow */
+ ts->tv_sec = BH_TIME_T_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+
+ tv_nsec_new = (long int)(tv.tv_usec * 1000 + (usec % 1000000) * 1000);
+ if (tv.tv_usec * 1000 >= tv.tv_usec && tv_nsec_new >= tv.tv_usec * 1000) {
+ ts->tv_nsec = tv_nsec_new;
+ }
+ else {
+ /* integer overflow */
+ ts->tv_nsec = LONG_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+
+ if (ts->tv_nsec >= 1000000000L && ts->tv_sec < BH_TIME_T_MAX) {
+ ts->tv_sec++;
+ ts->tv_nsec -= 1000000000L;
+ }
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ int ret;
+ struct timespec abstime;
+
+ if (useconds == BHT_WAIT_FOREVER)
+ ret = pthread_cond_wait(cond, mutex);
+ else {
+ msec_nsec_to_abstime(&abstime, useconds);
+ ret = pthread_cond_timedwait(cond, mutex, &abstime);
+ }
+
+ if (ret != BHT_OK && ret != ETIMEDOUT)
+ return BHT_ERROR;
+
+ return ret;
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_signal(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_broadcast(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_broadcast(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ return pthread_join(thread, value_ptr);
+}
+
+int
+os_thread_detach(korp_tid thread)
+{
+ return pthread_detach(thread);
+}
+
+void
+os_thread_exit(void *retval)
+{
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ os_thread_signal_destroy();
+#endif
+    pthread_exit(retval);
+}
+
+#if defined(os_thread_local_attribute)
+static os_thread_local_attribute uint8 *thread_stack_boundary = NULL;
+#endif
+
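+/*
+ * Return the lowest usable address (boundary) of the current thread's stack,
+ * considering at most APP_THREAD_STACK_SIZE_MAX bytes of stack and reserving
+ * at least one guard page below the returned address.
+ */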
+uint8 *
+os_thread_get_stack_boundary()
+{
+ pthread_t self;
+#ifdef __linux__
+ pthread_attr_t attr;
+ size_t guard_size;
+#endif
+ uint8 *addr = NULL;
+ size_t stack_size, max_stack_size;
+ int page_size;
+
+#if defined(os_thread_local_attribute)
+ if (thread_stack_boundary)
+ return thread_stack_boundary;
+#endif
+
+ page_size = getpagesize();
+ self = pthread_self();
+ max_stack_size =
+ (size_t)(APP_THREAD_STACK_SIZE_MAX + page_size - 1) & ~(page_size - 1);
+
+ if (max_stack_size < APP_THREAD_STACK_SIZE_DEFAULT)
+ max_stack_size = APP_THREAD_STACK_SIZE_DEFAULT;
+
+#ifdef __linux__
+ if (pthread_getattr_np(self, &attr) == 0) {
+ pthread_attr_getstack(&attr, (void **)&addr, &stack_size);
+ pthread_attr_getguardsize(&attr, &guard_size);
+ pthread_attr_destroy(&attr);
+ if (stack_size > max_stack_size)
+ addr = addr + stack_size - max_stack_size;
+ if (guard_size < (size_t)page_size)
+            /* Reserve at least one guard page for safety */
+ guard_size = (size_t)page_size;
+ addr += guard_size;
+ }
+ (void)stack_size;
+#elif defined(__APPLE__) || defined(__NuttX__)
+ if ((addr = (uint8 *)pthread_get_stackaddr_np(self))) {
+ stack_size = pthread_get_stacksize_np(self);
+
+ /**
+ * Check whether stack_addr is the base or end of the stack,
+ * change it to the base if it is the end of stack.
+ */
+ if (addr <= (uint8 *)&stack_size)
+ addr = addr + stack_size;
+
+ if (stack_size > max_stack_size)
+ stack_size = max_stack_size;
+
+ addr -= stack_size;
+        /* Reserve at least one guard page for safety */
+ addr += page_size;
+ }
+#endif
+
+#if defined(os_thread_local_attribute)
+ thread_stack_boundary = addr;
+#endif
+ return addr;
+}
+
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+
+#define SIG_ALT_STACK_SIZE (32 * 1024)
+
+/**
+ * Whether the thread signal environment is initialized:
+ * the signal handler is registered, the stack pages are touched,
+ * the stack guard pages are set and the signal alternate stack is set.
+ */
+static os_thread_local_attribute bool thread_signal_inited = false;
+
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+/* The signal alternate stack base addr */
+static os_thread_local_attribute uint8 *sigalt_stack_base_addr;
+
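+/*
+ * touch_pages() below walks the stack downwards with os_alloca() and writes
+ * one byte per half page so that every page down to stack_min_addr gets
+ * mapped. Optimizations and address sanitizing are disabled so the compiler
+ * cannot elide the allocas or the dummy stores.
+ */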
+#if defined(__clang__)
+#pragma clang optimize off
+#elif defined(__GNUC__)
+#pragma GCC push_options
+#pragma GCC optimize("O0")
+__attribute__((no_sanitize_address))
+#endif
+static uint32
+touch_pages(uint8 *stack_min_addr, uint32 page_size)
+{
+ uint8 sum = 0;
+ while (1) {
+ volatile uint8 *touch_addr = (volatile uint8 *)os_alloca(page_size / 2);
+ if (touch_addr < stack_min_addr + page_size) {
+ sum += *(stack_min_addr + page_size - 1);
+ break;
+ }
+ *touch_addr = 0;
+ sum += *touch_addr;
+ }
+ return sum;
+}
+#if defined(__clang__)
+#pragma clang optimize on
+#elif defined(__GNUC__)
+#pragma GCC pop_options
+#endif
+
+static bool
+init_stack_guard_pages()
+{
+ uint32 page_size = os_getpagesize();
+ uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;
+ uint8 *stack_min_addr = os_thread_get_stack_boundary();
+
+ if (stack_min_addr == NULL)
+ return false;
+
+ /* Touch each stack page to ensure that it has been mapped: the OS
+ may lazily grow the stack mapping as a guard page is hit. */
+ (void)touch_pages(stack_min_addr, page_size);
+ /* First time to call aot function, protect guard pages */
+ if (os_mprotect(stack_min_addr, page_size * guard_page_count,
+ MMAP_PROT_NONE)
+ != 0) {
+ return false;
+ }
+ return true;
+}
+
+static void
+destroy_stack_guard_pages()
+{
+ uint32 page_size = os_getpagesize();
+ uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;
+ uint8 *stack_min_addr = os_thread_get_stack_boundary();
+
+ os_mprotect(stack_min_addr, page_size * guard_page_count,
+ MMAP_PROT_READ | MMAP_PROT_WRITE);
+}
+#endif /* end of WASM_DISABLE_STACK_HW_BOUND_CHECK == 0 */
+
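+/*
+ * Block or unblock delivery of SIGSEGV and SIGBUS for the current thread;
+ * signal_callback() blocks them on entry and os_signal_unmask() re-enables
+ * them.
+ */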
+static void
+mask_signals(int how)
+{
+ sigset_t set;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGSEGV);
+ sigaddset(&set, SIGBUS);
+ pthread_sigmask(how, &set, NULL);
+}
+
+static os_thread_local_attribute struct sigaction prev_sig_act_SIGSEGV;
+static os_thread_local_attribute struct sigaction prev_sig_act_SIGBUS;
+
+static void
+signal_callback(int sig_num, siginfo_t *sig_info, void *sig_ucontext)
+{
+ void *sig_addr = sig_info->si_addr;
+ struct sigaction *prev_sig_act = NULL;
+
+ mask_signals(SIG_BLOCK);
+
+ /* Try to handle signal with the registered signal handler */
+ if (signal_handler && (sig_num == SIGSEGV || sig_num == SIGBUS)) {
+ signal_handler(sig_addr);
+ }
+
+ if (sig_num == SIGSEGV)
+ prev_sig_act = &prev_sig_act_SIGSEGV;
+ else if (sig_num == SIGBUS)
+ prev_sig_act = &prev_sig_act_SIGBUS;
+
+ /* Forward the signal to next handler if found */
+ if (prev_sig_act && (prev_sig_act->sa_flags & SA_SIGINFO)) {
+ prev_sig_act->sa_sigaction(sig_num, sig_info, sig_ucontext);
+ }
+ else if (prev_sig_act
+ && ((void *)prev_sig_act->sa_sigaction == SIG_DFL
+ || (void *)prev_sig_act->sa_sigaction == SIG_IGN)) {
+ sigaction(sig_num, prev_sig_act, NULL);
+ }
+ /* Output signal info and then crash if signal is unhandled */
+ else {
+ switch (sig_num) {
+ case SIGSEGV:
+ os_printf("unhandled SIGSEGV, si_addr: %p\n", sig_addr);
+ break;
+ case SIGBUS:
+ os_printf("unhandled SIGBUS, si_addr: %p\n", sig_addr);
+ break;
+ default:
+ os_printf("unhandle signal %d, si_addr: %p\n", sig_num,
+ sig_addr);
+ break;
+ }
+
+ abort();
+ }
+}
+
+int
+os_thread_signal_init(os_signal_handler handler)
+{
+ struct sigaction sig_act;
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ stack_t sigalt_stack_info;
+ uint32 map_size = SIG_ALT_STACK_SIZE;
+ uint8 *map_addr;
+#endif
+
+ if (thread_signal_inited)
+ return 0;
+
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ if (!init_stack_guard_pages()) {
+ os_printf("Failed to init stack guard pages\n");
+ return -1;
+ }
+
+ /* Initialize memory for signal alternate stack of current thread */
+ if (!(map_addr = os_mmap(NULL, map_size, MMAP_PROT_READ | MMAP_PROT_WRITE,
+ MMAP_MAP_NONE))) {
+ os_printf("Failed to mmap memory for alternate stack\n");
+ goto fail1;
+ }
+
+ /* Initialize signal alternate stack */
+ memset(map_addr, 0, map_size);
+ sigalt_stack_info.ss_sp = map_addr;
+ sigalt_stack_info.ss_size = map_size;
+ sigalt_stack_info.ss_flags = 0;
+ if (sigaltstack(&sigalt_stack_info, NULL) != 0) {
+ os_printf("Failed to init signal alternate stack\n");
+ goto fail2;
+ }
+#endif
+
+ memset(&prev_sig_act_SIGSEGV, 0, sizeof(struct sigaction));
+ memset(&prev_sig_act_SIGBUS, 0, sizeof(struct sigaction));
+
+    /* Install the signal handler */
+ sig_act.sa_sigaction = signal_callback;
+ sig_act.sa_flags = SA_SIGINFO | SA_NODEFER;
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ sig_act.sa_flags |= SA_ONSTACK;
+#endif
+ sigemptyset(&sig_act.sa_mask);
+ if (sigaction(SIGSEGV, &sig_act, &prev_sig_act_SIGSEGV) != 0
+ || sigaction(SIGBUS, &sig_act, &prev_sig_act_SIGBUS) != 0) {
+ os_printf("Failed to register signal handler\n");
+ goto fail3;
+ }
+
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ sigalt_stack_base_addr = map_addr;
+#endif
+ signal_handler = handler;
+ thread_signal_inited = true;
+ return 0;
+
+fail3:
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ memset(&sigalt_stack_info, 0, sizeof(stack_t));
+ sigalt_stack_info.ss_flags = SS_DISABLE;
+ sigalt_stack_info.ss_size = map_size;
+ sigaltstack(&sigalt_stack_info, NULL);
+fail2:
+ os_munmap(map_addr, map_size);
+fail1:
+ destroy_stack_guard_pages();
+#endif
+ return -1;
+}
+
+void
+os_thread_signal_destroy()
+{
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ stack_t sigalt_stack_info;
+#endif
+
+ if (!thread_signal_inited)
+ return;
+
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ /* Disable signal alternate stack */
+ memset(&sigalt_stack_info, 0, sizeof(stack_t));
+ sigalt_stack_info.ss_flags = SS_DISABLE;
+ sigalt_stack_info.ss_size = SIG_ALT_STACK_SIZE;
+ sigaltstack(&sigalt_stack_info, NULL);
+
+ os_munmap(sigalt_stack_base_addr, SIG_ALT_STACK_SIZE);
+
+ destroy_stack_guard_pages();
+#endif
+
+ thread_signal_inited = false;
+}
+
+bool
+os_thread_signal_inited()
+{
+ return thread_signal_inited;
+}
+
+void
+os_signal_unmask()
+{
+ mask_signals(SIG_UNBLOCK);
+}
+
+void
+os_sigreturn()
+{
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+#if defined(__APPLE__)
+#define UC_RESET_ALT_STACK 0x80000000
+ extern int __sigreturn(void *, int);
+
+ /* It's necessary to call __sigreturn to restore the sigaltstack state
+ after exiting the signal handler. */
+ __sigreturn(NULL, UC_RESET_ALT_STACK);
+#endif
+#endif
+}
+#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c
new file mode 100644
index 000000000..bcf5ca3ce
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+ return 0;
+ }
+
+ return ((uint64)ts.tv_sec) * 1000 * 1000 + ((uint64)ts.tv_nsec) / 1000;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_init.c
new file mode 100644
index 000000000..2aae13fa1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_init.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_internal.h
new file mode 100644
index 000000000..3fd1c258e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/platform_internal.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <pthread.h>
+#include <signal.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sched.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_DARWIN
+#define BH_PLATFORM_DARWIN
+#endif
+
+#define BH_HAS_DLFCN 1
+
+/* Stack size of applet threads' native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define os_thread_local_attribute __thread
+
+#define bh_socket_t int
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
+ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \
+ || defined(BUILD_TARGET_RISCV64_LP64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+#define os_alloca alloca
+
+#define os_getpagesize getpagesize
+
+typedef void (*os_signal_handler)(void *sig_addr);
+
+int
+os_thread_signal_init(os_signal_handler handler);
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+void
+os_signal_unmask();
+
+void
+os_sigreturn();
+#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/shared_platform.cmake
new file mode 100644
index 000000000..5eecd65c7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/darwin/shared_platform.cmake
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_DARWIN)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_POSIX_SOURCE})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_init.c
new file mode 100644
index 000000000..38a0e8049
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_init.c
@@ -0,0 +1,6 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "../linux/platform_init.c" \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_internal.h
new file mode 100644
index 000000000..1ece346be
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/platform_internal.h
@@ -0,0 +1,6 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "../linux/platform_internal.h"
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/shared_platform.cmake
new file mode 100644
index 000000000..9b84c5841
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/ego/shared_platform.cmake
@@ -0,0 +1,20 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_EGO)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+set (PLATFORM_SHARED_SOURCE
+ ${PLATFORM_COMMON_POSIX_SOURCE}
+ ${CMAKE_CURRENT_LIST_DIR}/platform_init.c
+)
+
+LIST (APPEND RUNTIME_LIB_HEADER_LIST
+ ${CMAKE_CURRENT_LIST_DIR}/platform_internal.h
+) \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_malloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_malloc.c
new file mode 100644
index 000000000..08ec88305
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_malloc.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
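+/*
+ * os_malloc/os_realloc return an 8-byte aligned pointer and stash the address
+ * returned by the underlying allocator in the uintptr_t slot placed right
+ * before the aligned pointer, so that os_realloc/os_free can recover it:
+ *
+ *   [ padding ... ][ original address ][ 8-byte aligned user buffer ... ]
+ *                                       ^-- pointer handed to the caller
+ */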
+void *
+os_malloc(unsigned size)
+{
+ void *buf_origin;
+ void *buf_fixed;
+ uintptr_t *addr_field;
+
+ buf_origin = malloc(size + 8 + sizeof(uintptr_t));
+ if (!buf_origin) {
+ return NULL;
+ }
+ buf_fixed = buf_origin + sizeof(void *);
+ if ((uintptr_t)buf_fixed & (uintptr_t)0x7) {
+ buf_fixed = (void *)((uintptr_t)(buf_fixed + 8) & (~(uintptr_t)7));
+ }
+
+ addr_field = buf_fixed - sizeof(uintptr_t);
+ *addr_field = (uintptr_t)buf_origin;
+
+ return buf_fixed;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ void *mem_origin;
+ void *mem_new;
+ void *mem_new_fixed;
+ uintptr_t *addr_field;
+
+ if (!ptr) {
+ return os_malloc(size);
+ }
+
+ addr_field = ptr - sizeof(uintptr_t);
+ mem_origin = (void *)(*addr_field);
+ mem_new = realloc(mem_origin, size + 8 + sizeof(uintptr_t));
+ if (!mem_new) {
+ return NULL;
+ }
+
+ if (mem_origin != mem_new) {
+ mem_new_fixed = mem_new + sizeof(uintptr_t);
+        if ((uintptr_t)mem_new_fixed & (uintptr_t)0x7) {
+ mem_new_fixed =
+ (void *)((uintptr_t)(mem_new + 8) & (~(uintptr_t)7));
+ }
+
+ addr_field = mem_new_fixed - sizeof(uintptr_t);
+ *addr_field = (uintptr_t)mem_new;
+
+ return mem_new_fixed;
+ }
+
+ return ptr;
+}
+
+void
+os_free(void *ptr)
+{
+ void *mem_origin;
+ uintptr_t *addr_field;
+
+ if (ptr) {
+ addr_field = ptr - sizeof(uintptr_t);
+ mem_origin = (void *)(*addr_field);
+
+ free(mem_origin);
+ }
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_memmap.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_memmap.c
new file mode 100644
index 000000000..693094a63
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_memmap.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ if (prot & MMAP_PROT_EXEC) {
+        // Memory allocated with MALLOC_CAP_EXEC is only 4-byte aligned.
+        // Reserve an extra 4 bytes to fix up the alignment, plus room for a
+        // pointer to the originally allocated address.
+ void *buf_origin =
+ heap_caps_malloc(size + 4 + sizeof(uintptr_t), MALLOC_CAP_EXEC);
+ if (!buf_origin) {
+ return NULL;
+ }
+ void *buf_fixed = buf_origin + sizeof(void *);
+ if ((uintptr_t)buf_fixed & (uintptr_t)0x7) {
+ buf_fixed = (void *)((uintptr_t)(buf_fixed + 4) & (~(uintptr_t)7));
+ }
+
+ uintptr_t *addr_field = buf_fixed - sizeof(uintptr_t);
+ *addr_field = (uintptr_t)buf_origin;
+ return buf_fixed;
+ }
+ else {
+ return os_malloc(size);
+ }
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+    // No special handling is needed for executable allocations here;
+    // esp-idf's free() handles them properly
+ return os_free(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush()
+{}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_platform.c
new file mode 100644
index 000000000..35b893d81
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_platform.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+ ret += vprintf(format, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+ return vprintf(format, ap);
+}
+
+uint64
+os_time_get_boot_microsecond(void)
+{
+ return (uint64)esp_timer_get_time();
+}
+
+uint8 *
+os_thread_get_stack_boundary(void)
+{
+#if defined(CONFIG_FREERTOS_USE_TRACE_FACILITY)
+ TaskStatus_t pxTaskStatus;
+ vTaskGetInfo(xTaskGetCurrentTaskHandle(), &pxTaskStatus, pdTRUE, eInvalid);
+ return pxTaskStatus.pxStackBase;
+#else // !defined(CONFIG_FREERTOS_USE_TRACE_FACILITY)
+ return NULL;
+#endif
+}
+
+int
+os_usleep(uint32 usec)
+{
+ return usleep(usec);
+}
+
+/* The readv & writev implementations below are ported from NuttX, under
+ * Apache License v2.0 */
+
+ssize_t
+readv(int fildes, const struct iovec *iov, int iovcnt)
+{
+ ssize_t ntotal;
+ ssize_t nread;
+ size_t remaining;
+ uint8_t *buffer;
+ int i;
+
+ /* Process each entry in the struct iovec array */
+
+ for (i = 0, ntotal = 0; i < iovcnt; i++) {
+ /* Ignore zero-length reads */
+
+ if (iov[i].iov_len > 0) {
+ buffer = iov[i].iov_base;
+ remaining = iov[i].iov_len;
+
+ /* Read repeatedly as necessary to fill buffer */
+
+ do {
+ /* NOTE: read() is a cancellation point */
+
+ nread = read(fildes, buffer, remaining);
+
+ /* Check for a read error */
+
+ if (nread < 0) {
+ return nread;
+ }
+
+ /* Check for an end-of-file condition */
+
+ else if (nread == 0) {
+ return ntotal;
+ }
+
+ /* Update pointers and counts in order to handle partial
+ * buffer reads.
+ */
+
+ buffer += nread;
+ remaining -= nread;
+ ntotal += nread;
+ } while (remaining > 0);
+ }
+ }
+
+ return ntotal;
+}
+
+ssize_t
+writev(int fildes, const struct iovec *iov, int iovcnt)
+{
+ ssize_t ntotal;
+ ssize_t nwritten;
+ size_t remaining;
+ uint8_t *buffer;
+ int i;
+
+ /* Process each entry in the struct iovec array */
+
+ for (i = 0, ntotal = 0; i < iovcnt; i++) {
+ /* Ignore zero-length writes */
+
+ if (iov[i].iov_len > 0) {
+ buffer = iov[i].iov_base;
+ remaining = iov[i].iov_len;
+
+ /* Write repeatedly as necessary to write the entire buffer */
+
+ do {
+ /* NOTE: write() is a cancellation point */
+
+ nwritten = write(fildes, buffer, remaining);
+
+ /* Check for a write error */
+
+ if (nwritten < 0) {
+ return ntotal ? ntotal : -1;
+ }
+
+ /* Update pointers and counts in order to handle partial
+ * buffer writes.
+ */
+
+ buffer += nwritten;
+ remaining -= nwritten;
+ ntotal += nwritten;
+ } while (remaining > 0);
+ }
+ }
+
+ return ntotal;
+}
+
+int
+openat(int fd, const char *path, int oflags, ...)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+fstatat(int fd, const char *path, struct stat *buf, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+mkdirat(int fd, const char *path, mode_t mode)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t
+readlinkat(int fd, const char *path, char *buf, size_t bufsize)
+{
+ errno = EINVAL;
+ return -1;
+}
+
+int
+linkat(int fd1, const char *path1, int fd2, const char *path2, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+renameat(int fromfd, const char *from, int tofd, const char *to)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+symlinkat(const char *target, int fd, const char *path)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+unlinkat(int fd, const char *path, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+utimensat(int fd, const char *path, const struct timespec *ts, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+DIR *
+fdopendir(int fd)
+{
+ errno = ENOSYS;
+ return NULL;
+}
+
+#if ESP_IDF_VERSION < ESP_IDF_VERSION_VAL(4, 4, 2)
+int
+ftruncate(int fd, off_t length)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+int
+futimens(int fd, const struct timespec *times)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+nanosleep(const struct timespec *req, struct timespec *rem)
+{
+ errno = ENOSYS;
+ return -1;
+} \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_socket.c
new file mode 100644
index 000000000..9f441b712
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_socket.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#include <arpa/inet.h>
+
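+/*
+ * This helper only handles IPv4 dotted-quad strings; note that inet_addr()
+ * cannot distinguish a parse error from the valid address 255.255.255.255.
+ */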
+static void
+textual_addr_to_sockaddr(const char *textual, int port, struct sockaddr_in *out)
+{
+ assert(textual);
+
+ out->sin_family = AF_INET;
+ out->sin_port = htons(port);
+ out->sin_addr.s_addr = inet_addr(textual);
+}
+
+static int
+sockaddr_to_bh_sockaddr(const struct sockaddr *sockaddr, socklen_t socklen,
+ bh_sockaddr_t *bh_sockaddr)
+{
+ switch (sockaddr->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
+
+ assert(socklen >= sizeof(struct sockaddr_in));
+
+ bh_sockaddr->port = ntohs(addr->sin_port);
+ bh_sockaddr->addr_bufer.ipv4 = ntohl(addr->sin_addr.s_addr);
+ bh_sockaddr->is_ipv4 = true;
+ return BHT_OK;
+ }
+ default:
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+ }
+}
+
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp)
+{
+ if (!sock) {
+ return BHT_ERROR;
+ }
+
+ if (is_tcp) {
+ *sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ }
+ else {
+ *sock = socket(AF_INET, SOCK_DGRAM, 0);
+ }
+
+ return (*sock == -1) ? BHT_ERROR : BHT_OK;
+}
+
+int
+os_socket_bind(bh_socket_t socket, const char *host, int *port)
+{
+ struct sockaddr_in addr;
+ socklen_t socklen;
+ int ret;
+
+ assert(host);
+ assert(port);
+
+ addr.sin_addr.s_addr = inet_addr(host);
+ addr.sin_port = htons(*port);
+ addr.sin_family = AF_INET;
+
+ ret = bind(socket, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ goto fail;
+ }
+
+ socklen = sizeof(addr);
+ if (getsockname(socket, (struct sockaddr *)&addr, &socklen) == -1) {
+ goto fail;
+ }
+
+ *port = ntohs(addr.sin_port);
+
+ return BHT_OK;
+
+fail:
+ return BHT_ERROR;
+}
+
+int
+os_socket_settimeout(bh_socket_t socket, uint64 timeout_us)
+{
+ struct timeval tv;
+ tv.tv_sec = timeout_us / 1000000UL;
+ tv.tv_usec = timeout_us % 1000000UL;
+
+ if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv,
+ sizeof(tv))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_listen(bh_socket_t socket, int max_client)
+{
+ if (listen(socket, max_client) != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
+ unsigned int *addrlen)
+{
+ struct sockaddr addr_tmp;
+ socklen_t len = sizeof(struct sockaddr);
+
+ *sock = accept(server_sock, (struct sockaddr *)&addr_tmp, &len);
+
+ if (*sock < 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_connect(bh_socket_t socket, const char *addr, int port)
+{
+ struct sockaddr_in addr_in = { 0 };
+ socklen_t addr_len = sizeof(struct sockaddr_in);
+ int ret = 0;
+
+ textual_addr_to_sockaddr(addr, port, &addr_in);
+
+ ret = connect(socket, (struct sockaddr *)&addr_in, addr_len);
+ if (ret == -1) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_recv(bh_socket_t socket, void *buf, unsigned int len)
+{
+ return recv(socket, buf, len, 0);
+}
+
+int
+os_socket_send(bh_socket_t socket, const void *buf, unsigned int len)
+{
+ return send(socket, buf, len, 0);
+}
+
+int
+os_socket_close(bh_socket_t socket)
+{
+ close(socket);
+ return BHT_OK;
+}
+
+int
+os_socket_shutdown(bh_socket_t socket)
+{
+    shutdown(socket, SHUT_RDWR);
+ return BHT_OK;
+}
+
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out)
+{
+ if (!cp)
+ return BHT_ERROR;
+
+ if (is_ipv4) {
+ if (inet_pton(AF_INET, cp, &out->ipv4) != 1) {
+ return BHT_ERROR;
+ }
+ /* Note: ntohl(INADDR_NONE) == INADDR_NONE */
+ out->ipv4 = ntohl(out->ipv4);
+ }
+ else {
+ if (inet_pton(AF_INET6, cp, out->ipv6) != 1) {
+ return BHT_ERROR;
+ }
+ for (int i = 0; i < 8; i++) {
+ out->ipv6[i] = ntohs(out->ipv6[i]);
+ }
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len = sizeof(addr);
+
+ if (getpeername(socket, (struct sockaddr *)&addr, &addr_len) == -1) {
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr, addr_len,
+ sockaddr);
+}
+
+int
+os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len = sizeof(addr);
+
+ if (getsockname(socket, (struct sockaddr *)&addr, &addr_len) == -1) {
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr, addr_len,
+ sockaddr);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_thread.c
new file mode 100644
index 000000000..637cd4177
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/espidf_thread.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+typedef struct {
+ thread_start_routine_t start;
+ void *arg;
+} thread_wrapper_arg;
+
+static void *
+os_thread_wrapper(void *arg)
+{
+ thread_wrapper_arg *targ = arg;
+ thread_start_routine_t start_func = targ->start;
+ void *thread_arg = targ->arg;
+
+#if 0
+ os_printf("THREAD CREATED %jx\n", (uintmax_t)(uintptr_t)pthread_self());
+#endif
+ BH_FREE(targ);
+ start_func(thread_arg);
+ return NULL;
+}
+
+korp_tid
+os_self_thread(void)
+{
+    /* Only valid when called from a pthread; a task created directly with
+     * xTaskCreate is not enough. See product-mini for how to use this. */
+ return pthread_self();
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ return pthread_mutex_init(mutex, NULL);
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ int ret;
+
+ pthread_mutexattr_t mattr;
+
+ assert(mutex);
+ ret = pthread_mutexattr_init(&mattr);
+ if (ret)
+ return BHT_ERROR;
+
+ pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+ ret = pthread_mutex_init(mutex, &mattr);
+ pthread_mutexattr_destroy(&mattr);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ return pthread_mutex_destroy(mutex);
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ return pthread_mutex_lock(mutex);
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ return pthread_mutex_unlock(mutex);
+}
+
+int
+os_thread_create_with_prio(korp_tid *tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ pthread_attr_t tattr;
+ thread_wrapper_arg *targ;
+
+ assert(stack_size > 0);
+ assert(tid);
+ assert(start);
+
+ pthread_attr_init(&tattr);
+ pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
+ if (pthread_attr_setstacksize(&tattr, stack_size) != 0) {
+ os_printf("Invalid thread stack size %u. Min stack size = %u",
+ stack_size, PTHREAD_STACK_MIN);
+ pthread_attr_destroy(&tattr);
+ return BHT_ERROR;
+ }
+
+ targ = (thread_wrapper_arg *)BH_MALLOC(sizeof(*targ));
+ if (!targ) {
+ pthread_attr_destroy(&tattr);
+ return BHT_ERROR;
+ }
+
+ targ->start = start;
+ targ->arg = arg;
+
+ if (pthread_create(tid, &tattr, os_thread_wrapper, targ) != 0) {
+ pthread_attr_destroy(&tattr);
+        BH_FREE(targ);
+ return BHT_ERROR;
+ }
+
+ pthread_attr_destroy(&tattr);
+ return BHT_OK;
+}
+
+int
+os_thread_create(korp_tid *tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_join(korp_tid thread, void **retval)
+{
+ return pthread_join(thread, retval);
+}
+
+int
+os_thread_detach(korp_tid tid)
+{
+ return pthread_detach(tid);
+}
+
+void
+os_thread_exit(void *retval)
+{
+ pthread_exit(retval);
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ return pthread_cond_init(cond, NULL);
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ return pthread_cond_destroy(cond);
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return pthread_cond_wait(cond, mutex);
+}
+
+static void
+msec_nsec_to_abstime(struct timespec *ts, uint64 usec)
+{
+ struct timeval tv;
+ time_t tv_sec_new;
+ long int tv_nsec_new;
+
+ gettimeofday(&tv, NULL);
+
+ tv_sec_new = (time_t)(tv.tv_sec + usec / 1000000);
+ if (tv_sec_new >= tv.tv_sec) {
+ ts->tv_sec = tv_sec_new;
+ }
+ else {
+ /* integer overflow */
+ ts->tv_sec = BH_TIME_T_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+
+ tv_nsec_new = (long int)(tv.tv_usec * 1000 + (usec % 1000000) * 1000);
+ if (tv.tv_usec * 1000 >= tv.tv_usec && tv_nsec_new >= tv.tv_usec * 1000) {
+ ts->tv_nsec = tv_nsec_new;
+ }
+ else {
+ /* integer overflow */
+ ts->tv_nsec = LONG_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+
+ if (ts->tv_nsec >= 1000000000L && ts->tv_sec < BH_TIME_T_MAX) {
+ ts->tv_sec++;
+ ts->tv_nsec -= 1000000000L;
+ }
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ int ret;
+ struct timespec abstime;
+
+ if (useconds == BHT_WAIT_FOREVER)
+ ret = pthread_cond_wait(cond, mutex);
+ else {
+ msec_nsec_to_abstime(&abstime, useconds);
+ ret = pthread_cond_timedwait(cond, mutex, &abstime);
+ }
+
+ if (ret != BHT_OK && ret != ETIMEDOUT)
+ return BHT_ERROR;
+
+ return ret;
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ return pthread_cond_signal(cond);
+}
+
+int
+os_cond_broadcast(korp_cond *cond)
+{
+ return pthread_cond_broadcast(cond);
+} \ No newline at end of file
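
For orientation, a minimal sketch of how a caller could exercise the primitives defined in this file. This is a hedged illustration, not part of the port: the worker routine, the done flag and the 32 KB stack size are assumptions chosen for the example.

    #include "platform_api_vmcore.h"
    #include "platform_api_extension.h"

    static korp_mutex lock;
    static korp_cond cond;
    static bool done;

    /* hypothetical worker routine: flags completion under the lock */
    static void *
    worker(void *arg)
    {
        (void)arg;
        os_mutex_lock(&lock);
        done = true;
        os_cond_signal(&cond);
        os_mutex_unlock(&lock);
        return NULL;
    }

    static int
    run_once(void)
    {
        korp_tid tid;

        os_mutex_init(&lock);
        os_cond_init(&cond);

        if (os_thread_create(&tid, worker, NULL, 32 * 1024) != BHT_OK)
            return BHT_ERROR;

        os_mutex_lock(&lock);
        while (!done) {
            /* wait up to one second per iteration; timeout is in microseconds */
            if (os_cond_reltimedwait(&cond, &lock, 1000 * 1000) == BHT_ERROR)
                break;
        }
        os_mutex_unlock(&lock);

        os_thread_join(tid, NULL);
        return done ? BHT_OK : BHT_ERROR;
    }
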
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/platform_internal.h
new file mode 100644
index 000000000..81304ea80
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/platform_internal.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <dirent.h>
+
+#include "esp_pthread.h"
+#include "esp_timer.h"
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_ESP_IDF
+#define BH_PLATFORM_ESP_IDF
+#endif
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef unsigned int korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 5
+
+/* Special value for tv_nsec field of timespec */
+
+#define UTIME_NOW ((1l << 30) - 1l)
+#ifndef __cplusplus
+#define UTIME_OMIT ((1l << 30) - 2l)
+#endif
+
+#ifdef DT_UNKNOWN
+#undef DT_UNKNOWN
+#endif
+
+#ifdef DT_REG
+#undef DT_REG
+#endif
+
+#ifdef DT_DIR
+#undef DT_DIR
+#endif
+
+/* Below parts of d_type define are ported from Nuttx, under Apache License v2.0
+ */
+
+/* File type code for the d_type field in dirent structure.
+ * Note that because of the simplified filesystem organization of the NuttX,
+ * top-level, pseudo-file system, an inode can be BOTH a file and a directory
+ */
+
+#define DTYPE_UNKNOWN 0
+#define DTYPE_FIFO 1
+#define DTYPE_CHR 2
+#define DTYPE_SEM 3
+#define DTYPE_DIRECTORY 4
+#define DTYPE_MQ 5
+#define DTYPE_BLK 6
+#define DTYPE_SHM 7
+#define DTYPE_FILE 8
+#define DTYPE_MTD 9
+#define DTYPE_LINK 10
+#define DTYPE_SOCK 12
+
+/* The d_type field of the dirent structure is not specified by POSIX. It
+ * is a non-standard, 4.5BSD extension that is implemented by most OSs. A
+ * POSIX compliant OS may not implement the d_type field at all. Many OS's
+ * (including glibc) may use the following alternative naming for the file
+ * type names:
+ */
+
+#define DT_UNKNOWN DTYPE_UNKNOWN
+#define DT_FIFO DTYPE_FIFO
+#define DT_CHR DTYPE_CHR
+#define DT_SEM DTYPE_SEM
+#define DT_DIR DTYPE_DIRECTORY
+#define DT_MQ DTYPE_MQ
+#define DT_BLK DTYPE_BLK
+#define DT_SHM DTYPE_SHM
+#define DT_REG DTYPE_FILE
+#define DT_MTD DTYPE_MTD
+#define DT_LNK DTYPE_LINK
+#define DT_SOCK DTYPE_SOCK
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/shared_platform.cmake
new file mode 100644
index 000000000..13bc45dcb
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/esp-idf/shared_platform.cmake
@@ -0,0 +1,13 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_ESP_IDF)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_init.c
new file mode 100644
index 000000000..2aae13fa1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_init.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
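
These printf wrappers route through BH_VPRINTF when the embedder defines it at build time. A hedged illustration of what such a hook could look like (the my_log_vprintf name and the stderr sink are assumptions, not anything this port requires):

    #include <stdarg.h>
    #include <stdio.h>

    /* hypothetical sink, compiled into the embedder and selected with
       e.g. -DBH_VPRINTF=my_log_vprintf at build time */
    int
    my_log_vprintf(const char *format, va_list ap)
    {
        /* redirect runtime output to stderr instead of stdout */
        return vfprintf(stderr, format, ap);
    }
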
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_internal.h
new file mode 100644
index 000000000..7b4789c99
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/platform_internal.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <pthread.h>
+#include <signal.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sched.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_FREEBSD
+#define BH_PLATFORM_FREEBSD
+#endif
+
+#define BH_HAS_DLFCN 1
+
+/* Stack size of applet threads' native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define os_thread_local_attribute __thread
+
+#define bh_socket_t int
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
+ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \
+ || defined(BUILD_TARGET_RISCV64_LP64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+#define os_alloca alloca
+
+#define os_getpagesize getpagesize
+
+typedef void (*os_signal_handler)(void *sig_addr);
+
+int
+os_thread_signal_init(os_signal_handler handler);
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+void
+os_signal_unmask();
+
+void
+os_sigreturn();
+#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
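
When OS_ENABLE_HW_BOUND_CHECK takes effect, the runtime installs a per-thread handler through the functions declared above. A rough usage sketch under stated assumptions (the handler body is illustrative; the real recovery logic lives in the runtime's exception handling code):

    /* illustrative only: how the declarations above fit together */
    static void
    on_sig_addr(void *sig_addr)
    {
        /* a real handler would check whether sig_addr falls inside a
           guarded linear-memory region before recovering */
        (void)sig_addr;
    }

    static int
    init_bound_check_for_thread(void)
    {
        if (!os_thread_signal_inited()
            && os_thread_signal_init(on_sig_addr) != 0)
            return -1;
        return 0;
    }
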
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/shared_platform.cmake
new file mode 100644
index 000000000..12583fc63
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/freebsd/shared_platform.cmake
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_FREEBSD)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_POSIX_SOURCE})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_extension.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_extension.h
new file mode 100644
index 000000000..94fe16ea3
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_extension.h
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef PLATFORM_API_EXTENSION_H
+#define PLATFORM_API_EXTENSION_H
+
+#include "platform_common.h"
+/**
+ * The related data structures should be defined
+ * in platform_internal.h
+ **/
+#include "platform_internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************************************
+ * *
+ * Extension interface *
+ * *
+ ***************************************************/
+
+/****************************************************
+ * Section 1 *
+ * Multi thread support *
+ ****************************************************/
+
+/**
+ * NOTES:
+ * 1. If you are building VM core only, it must be implemented to
+ * enable multi-thread support, otherwise no need to implement it
+ * 2. To build the app-mgr and app-framework, you must implement it
+ */
+
+/**
+ * Creates a thread
+ *
+ * @param p_tid [OUTPUT] the pointer of tid
+ * @param start main routine of the thread
+ * @param arg argument passed to main routine
+ * @param stack_size bytes of stack size
+ *
+ * @return 0 if success.
+ */
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size);
+
+/**
+ * Creates a thread with priority
+ *
+ * @param p_tid [OUTPUT] the pointer of tid
+ * @param start main routine of the thread
+ * @param arg argument passed to main routine
+ * @param stack_size bytes of stack size
+ * @param prio the priority
+ *
+ * @return 0 if success.
+ */
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio);
+
+/**
+ * Waits for the thread specified by thread to terminate
+ *
+ * @param thread the thread to wait
+ * @param retval if not NULL, output the exit status of the terminated thread
+ *
+ * @return return 0 if success
+ */
+int
+os_thread_join(korp_tid thread, void **retval);
+
+/**
+ * Detach the thread specified by thread
+ *
+ * @param thread the thread to detach
+ *
+ * @return return 0 if success
+ */
+int
+os_thread_detach(korp_tid thread);
+
+/**
+ * Exit current thread
+ *
+ * @param retval the return value of the current thread
+ */
+void
+os_thread_exit(void *retval);
+
+/* Try to define os_atomic_thread_fence if it isn't defined in
+ platform's platform_internal.h */
+#ifndef os_atomic_thread_fence
+
+#if !defined(__GNUC_PREREQ) && (defined(__GNUC__) || defined(__GNUG__)) \
+ && !defined(__clang__) && defined(__GNUC_MINOR__)
+#define __GNUC_PREREQ(maj, min) \
+ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#endif
+
+/* Clang's __GNUC_PREREQ macro has a different meaning than GCC one,
+ so we have to handle this case specially */
+#if defined(__clang__)
+/* Clang provides stdatomic.h since 3.6.0
+ See https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html */
+#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
+#define BH_HAS_STD_ATOMIC
+#endif
+#elif defined(__GNUC_PREREQ)
+/* Even though older versions of GCC support C11, atomics were
+ not implemented until 4.9. See
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016 */
+#if __GNUC_PREREQ(4, 9)
+#define BH_HAS_STD_ATOMIC
+#elif __GNUC_PREREQ(4, 7)
+#define os_memory_order_acquire __ATOMIC_ACQUIRE
+#define os_memory_order_release __ATOMIC_RELEASE
+#define os_memory_order_seq_cst __ATOMIC_SEQ_CST
+#define os_atomic_thread_fence __atomic_thread_fence
+#endif /* end of __GNUC_PREREQ(4, 9) */
+#endif /* end of defined(__GNUC_PREREQ) */
+
+#if defined(BH_HAS_STD_ATOMIC) && !defined(__cplusplus)
+#include <stdatomic.h>
+#define os_memory_order_acquire memory_order_acquire
+#define os_memory_order_release memory_order_release
+#define os_memory_order_seq_cst memory_order_seq_cst
+#define os_atomic_thread_fence atomic_thread_fence
+#endif
+
+#endif /* end of os_atomic_thread_fence */
+
+/**
+ * Initialize current thread environment if current thread
+ * is created by developer but not runtime
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_thread_env_init(void);
+
+/**
+ * Destroy current thread environment
+ */
+void
+os_thread_env_destroy(void);
+
+/**
+ * Whether the thread environment is initialized
+ */
+bool
+os_thread_env_inited(void);
+
+/**
+ * Suspend execution of the calling thread for (at least)
+ * usec microseconds
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_usleep(uint32 usec);
+
+/**
+ * Creates a recursive mutex
+ *
+ * @param mutex [OUTPUT] pointer to mutex initialized.
+ *
+ * @return 0 if success
+ */
+int
+os_recursive_mutex_init(korp_mutex *mutex);
+
+/**
+ * This function creates a condition variable
+ *
+ * @param cond [OUTPUT] pointer to condition variable
+ *
+ * @return 0 if success
+ */
+int
+os_cond_init(korp_cond *cond);
+
+/**
+ * This function destroys condition variable
+ *
+ * @param cond pointer to condition variable
+ *
+ * @return 0 if success
+ */
+int
+os_cond_destroy(korp_cond *cond);
+
+/**
+ * Wait a condition variable.
+ *
+ * @param cond pointer to condition variable
+ * @param mutex pointer to mutex to protect the condition variable
+ *
+ * @return 0 if success
+ */
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex);
+
+/**
+ * Wait a condition variable or return if the time specified passes.
+ *
+ * @param cond pointer to condition variable
+ * @param mutex pointer to mutex to protect the condition variable
+ * @param useconds microseconds to wait
+ *
+ * @return 0 if success
+ */
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds);
+
+/**
+ * Signals the condition variable
+ *
+ * @param cond condition variable
+ *
+ * @return 0 if success
+ */
+int
+os_cond_signal(korp_cond *cond);
+
+/**
+ * Broadcast the condition variable
+ *
+ * @param cond condition variable
+ *
+ * @return 0 if success
+ */
+int
+os_cond_broadcast(korp_cond *cond);
+
+/**
+ * Creates a new POSIX-like semaphore or opens an existing
+ * semaphore. The semaphore is identified by name. For details of
+ * the construction of name, please refer to
+ * https://man7.org/linux/man-pages/man3/sem_open.3.html.
+ *
+ * @param name semaphore name
+ * @param oflags specifies flags that control the operation of the call
+ * @param mode permission flags
+ * @param val initial value of the named semaphore.
+ *
+ * @return korp_sem * if success, NULL otherwise
+ */
+korp_sem *
+os_sem_open(const char *name, int oflags, int mode, int val);
+
+/**
+ * Closes the named semaphore referred to by sem,
+ * allowing any resources that the system has allocated to the
+ * calling process for this semaphore to be freed.
+ *
+ * @param sem
+ *
+ * @return 0 if success
+ */
+int
+os_sem_close(korp_sem *sem);
+
+/**
+ * Decrements (locks) the semaphore pointed to by sem.
+ * If the semaphore's value is greater than zero, then the decrement
+ * proceeds, and the function returns, immediately. If the
+ * semaphore currently has the value zero, then the call blocks
+ * until either it becomes possible to perform the decrement (i.e.,
+ * the semaphore value rises above zero), or a signal handler
+ * interrupts the call.
+ *
+ * @return 0 if success
+ */
+int
+os_sem_wait(korp_sem *sem);
+
+/**
+ * Is the same as sem_wait(), except that if the
+ * decrement cannot be immediately performed, then the call returns an
+ * error (errno set to EAGAIN) instead of blocking.
+ *
+ * @return 0 if success
+ */
+int
+os_sem_trywait(korp_sem *sem);
+
+/**
+ * Increments (unlocks) the semaphore pointed to by sem.
+ * If the semaphore's value consequently becomes greater than zero,
+ * then another process or thread blocked in a sem_wait(3) call will
+ * be woken up and proceed to lock the semaphore.
+ *
+ * @return 0 if success
+ */
+int
+os_sem_post(korp_sem *sem);
+
+/**
+ * Places the current value of the semaphore pointed
+ * to by sem into the integer pointed to by sval.
+ *
+ * @return 0 if success
+ */
+int
+os_sem_getvalue(korp_sem *sem, int *sval);
+
+/**
+ * Remove the named semaphore referred to by name.
+ * The semaphore name is removed immediately. The semaphore is
+ * destroyed once all other processes that have the semaphore open
+ * close it.
+ *
+ * @param name semaphore name
+ *
+ * @return 0 if success
+ */
+int
+os_sem_unlink(const char *name);
+
+/****************************************************
+ * Section 2 *
+ * Socket support *
+ ****************************************************/
+
+/**
+ * NOTES:
+ * Socket APIs are required by source debugging feature.
+ * If you don't need source debugging feature, then no
+ * need to implement these APIs
+ */
+
+typedef union {
+ uint32 ipv4;
+ uint16 ipv6[8];
+ uint8 data[1];
+} bh_ip_addr_buffer_t;
+
+typedef struct {
+ bh_ip_addr_buffer_t addr_bufer;
+ uint16 port;
+ bool is_ipv4;
+} bh_sockaddr_t;
+
+/**
+ * Create a socket
+ *
+ * @param sock [OUTPUT] the pointer of socket
+ * @param is_ipv4 true for IPv4, false for IPv6
+ * @param is_tcp true for tcp, false for udp
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp);
+
+/**
+ * Assign the address and port to the socket
+ *
+ * @param socket the socket to bind
+ * @param addr the ip address, only IPv4 supported currently
+ * @param port [INPUT/OUTPUT] the port number, if the value is 0,
+ * it will use a port assigned by OS. On return it will
+ * contain the actual bound port number
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_bind(bh_socket_t socket, const char *addr, int *port);
+
+/**
+ * Set timeout for the given socket
+ *
+ * @param socket the socket to set timeout
+ * @param timeout_us timeout in microseconds
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_settimeout(bh_socket_t socket, uint64 timeout_us);
+
+/**
+ * Make the socket a passive socket to accept incoming connection requests
+ *
+ * @param socket the socket to listen
+ * @param max_client maximum clients
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_listen(bh_socket_t socket, int max_client);
+
+/**
+ * Accept an incoming connection
+ *
+ * @param server_sock the socket to accept new connections
+ * @param sock [OUTPUT] the connected socket
+ * @param addr [OUTPUT] the address of the peer socket. If addr is NULL,
+ * nothing is filled in, and addrlen will not be used
+ * @param addrlen [INPUT/OUTPUT] the size (in bytes) of the structure
+ * pointed to by addr, on return it will contain the actual
+ * size of the peer address
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
+ unsigned int *addrlen);
+
+/**
+ * Initiate a connection on a socket
+ *
+ * @param socket the socket to connect with
+ * @param addr the ip address, only IPv4 supported currently
+ * @param port the port to connect to
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_connect(bh_socket_t socket, const char *addr, int port);
+
+/**
+ * Blocking receive message from a socket.
+ *
+ * @param socket the socket to receive message from
+ * @param buf the buffer to store the data
+ * @param len length of the buffer, this API does not guarantee that
+ * [len] bytes are received
+ *
+ * @return number of bytes received if success, -1 otherwise
+ */
+int
+os_socket_recv(bh_socket_t socket, void *buf, unsigned int len);
+
+/**
+ * Blocking receive message from a socket.
+ *
+ * @param socket the socket to receive the message from
+ * @param buf the buffer to store the data
+ * @param len length of the buffer, this API does not guarantee that
+ * [len] bytes are received
+ * @param flags control the operation
+ * @param src_addr source address
+ *
+ * @return number of bytes received if success, -1 otherwise
+ */
+int
+os_socket_recv_from(bh_socket_t socket, void *buf, unsigned int len, int flags,
+ bh_sockaddr_t *src_addr);
+
+/**
+ * Blocking send message on a socket
+ *
+ * @param socket the socket to send message
+ * @param buf the buffer of data to be sent
+ * @param len length of the buffer
+ *
+ * @return number of bytes sent if success, -1 otherwise
+ */
+int
+os_socket_send(bh_socket_t socket, const void *buf, unsigned int len);
+
+/**
+ * Blocking send message on a socket to the target address
+ *
+ * @param socket the socket to send message
+ * @param buf the buffer of data to be sent
+ * @param len length of the buffer
+ * @param flags control the operation
+ * @param dest_addr target address
+ *
+ * @return number of bytes sent if success, -1 otherwise
+ */
+int
+os_socket_send_to(bh_socket_t socket, const void *buf, unsigned int len,
+ int flags, const bh_sockaddr_t *dest_addr);
+
+/**
+ * Close a socket
+ *
+ * @param socket the socket to be closed
+ *
+ * @return always return 0
+ */
+int
+os_socket_close(bh_socket_t socket);
+
+/**
+ * Shutdown a socket
+ *
+ * @param socket the socket to be shutdown
+ *
+ * @return always return 0
+ */
+int
+os_socket_shutdown(bh_socket_t socket);
+
+/**
+ * converts cp into a number in host byte order suitable for use as
+ * an Internet network address
+ *
+ * @param is_ipv4 a flag that indicates whether the string is an IPv4 or
+ * IPv6 address
+ *
+ * @param cp a string in IPv4 numbers-and-dots notation or IPv6
+ * numbers-and-colons notation
+ *
+ * @param out an output buffer to store binary address
+ *
+ * @return On success, the function returns 0.
+ * If the input is invalid, -1 is returned
+ */
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out);
+
+typedef struct {
+ bh_sockaddr_t sockaddr;
+ uint8_t is_tcp;
+} bh_addr_info_t;
+
+/**
+ * Resolve a hostname and a service to one or more IP addresses
+ *
+ * @param host a host to resolve
+ *
+ * @param service a service to find a port for
+ *
+ * @param hint_is_tcp an optional flag that determines a preferred socket type
+ *                    (TCP or UDP).
+ *
+ * @param hint_is_ipv4 an optional flag that determines a preferred address
+ *                     family (IPv4 or IPv6)
+ *
+ * @param addr_info a buffer for resolved addresses
+ *
+ * @param addr_info_size a size of the buffer for resolved addresses
+ *
+ * @param max_info_size a maximum number of addresses available (can be bigger
+ *                      or smaller than buffer size)
+ *
+ * @return On success, the function returns 0; otherwise, it returns -1
+ */
+int
+os_socket_addr_resolve(const char *host, const char *service,
+ uint8_t *hint_is_tcp, uint8_t *hint_is_ipv4,
+ bh_addr_info_t *addr_info, size_t addr_info_size,
+ size_t *max_info_size);
+
+/**
+ * Returns a binary address and a port of the local socket
+ *
+ * @param socket the local socket
+ *
+ * @param sockaddr a buffer for storing the address
+ *
+ * @return On success, returns 0; otherwise, it returns -1.
+ */
+int
+os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr);
+
+/**
+ * Returns a binary address and a port of the remote socket
+ *
+ * @param socket the remote socket
+ *
+ * @param sockaddr a buffer for storing the address
+ *
+ * @return On success, returns 0; otherwise, it returns -1.
+ */
+int
+os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr);
+
+/**
+ * Set the maximum send buffer size.
+ *
+ * @param socket the socket to set
+ * @param bufsiz requested kernel buffer size
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_send_buf_size(bh_socket_t socket, size_t bufsiz);
+
+/**
+ * Get the maximum send buffer size.
+ *
+ * @param socket the socket to check
+ * @param bufsiz the returned kernel buffer size
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_send_buf_size(bh_socket_t socket, size_t *bufsiz);
+
+/**
+ * Set the maximum receive buffer size.
+ *
+ * @param socket the socket to set
+ * @param bufsiz requested kernel buffer size
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_recv_buf_size(bh_socket_t socket, size_t bufsiz);
+
+/**
+ * Get the maximum receive buffer size.
+ *
+ * @param socket the socket to check
+ * @param bufsiz the returned kernel buffer size
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_recv_buf_size(bh_socket_t socket, size_t *bufsiz);
+
+/**
+ * Enable sending of keep-alive messages on connection-oriented sockets
+ *
+ * @param socket the socket to set the flag
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_keep_alive(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get if sending of keep-alive messages on connection-oriented sockets is
+ * enabled
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 if enabled or 0 if disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_keep_alive(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Set the send timeout until reporting an error
+ *
+ * @param socket the socket to set
+ * @param timeout_us microseconds until timeout
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_send_timeout(bh_socket_t socket, uint64 timeout_us);
+
+/**
+ * Get the send timeout until reporting an error
+ *
+ * @param socket the socket to check
+ * @param timeout_us the returned microseconds until timeout
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_send_timeout(bh_socket_t socket, uint64 *timeout_us);
+
+/**
+ * Set the recv timeout until reporting an error
+ *
+ * @param socket the socket to set
+ * @param timeout_us microseconds until timeout
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_recv_timeout(bh_socket_t socket, uint64 timeout_us);
+
+/**
+ * Get the recv timeout until reporting an error
+ *
+ * @param socket the socket to check
+ * @param timeout_us the returned microseconds until timeout
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_recv_timeout(bh_socket_t socket, uint64 *timeout_us);
+
+/**
+ * Enable re-use of local addresses
+ *
+ * @param socket the socket to set
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_reuse_addr(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get whether re-use of local addresses is enabled
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 for enabled or 0 for disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_reuse_addr(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Enable re-use of local ports
+ *
+ * @param socket the socket to set
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_reuse_port(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get whether re-use of local ports is enabled
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 for enabled or 0 for disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_reuse_port(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Set the linger options for the given socket
+ *
+ * @param socket the socket to set
+ * @param is_enabled whether linger is enabled
+ * @param linger_s linger time (seconds)
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_linger(bh_socket_t socket, bool is_enabled, int linger_s);
+
+/**
+ * Get the linger options for the given socket
+ *
+ * @param socket the socket to get
+ * @param is_enabled whether linger is enabled
+ * @param linger_s linger time (seconds)
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_linger(bh_socket_t socket, bool *is_enabled, int *linger_s);
+
+/**
+ * Set no delay TCP
+ * If set, disable the Nagle algorithm.
+ * This means that segments are always sent as soon as possible,
+ * even if there is only a small amount of data
+ *
+ * @param socket the socket to set the flag
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_tcp_no_delay(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get no delay TCP
+ * If set, disable the Nagle algorithm.
+ * This means that segments are always sent as soon as possible,
+ * even if there is only a small amount of data
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 if enabled or 0 if disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_tcp_no_delay(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Enable/Disable tcp quickack mode
+ * In quickack mode, acks are sent immediately, rather than delayed as they
+ * would be in normal TCP operation
+ *
+ * @param socket the socket to set the flag
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_tcp_quick_ack(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Enable/Disable tcp quickack mode
+ * In quickack mode, acks are sent immediately, rather than delayed as they
+ * would be in normal TCP operation
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 if enabled or 0 if disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_tcp_quick_ack(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Set the time the connection needs to remain idle before sending keepalive
+ * probes
+ *
+ * @param socket the socket to set
+ * @param time_s seconds until keepalive probes are sent
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_tcp_keep_idle(bh_socket_t socket, uint32_t time_s);
+
+/**
+ * Gets the time the connection needs to remain idle before sending keepalive
+ * probes
+ *
+ * @param socket the socket to check
+ * @param time_s seconds until keepalive probes are sent
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_tcp_keep_idle(bh_socket_t socket, uint32_t *time_s);
+
+/**
+ * Set the time between individual keepalive probes
+ *
+ * @param socket the socket to set
+ * @param time_s seconds between individual keepalive probes
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_tcp_keep_intvl(bh_socket_t socket, uint32_t time_s);
+
+/**
+ * Get the time between individual keepalive probes
+ *
+ * @param socket the socket to get
+ * @param time_s seconds between individual keepalive probes
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_tcp_keep_intvl(bh_socket_t socket, uint32_t *time_s);
+
+/**
+ * Set use of TCP Fast Open
+ *
+ * @param socket the socket to set
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_tcp_fastopen_connect(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get whether use of TCP Fast Open is enabled
+ *
+ * @param socket the socket to get
+ * @param is_enabled 1 if enabled or 0 if disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_tcp_fastopen_connect(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Enable or disable IPv4 or IPv6 multicast loopback.
+ *
+ * @param socket the socket to set
+ * @param ipv6 true to set ipv6 loopback or false for ipv4
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool is_enabled);
+
+/**
+ * Get whether IPv4 or IPv6 multicast loopback is enabled.
+ *
+ * @param socket the socket to check
+ * @param ipv6 true to set ipv6 loopback or false for ipv4
+ * @param is_enabled 1 for enabled or 0 for disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_ip_multicast_loop(bh_socket_t socket, bool ipv6,
+ bool *is_enabled);
+
+/**
+ * Add membership to a group
+ *
+ * @param socket the socket to add membership to
+ * @param imr_multiaddr the group multicast address (IPv4 or IPv6)
+ * @param imr_interface the interface to join on
+ * @param is_ipv6 whether the imr_multiaddr is IPv4 or IPv6 (true for IPv6)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ip_add_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6);
+
+/**
+ * Drop membership of a group
+ *
+ * @param socket the socket to drop membership from
+ * @param imr_multiaddr the group multicast address (IPv4 or IPv6)
+ * @param imr_interface the interface to leave the group on
+ * @param is_ipv6 whether the imr_multiaddr is IPv4 or IPv6 (true for IPv6)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ip_drop_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6);
+
+/**
+ * Set the current time-to-live field that is
+ * used in every packet sent from this socket.
+ * @param socket the socket to set the flag
+ * @param ttl_s time to live (seconds)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ip_ttl(bh_socket_t socket, uint8_t ttl_s);
+
+/**
+ * Retrieve the current time-to-live field that is
+ * used in every packet sent from this socket.
+ * @param socket the socket to check
+ * @param ttl_s time to live (seconds)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_ip_ttl(bh_socket_t socket, uint8_t *ttl_s);
+
+/**
+ * Set the time-to-live value of outgoing multicast
+ * packets for this socket
+ * @param socket the socket to set the flag
+ * @param ttl_s time to live (seconds)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ip_multicast_ttl(bh_socket_t socket, uint8_t ttl_s);
+
+/**
+ * Read the time-to-live value of outgoing multicast
+ * packets for this socket
+ * @param socket the socket to check
+ * @param ttl_s time to live (seconds)
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_ip_multicast_ttl(bh_socket_t socket, uint8_t *ttl_s);
+
+/**
+ * Restrict to sending and receiving IPv6 packets only
+ *
+ * @param socket the socket to set
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_ipv6_only(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get whether the socket is restricted to sending and receiving
+ * IPv6 packets only
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 for enabled or 0 for disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_ipv6_only(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Set whether broadcast is enabled
+ * When enabled, datagram sockets are allowed
+ * to send packets to a broadcast address.
+ *
+ * @param socket the socket to set the flag
+ * @param is_enabled 1 to enable or 0 to disable
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_set_broadcast(bh_socket_t socket, bool is_enabled);
+
+/**
+ * Get whether broadcast is enabled
+ * When enabled, datagram sockets are allowed
+ * to send packets to a broadcast address.
+ *
+ * @param socket the socket to check
+ * @param is_enabled 1 if enabled or 0 if disabled
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_socket_get_broadcast(bh_socket_t socket, bool *is_enabled);
+
+/**
+ * Dump memory information of the current process
+ * It may have different implementations on different platforms
+ *
+ * @param out the output buffer. The returned content is guaranteed
+ *            to be a C string terminated with '\0'
+ * @param size the size of the output buffer
+ *
+ * @return 0 if success, -1 otherwise
+ */
+int
+os_dumps_proc_mem_info(char *out, unsigned int size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef PLATFORM_API_EXTENSION_H */
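
To make the shape of this socket API concrete, here is a minimal accept-and-echo sketch built only from the declarations above. It assumes the platform headers are included; the loopback address, backlog of 1 and 128-byte buffer are arbitrary choices for the example, not requirements of the API.

    /* assumes platform_api_extension.h (and its platform_internal.h) is included */
    static int
    serve_once(void)
    {
        bh_socket_t listen_sock, conn_sock;
        char buf[128];
        int port = 0; /* 0: let the OS choose; the bound port is written back */
        int n;

        if (os_socket_create(&listen_sock, true, true) != 0)
            return -1;

        if (os_socket_bind(listen_sock, "127.0.0.1", &port) != 0
            || os_socket_listen(listen_sock, 1) != 0) {
            os_socket_close(listen_sock);
            return -1;
        }

        if (os_socket_accept(listen_sock, &conn_sock, NULL, NULL) != 0) {
            os_socket_close(listen_sock);
            return -1;
        }

        n = os_socket_recv(conn_sock, buf, sizeof(buf));
        if (n > 0)
            os_socket_send(conn_sock, buf, (unsigned int)n);

        os_socket_close(conn_sock);
        os_socket_close(listen_sock);
        return 0;
    }
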
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_vmcore.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_vmcore.h
new file mode 100644
index 000000000..c2f03c9e5
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_api_vmcore.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_API_VMCORE_H
+#define _PLATFORM_API_VMCORE_H
+
+#include "platform_common.h"
+#include "platform_internal.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************
+ * Section 1 *
+ * Interfaces required by the runtime *
+ ****************************************************/
+
+/**
+ * Initialize the platform internal resources if needed,
+ * this function is called by wasm_runtime_init() and
+ * wasm_runtime_full_init()
+ *
+ * @return 0 if success
+ */
+int
+bh_platform_init(void);
+
+/**
+ * Destroy the platform internal resources if needed,
+ * this function is called by wasm_runtime_destroy()
+ */
+void
+bh_platform_destroy(void);
+
+/**
+ ******** memory allocator APIs **********
+ */
+
+void *
+os_malloc(unsigned size);
+
+void *
+os_realloc(void *ptr, unsigned size);
+
+void
+os_free(void *ptr);
+
+/**
+ * Note: the above APIs can simply return NULL if wasm runtime
+ * isn't initialized with Alloc_With_System_Allocator.
+ * Refer to wasm_runtime_full_init().
+ */
+
+int
+os_printf(const char *format, ...);
+
+int
+os_vprintf(const char *format, va_list ap);
+
+/**
+ * Get microseconds after boot.
+ */
+uint64
+os_time_get_boot_microsecond(void);
+
+/**
+ * Get current thread id.
+ * Implementation optional: Used by runtime for logging only.
+ */
+korp_tid
+os_self_thread(void);
+
+/**
+ * Get current thread's stack boundary address, used for runtime
+ * to check for native stack overflow. Return NULL if it is not
+ * easy to implement, though doing so may cause potential issues.
+ */
+uint8 *
+os_thread_get_stack_boundary(void);
+
+/**
+ ************** mutex APIs ***********
+ * vmcore: Not required until pthread is supported by runtime
+ * app-mgr: Must be implemented
+ */
+
+int
+os_mutex_init(korp_mutex *mutex);
+
+int
+os_mutex_destroy(korp_mutex *mutex);
+
+int
+os_mutex_lock(korp_mutex *mutex);
+
+int
+os_mutex_unlock(korp_mutex *mutex);
+
+/**************************************************
+ * Section 2 *
+ * APIs required by WAMR AOT *
+ **************************************************/
+
+/* Memory map modes */
+enum {
+ MMAP_PROT_NONE = 0,
+ MMAP_PROT_READ = 1,
+ MMAP_PROT_WRITE = 2,
+ MMAP_PROT_EXEC = 4
+};
+
+/* Memory map flags */
+enum {
+ MMAP_MAP_NONE = 0,
+ /* Put the mapping into 0 to 2 G, supported only on x86_64 */
+ MMAP_MAP_32BIT = 1,
+ /* Don't interpret addr as a hint: place the mapping at exactly
+ that address. */
+ MMAP_MAP_FIXED = 2
+};
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags);
+void
+os_munmap(void *addr, size_t size);
+int
+os_mprotect(void *addr, size_t size, int prot);
+
+/**
+ * Flush the CPU data cache. On some CPUs, after applying relocations to the
+ * AOT code, the code may not have been written back to the CPU data cache,
+ * which may cause unexpected behaviour when executing the AOT code.
+ * Implement this function if required, or just leave it empty.
+ */
+void
+os_dcache_flush(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef _PLATFORM_API_VMCORE_H */
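
On POSIX-like platforms these memory-map hooks usually reduce to mmap/munmap plus a flag translation. The following is only a hedged sketch of that translation under the assumption of an anonymous private mapping; the real ports (e.g. posix_memmap.c) handle more cases such as MMAP_MAP_32BIT, MMAP_MAP_FIXED and allocation retries.

    #include <sys/mman.h>

    static int
    to_posix_prot(int prot)
    {
        int p = PROT_NONE;
        if (prot & MMAP_PROT_READ)
            p |= PROT_READ;
        if (prot & MMAP_PROT_WRITE)
            p |= PROT_WRITE;
        if (prot & MMAP_PROT_EXEC)
            p |= PROT_EXEC;
        return p;
    }

    void *
    os_mmap(void *hint, size_t size, int prot, int flags)
    {
        void *addr;

        (void)flags; /* the sketch ignores MMAP_MAP_32BIT / MMAP_MAP_FIXED */
        addr = mmap(hint, size, to_posix_prot(prot),
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return addr == MAP_FAILED ? NULL : addr;
    }

    void
    os_munmap(void *addr, size_t size)
    {
        munmap(addr, size);
    }
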
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_common.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_common.h
new file mode 100644
index 000000000..28001af74
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/include/platform_common.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_COMMON_H
+#define _PLATFORM_COMMON_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "platform_internal.h"
+#include "../../../config.h"
+
+#define BH_MAX_THREAD 32
+
+#define BHT_ERROR (-1)
+#define BHT_TIMED_OUT (1)
+#define BHT_OK (0)
+
+#define BHT_WAIT_FOREVER ((uint64)-1LL)
+
+#define BH_KB (1024)
+#define BH_MB ((BH_KB)*1024)
+#define BH_GB ((BH_MB)*1024)
+
+#ifndef BH_MALLOC
+#define BH_MALLOC os_malloc
+#endif
+
+#ifndef BH_FREE
+#define BH_FREE os_free
+#endif
+
+#ifndef BH_TIME_T_MAX
+#define BH_TIME_T_MAX LONG_MAX
+#endif
+
+#if defined(_MSC_BUILD)
+#if defined(COMPILING_WASM_RUNTIME_API)
+__declspec(dllexport) void *BH_MALLOC(unsigned int size);
+__declspec(dllexport) void BH_FREE(void *ptr);
+#else
+__declspec(dllimport) void *BH_MALLOC(unsigned int size);
+__declspec(dllimport) void BH_FREE(void *ptr);
+#endif
+#else
+void *
+BH_MALLOC(unsigned int size);
+void
+BH_FREE(void *ptr);
+#endif
+
+#if defined(BH_VPRINTF)
+#if defined(MSVC)
+__declspec(dllimport) int BH_VPRINTF(const char *format, va_list ap);
+#else
+int
+BH_VPRINTF(const char *format, va_list ap);
+#endif
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+
+#if !defined(BH_HAS_DLFCN)
+#if defined(_POSIX_SOURCE) || defined(_POSIX_C_SOURCE)
+#define BH_HAS_DLFCN 1
+#else
+#define BH_HAS_DLFCN 0
+#endif
+#endif
+
+#ifndef __cplusplus
+
+#ifndef true
+#define true 1
+#endif
+
+#ifndef false
+#define false 0
+#endif
+
+#ifndef inline
+#define inline __inline
+#endif
+
+#endif
+
+/* Return the offset of the given field in the given type */
+#ifndef offsetof
+/* GCC 4.0 and later has the builtin. */
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define offsetof(Type, field) __builtin_offsetof(Type, field)
+#else
+#define offsetof(Type, field) ((size_t)(&((Type *)0)->field))
+#endif
+#endif
+
+typedef uint8_t uint8;
+typedef int8_t int8;
+typedef uint16_t uint16;
+typedef int16_t int16;
+typedef uint32_t uint32;
+typedef int32_t int32;
+typedef float float32;
+typedef double float64;
+typedef uint64_t uint64;
+typedef int64_t int64;
+
+typedef void *(*thread_start_routine_t)(void *);
+
+#ifndef bh_socket_t
+/* If no socket defined on current platform,
+ give a fake definition to make the compiler happy */
+#define bh_socket_t int
+#endif
+
+/* Format specifiers macros in case
+ they are not provided by compiler */
+#ifndef __PRI64_PREFIX
+#if UINTPTR_MAX == UINT64_MAX
+#define __PRI64_PREFIX "l"
+#define __PRIPTR_PREFIX "l"
+#else
+#define __PRI64_PREFIX "ll"
+#define __PRIPTR_PREFIX
+#endif
+#endif /* #ifndef __PRI64_PREFIX */
+
+/* Macros for printing format specifiers */
+#ifndef PRId32
+#define PRId32 "d"
+#endif
+#ifndef PRIi32
+#define PRIi32 "i"
+#endif
+#ifndef PRIu32
+#define PRIu32 "u"
+#endif
+#ifndef PRIx32
+#define PRIx32 "x"
+#endif
+#ifndef PRIX32
+#define PRIX32 "X"
+#endif
+
+#ifndef PRId64
+#define PRId64 __PRI64_PREFIX "d"
+#endif
+#ifndef PRIu64
+#define PRIu64 __PRI64_PREFIX "u"
+#endif
+#ifndef PRIx64
+#define PRIx64 __PRI64_PREFIX "x"
+#endif
+#ifndef PRIX64
+#define PRIX64 __PRI64_PREFIX "X"
+#endif
+#ifndef PRIxPTR
+#define PRIxPTR __PRIPTR_PREFIX "x"
+#endif
+#ifndef PRIXPTR
+#define PRIXPTR __PRIPTR_PREFIX "X"
+#endif
+
+/* Macros for scanning format specifiers */
+#ifndef SCNd32
+#define SCNd32 "d"
+#endif
+#ifndef SCNi32
+#define SCNi32 "i"
+#endif
+#ifndef SCNu32
+#define SCNu32 "u"
+#endif
+#ifndef SCNx32
+#define SCNx32 "x"
+#endif
+
+#ifndef SCNd64
+#define SCNd64 __PRI64_PREFIX "d"
+#endif
+#ifndef SCNu64
+#define SCNu64 __PRI64_PREFIX "u"
+#endif
+#ifndef SCNx64
+#define SCNx64 __PRI64_PREFIX "x"
+#endif
+#ifndef SCNxPTR
+#define SCNxPTR __PRIPTR_PREFIX "x"
+#endif
+
+#ifndef NAN
+#define NAN (0.0 / 0.0)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef _PLATFORM_COMMON_H */
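
A small illustration of why the fixed-width typedefs and the PRI fallbacks above matter: the same logging line stays portable across 32-bit and 64-bit targets. The snippet assumes platform_api_vmcore.h is included, which declares os_time_get_boot_microsecond and os_printf.

    /* assumes platform_api_vmcore.h is included */
    static void
    log_boot_time(void)
    {
        uint64 now_us = os_time_get_boot_microsecond();
        os_printf("boot time: %" PRIu64 " us\n", now_us);
    }
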
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/platform_internal.h
new file mode 100644
index 000000000..d18f015ee
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/platform_internal.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <sgx_thread.h>
+#include <pthread.h>
+
+#include "sgx_error.h"
+#include "sgx_file.h"
+#include "sgx_pthread.h"
+#include "sgx_time.h"
+#include "sgx_socket.h"
+#include "sgx_signal.h"
+#include "sgx_trts.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_LINUX_SGX
+#define BH_PLATFORM_LINUX_SGX
+#endif
+
+#define _STACK_SIZE_ADJUSTMENT (32 * 1024)
+
+/* Stack size of applet threads' native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (8 * 1024 + _STACK_SIZE_ADJUSTMENT)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_thread;
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef unsigned int korp_sem;
+
+#ifndef SGX_DISABLE_PTHREAD
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#endif
+
+typedef int (*os_print_function_t)(const char *message);
+void
+os_set_print_function(os_print_function_t pf);
+
+char *
+strcpy(char *dest, const char *src);
+
+#define os_memory_order_acquire __ATOMIC_ACQUIRE
+#define os_memory_order_release __ATOMIC_RELEASE
+#define os_memory_order_seq_cst __ATOMIC_SEQ_CST
+#define os_atomic_thread_fence __atomic_thread_fence
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
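
Because an enclave has no stdout of its own, the SGX port lets the embedder install a print callback through os_set_print_function declared above. A hedged sketch of wiring it up; the forward_to_host name and the idea of relaying through an OCALL are assumptions about the embedder's side.

    /* hypothetical callback supplied by the embedder */
    static int
    forward_to_host(const char *message)
    {
        /* a real embedder would relay the string out of the enclave,
           e.g. through an OCALL; here it only reports the length */
        return (int)strlen(message);
    }

    static void
    install_enclave_printer(void)
    {
        os_set_print_function(forward_to_host);
    }
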
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.c
new file mode 100644
index 000000000..a8ae8d2f9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.c
@@ -0,0 +1,1117 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "sgx_error.h"
+#include "sgx_file.h"
+
+#if WASM_ENABLE_SGX_IPFS != 0
+#include "sgx_ipfs.h"
+#endif
+
+#ifndef SGX_DISABLE_WASI
+
+#define TRACE_FUNC() os_printf("undefined %s\n", __FUNCTION__)
+#define TRACE_OCALL_FAIL() os_printf("ocall %s failed!\n", __FUNCTION__)
+
+/** fd **/
+int
+ocall_open(int *p_fd, const char *pathname, int flags, bool has_mode,
+ unsigned mode);
+
+int
+ocall_openat(int *p_fd, int dirfd, const char *pathname, int flags,
+ bool has_mode, unsigned mode);
+
+int
+ocall_read(ssize_t *p_ret, int fd, void *buf, size_t read_size);
+
+int
+ocall_close(int *p_ret, int fd);
+
+int
+ocall_lseek(off_t *p_ret, int fd, off_t offset, int whence);
+
+int
+ocall_ftruncate(int *p_ret, int fd, off_t length);
+
+int
+ocall_fsync(int *p_ret, int fd);
+
+int
+ocall_fdatasync(int *p_ret, int fd);
+
+int
+ocall_isatty(int *p_ret, int fd);
+/** fd end **/
+
+/** DIR **/
+int
+ocall_fdopendir(int fd, void **p_dirp);
+
+int
+ocall_readdir(void **p_dirent, void *dirp);
+
+int
+ocall_rewinddir(void *dirp);
+
+int
+ocall_seekdir(void *dirp, long loc);
+
+int
+ocall_telldir(long *p_dir, void *dirp);
+
+int
+ocall_closedir(int *p_ret, void *dirp);
+/** DIR end **/
+
+/** stat **/
+int
+ocall_stat(int *p_ret, const char *pathname, void *buf, unsigned int buf_len);
+int
+ocall_fstat(int *p_ret, int fd, void *buf, unsigned int buf_len);
+int
+ocall_fstatat(int *p_ret, int dirfd, const char *pathname, void *buf,
+ unsigned int buf_len, int flags);
+/** stat end **/
+
+/** link **/
+int
+ocall_mkdirat(int *p_ret, int dirfd, const char *pathname, unsigned mode);
+int
+ocall_link(int *p_ret, const char *oldpath, const char *newpath);
+int
+ocall_linkat(int *p_ret, int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath, int flags);
+int
+ocall_unlinkat(int *p_ret, int dirfd, const char *pathname, int flags);
+int
+ocall_readlink(ssize_t *p_ret, const char *pathname, char *buf, size_t bufsiz);
+int
+ocall_readlinkat(ssize_t *p_ret, int dirfd, const char *pathname, char *buf,
+ size_t bufsiz);
+int
+ocall_renameat(int *p_ret, int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath);
+int
+ocall_symlinkat(int *p_ret, const char *target, int newdirfd,
+ const char *linkpath);
+/** link end **/
+
+/** control **/
+int
+ocall_ioctl(int *p_ret, int fd, unsigned long request, void *arg,
+ unsigned int arg_len);
+int
+ocall_fcntl(int *p_ret, int fd, int cmd);
+int
+ocall_fcntl_long(int *p_ret, int fd, int cmd, long arg);
+/** control end **/
+
+/** **/
+int
+ocall_realpath(int *p_ret, const char *path, char *buf, unsigned int buf_len);
+int
+ocall_posix_fallocate(int *p_ret, int fd, off_t offset, off_t len);
+int
+ocall_poll(int *p_ret, void *fds, unsigned nfds, int timeout,
+ unsigned int fds_len);
+int
+ocall_getopt(int *p_ret, int argc, char *argv_buf, unsigned int argv_buf_len,
+ const char *optstring);
+int
+ocall_sched_yield(int *p_ret);
+
+/** struct iovec **/
+ssize_t
+ocall_readv(ssize_t *p_ret, int fd, char *iov_buf, unsigned int buf_size,
+ int iovcnt, bool has_offset, off_t offset);
+ssize_t
+ocall_writev(ssize_t *p_ret, int fd, char *iov_buf, unsigned int buf_size,
+ int iovcnt, bool has_offset, off_t offset);
+/** iovec end **/
+
+int
+ocall_get_errno(int *p_ret);
+
+int
+open(const char *pathname, int flags, ...)
+{
+ int fd;
+ bool has_mode = false;
+ mode_t mode = 0;
+
+ if ((flags & O_CREAT) || (flags & O_TMPFILE) == O_TMPFILE) {
+ va_list ap;
+ va_start(ap, flags);
+ mode = va_arg(ap, mode_t);
+ va_end(ap);
+ has_mode = true;
+ }
+
+ if (SGX_SUCCESS != ocall_open(&fd, pathname, flags, has_mode, mode)) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (fd >= 0 && (flags & O_CLOEXEC))
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+
+ if (fd == -1)
+ errno = get_errno();
+ return fd;
+}
+
+int
+openat(int dirfd, const char *pathname, int flags, ...)
+{
+ int fd;
+ bool has_mode = false;
+ mode_t mode = 0;
+
+ if ((flags & O_CREAT) || (flags & O_TMPFILE) == O_TMPFILE) {
+ va_list ap;
+ va_start(ap, flags);
+ mode = va_arg(ap, mode_t);
+ va_end(ap);
+ has_mode = true;
+ }
+
+ if (SGX_SUCCESS
+ != ocall_openat(&fd, dirfd, pathname, flags, has_mode, mode)) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (fd >= 0 && (flags & O_CLOEXEC))
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+
+ if (fd == -1)
+ errno = get_errno();
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ struct stat sb;
+ int ret = fstatat(dirfd, pathname, &sb, 0);
+ if (ret < 0) {
+ if (ocall_close(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return -1;
+ }
+
+    // Only files are managed by SGX IPFS
+ if (S_ISREG(sb.st_mode)) {
+        // When WAMR is built with Intel SGX IPFS enabled, it opens a second
+ // file descriptor to interact with the secure file.
+ // The first file descriptor opened earlier is used to interact
+ // with the metadata of the file (e.g., time, flags, etc.).
+ void *file_ptr = ipfs_fopen(fd, flags);
+ if (file_ptr == NULL) {
+ if (ocall_close(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return -1;
+ }
+ }
+#endif
+
+ return fd;
+}
+
+int
+close(int fd)
+{
+ int ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+    // Close the IPFS file pointer in addition to the file descriptor
+ ret = ipfs_close(fd);
+ if (ret == -1)
+ errno = get_errno();
+#endif
+
+ if (ocall_close(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+ssize_t
+read(int fd, void *buf, size_t size)
+{
+ ssize_t ret;
+ int size_read_max = 2048, size_read, total_size_read = 0, count, i;
+ char *p = buf;
+
+ if (buf == NULL) {
+ TRACE_FUNC();
+ return -1;
+ }
+
+ count = (size + size_read_max - 1) / size_read_max;
+ for (i = 0; i < count; i++) {
+ size_read = (i < count - 1) ? size_read_max : size - size_read_max * i;
+
+ if (ocall_read(&ret, fd, p, size_read) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1) {
+ /* read failed */
+ errno = get_errno();
+ return -1;
+ }
+
+ p += ret;
+ total_size_read += ret;
+
+ if (ret < size_read)
+ /* end of file */
+ break;
+ }
+ return total_size_read;
+}
+
+DIR *
+fdopendir(int fd)
+{
+ DIR *result = NULL;
+
+ result = (DIR *)BH_MALLOC(sizeof(DIR));
+ if (!result)
+ return NULL;
+
+ if (ocall_fdopendir(fd, (void **)result) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(result);
+ return NULL;
+ }
+
+ if ((void *)*result == NULL) { /* opendir failed */
+ TRACE_FUNC();
+ BH_FREE(result);
+ errno = get_errno();
+ return NULL;
+ }
+
+ return result;
+}
+
+struct dirent *
+readdir(DIR *dirp)
+{
+ struct dirent *result;
+
+ if (dirp == NULL)
+ return NULL;
+
+ if (ocall_readdir((void **)&result, (void *)*dirp) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return NULL;
+ }
+
+ if (!result)
+ errno = get_errno();
+ return result;
+}
+
+void
+rewinddir(DIR *dirp)
+{
+ if (ocall_rewinddir((void *)*dirp) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+}
+
+void
+seekdir(DIR *dirp, long loc)
+{
+ if (ocall_seekdir((void *)*dirp, loc) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+}
+
+long
+telldir(DIR *dirp)
+{
+ long ret;
+
+ if (ocall_telldir(&ret, (void *)*dirp) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+closedir(DIR *dirp)
+{
+ int ret;
+
+ if (ocall_closedir(&ret, (void *)*dirp) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ BH_FREE(dirp);
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+static ssize_t
+readv_internal(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset)
+{
+ ssize_t ret, size_left;
+ struct iovec *iov1;
+ int i;
+ char *p;
+ uint64 total_size = sizeof(struct iovec) * (uint64)iovcnt;
+
+ if (iov == NULL || iovcnt < 1)
+ return -1;
+
+ for (i = 0; i < iovcnt; i++) {
+ total_size += iov[i].iov_len;
+ }
+
+ if (total_size >= UINT32_MAX)
+ return -1;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ if (fd > 2) {
+ return ipfs_read(fd, iov, iovcnt, has_offset, offset);
+ }
+#endif
+
+ iov1 = BH_MALLOC((uint32)total_size);
+
+ if (iov1 == NULL)
+ return -1;
+
+ memset(iov1, 0, (uint32)total_size);
+
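+ /* iov1 is one contiguous buffer: the iovec array first, then the data
+ area. Each iov_base is filled with the offset of its data within
+ iov1 instead of a real pointer (the untrusted ocall side is expected
+ to rebase these offsets); the same offsets are used below to copy
+ the returned data back into the caller's iov. */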
+ p = (char *)(uintptr_t)(sizeof(struct iovec) * iovcnt);
+
+ for (i = 0; i < iovcnt; i++) {
+ iov1[i].iov_len = iov[i].iov_len;
+ iov1[i].iov_base = p;
+ p += iov[i].iov_len;
+ }
+
+ if (ocall_readv(&ret, fd, (char *)iov1, (uint32)total_size, iovcnt,
+ has_offset, offset)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(iov1);
+ return -1;
+ }
+
+ p = (char *)(uintptr_t)(sizeof(struct iovec) * iovcnt);
+
+ size_left = ret;
+ for (i = 0; i < iovcnt; i++) {
+ if (size_left > iov[i].iov_len) {
+ memcpy(iov[i].iov_base, (uintptr_t)p + (char *)iov1,
+ iov[i].iov_len);
+ p += iov[i].iov_len;
+ size_left -= iov[i].iov_len;
+ }
+ else {
+ memcpy(iov[i].iov_base, (uintptr_t)p + (char *)iov1, size_left);
+ break;
+ }
+ }
+
+ BH_FREE(iov1);
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+static ssize_t
+writev_internal(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset)
+{
+ ssize_t ret;
+ struct iovec *iov1;
+ int i;
+ char *p;
+ uint64 total_size = sizeof(struct iovec) * (uint64)iovcnt;
+
+ if (iov == NULL || iovcnt < 1)
+ return -1;
+
+ for (i = 0; i < iovcnt; i++) {
+ total_size += iov[i].iov_len;
+ }
+
+ if (total_size >= UINT32_MAX)
+ return -1;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ if (fd > 2) {
+ return ipfs_write(fd, iov, iovcnt, has_offset, offset);
+ }
+#endif
+
+ iov1 = BH_MALLOC((uint32)total_size);
+
+ if (iov1 == NULL)
+ return -1;
+
+ memset(iov1, 0, (uint32)total_size);
+
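+ /* Same marshalling scheme as readv_internal: iov_base fields hold
+ offsets into the single iov1 buffer, and each payload is copied into
+ the data area that follows the iovec array. */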
+ p = (char *)(uintptr_t)(sizeof(struct iovec) * iovcnt);
+
+ for (i = 0; i < iovcnt; i++) {
+ iov1[i].iov_len = iov[i].iov_len;
+ iov1[i].iov_base = p;
+ memcpy((uintptr_t)p + (char *)iov1, iov[i].iov_base, iov[i].iov_len);
+ p += iov[i].iov_len;
+ }
+
+ if (ocall_writev(&ret, fd, (char *)iov1, (uint32)total_size, iovcnt,
+ has_offset, offset)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(iov1);
+ return -1;
+ }
+
+ BH_FREE(iov1);
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+ssize_t
+readv(int fd, const struct iovec *iov, int iovcnt)
+{
+ return readv_internal(fd, iov, iovcnt, false, 0);
+}
+
+ssize_t
+writev(int fd, const struct iovec *iov, int iovcnt)
+{
+ return writev_internal(fd, iov, iovcnt, false, 0);
+}
+
+ssize_t
+preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset)
+{
+ return readv_internal(fd, iov, iovcnt, true, offset);
+}
+
+ssize_t
+pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset)
+{
+ return writev_internal(fd, iov, iovcnt, true, offset);
+}
+
+off_t
+lseek(int fd, off_t offset, int whence)
+{
+ off_t ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_lseek(fd, offset, whence);
+#else
+ if (ocall_lseek(&ret, fd, (long)offset, whence) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+#endif
+
+ return ret;
+}
+
+int
+ftruncate(int fd, off_t length)
+{
+ int ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_ftruncate(fd, length);
+#else
+ if (ocall_ftruncate(&ret, fd, length) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+#endif
+
+ return ret;
+}
+
+int
+stat(const char *pathname, struct stat *statbuf)
+{
+ int ret;
+
+ if (statbuf == NULL)
+ return -1;
+
+ if (ocall_stat(&ret, pathname, (void *)statbuf, sizeof(struct stat))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+fstat(int fd, struct stat *statbuf)
+{
+ int ret;
+
+ if (statbuf == NULL)
+ return -1;
+
+ if (ocall_fstat(&ret, fd, (void *)statbuf, sizeof(struct stat))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+fstatat(int dirfd, const char *pathname, struct stat *statbuf, int flags)
+{
+ int ret;
+
+ if (statbuf == NULL)
+ return -1;
+
+ if (ocall_fstatat(&ret, dirfd, pathname, (void *)statbuf,
+ sizeof(struct stat), flags)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+fsync(int fd)
+{
+ int ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_fflush(fd);
+#else
+ if (ocall_fsync(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+#endif
+
+ return ret;
+}
+
+int
+fdatasync(int fd)
+{
+ int ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_fflush(fd);
+#else
+ if (ocall_fdatasync(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+#endif
+
+ return ret;
+}
+
+int
+mkdirat(int dirfd, const char *pathname, mode_t mode)
+{
+ int ret;
+
+ if (ocall_mkdirat(&ret, dirfd, pathname, mode) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+link(const char *oldpath, const char *newpath)
+{
+ int ret;
+
+ if (ocall_link(&ret, oldpath, newpath) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath,
+ int flags)
+{
+ int ret;
+
+ if (ocall_linkat(&ret, olddirfd, oldpath, newdirfd, newpath, flags)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+unlinkat(int dirfd, const char *pathname, int flags)
+{
+ int ret;
+
+ if (ocall_unlinkat(&ret, dirfd, pathname, flags) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+ssize_t
+readlink(const char *pathname, char *buf, size_t bufsiz)
+{
+ ssize_t ret;
+
+ if (buf == NULL)
+ return -1;
+
+ if (ocall_readlink(&ret, pathname, buf, bufsiz) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+ssize_t
+readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
+{
+ ssize_t ret;
+
+ if (buf == NULL)
+ return -1;
+
+ if (ocall_readlinkat(&ret, dirfd, pathname, buf, bufsiz) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+symlinkat(const char *target, int newdirfd, const char *linkpath)
+{
+ int ret;
+
+ if (ocall_symlinkat(&ret, target, newdirfd, linkpath) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+renameat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath)
+{
+ int ret;
+
+ if (ocall_renameat(&ret, olddirfd, oldpath, newdirfd, newpath)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+ioctl(int fd, unsigned long request, ...)
+{
+ int ret;
+ va_list args;
+
+ switch (request) {
+ case FIONREAD:
+ va_start(args, request);
+ int *arg = (int *)va_arg(args, int *);
+ if (ocall_ioctl(&ret, fd, request, arg, sizeof(*arg))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ va_end(args);
+ return -1;
+ }
+ va_end(args);
+ break;
+
+ default:
+ os_printf("ioctl failed: unknown request", request);
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+fcntl(int fd, int cmd, ... /* arg */)
+{
+ int ret;
+ va_list args;
+
+ switch (cmd) {
+ case F_GETFD:
+ case F_GETFL:
+ if (ocall_fcntl(&ret, fd, cmd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ break;
+
+ case F_DUPFD:
+ case F_SETFD:
+ case F_SETFL:
+ va_start(args, cmd);
+ long arg_1 = (long)va_arg(args, long);
+ if (ocall_fcntl_long(&ret, fd, cmd, arg_1) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ va_end(args);
+ return -1;
+ }
+ va_end(args);
+ break;
+
+ default:
+ os_printf("fcntl failed: unknown cmd %d.\n", cmd);
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+isatty(int fd)
+{
+ int ret;
+
+ if (ocall_isatty(&ret, fd) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == 0)
+ errno = get_errno();
+ return ret;
+}
+
+char *
+realpath(const char *path, char *resolved_path)
+{
+ int ret;
+ char buf[PATH_MAX] = { 0 };
+
+ if (ocall_realpath(&ret, path, buf, PATH_MAX) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return (char *)NULL;
+ }
+
+ if (ret != 0)
+ return (char *)NULL;
+
+ if (resolved_path) {
+ strcpy(resolved_path, buf);
+ }
+ else {
+ resolved_path = BH_MALLOC(strlen(buf) + 1);
+ if (resolved_path == NULL)
+ return NULL;
+ strcpy(resolved_path, buf);
+ }
+
+ return resolved_path;
+}
+
+int
+posix_fallocate(int fd, off_t offset, off_t len)
+{
+ int ret;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_posix_fallocate(fd, offset, len);
+#else
+ if (ocall_posix_fallocate(&ret, fd, offset, len) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+#endif
+
+ return ret;
+}
+
+int
+poll(struct pollfd *fds, nfds_t nfds, int timeout)
+{
+ int ret;
+
+ if (fds == NULL)
+ return -1;
+
+ if (ocall_poll(&ret, fds, nfds, timeout, sizeof(*fds) * nfds)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+getopt(int argc, char *const argv[], const char *optstring)
+{
+ int ret;
+ char **argv1;
+ char *p;
+ int i;
+ uint64 total_size = sizeof(char *) * (uint64)argc;
+
+ for (i = 0; i < argc; i++) {
+ total_size += strlen(argv[i]) + 1;
+ }
+
+ if (total_size >= UINT32_MAX)
+ return -1;
+
+ argv1 = BH_MALLOC((uint32)total_size);
+
+ if (argv1 == NULL)
+ return -1;
+
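+ /* Flatten argv into one buffer: the pointer array comes first, each
+ argv1[i] stores the offset of its string within the buffer, and the
+ strings themselves are copied right after the pointer array. */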
+ p = (char *)(uintptr_t)(sizeof(char *) * argc);
+
+ for (i = 0; i < argc; i++) {
+ argv1[i] = p;
+ strcpy((char *)argv1 + (uintptr_t)p, argv[i]);
+ p += ((uintptr_t)strlen(argv[i]) + 1);
+ }
+
+ if (ocall_getopt(&ret, argc, (char *)argv1, total_size, optstring)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(argv1);
+ return -1;
+ }
+
+ BH_FREE(argv1);
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+int
+sched_yield(void)
+{
+ int ret;
+
+ if (ocall_sched_yield(&ret) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ if (ret == -1)
+ errno = get_errno();
+ return ret;
+}
+
+ssize_t
+getrandom(void *buf, size_t buflen, unsigned int flags)
+{
+ sgx_status_t ret;
+
+ if (!buf || buflen > INT32_MAX || flags != 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ ret = sgx_read_rand(buf, buflen);
+ if (ret != SGX_SUCCESS) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ return (ssize_t)buflen;
+}
+
+#define RDRAND_RETRIES 3
+
+static int
+rdrand64_step(uint64 *seed)
+{
+ uint8 ok;
+ __asm__ volatile("rdseed %0; setc %1" : "=r"(*seed), "=qm"(ok));
+ return (int)ok;
+}
+
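+/* Note the inverted return convention: rdrand64_retry() returns -1
+ (non-zero) as soon as a random value is produced and 0 once every
+ retry has failed, so callers treat a zero result as the error case. */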
+static int
+rdrand64_retry(uint64 *rand, uint32 retries)
+{
+ uint32 count = 0;
+
+ while (count++ <= retries) {
+ if (rdrand64_step(rand)) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static uint32
+rdrand_get_bytes(uint8 *dest, uint32 n)
+{
+ uint8 *head_start = dest, *tail_start = NULL;
+ uint64 *block_start;
+ uint32 count, ltail, lhead, lblock;
+ uint64 i, temp_rand;
+
+ /* Get the address of the first 64-bit aligned block in the
+ destination buffer. */
+ if (((uintptr_t)head_start & (uintptr_t)7) == 0) {
+ /* already 8-byte aligned */
+ block_start = (uint64 *)head_start;
+ lhead = 0;
+ lblock = n & ~7;
+ }
+ else {
+ /* next 8-byte aligned */
+ block_start = (uint64 *)(((uintptr_t)head_start + 7) & ~(uintptr_t)7);
+ lhead = (uint32)((uintptr_t)block_start - (uintptr_t)head_start);
+ lblock = (n - lhead) & ~7;
+ }
+
+ /* Compute the number of 64-bit blocks and the remaining number
+ of bytes (the tail) */
+ ltail = n - lblock - lhead;
+ if (ltail > 0) {
+ tail_start = (uint8 *)block_start + lblock;
+ }
+
+ /* Populate the starting, mis-aligned section (the head) */
+ if (lhead > 0) {
+ if (!rdrand64_retry(&temp_rand, RDRAND_RETRIES)) {
+ return 0;
+ }
+ memcpy(head_start, &temp_rand, lhead);
+ }
+
+ /* Populate the central, aligned blocks */
+ count = lblock / 8;
+ for (i = 0; i < count; i++, block_start++) {
+ if (!rdrand64_retry(block_start, RDRAND_RETRIES)) {
+ return i * 8 + lhead;
+ }
+ }
+
+ /* Populate the tail */
+ if (ltail > 0) {
+ if (!rdrand64_retry(&temp_rand, RDRAND_RETRIES)) {
+ return count * 8 + lhead;
+ }
+
+ memcpy(tail_start, &temp_rand, ltail);
+ }
+
+ return n;
+}
+
+int
+getentropy(void *buffer, size_t length)
+{
+ uint32 size;
+
+ if (!buffer || length > INT32_MAX) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (length == 0) {
+ return 0;
+ }
+
+ size = rdrand_get_bytes(buffer, (uint32)length);
+ if (size != length) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+get_errno(void)
+{
+ int ret;
+
+ if (ocall_get_errno(&ret) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ return ret;
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.h
new file mode 100644
index 000000000..8690e1f69
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_file.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SGX_FILE_H
+#define _SGX_FILE_H
+
+#include "sgx_time.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define F_DUPFD 0
+#define F_GETFD 1
+#define F_SETFD 2
+#define F_GETFL 3
+#define F_SETFL 4
+
+#define FD_CLOEXEC 1
+
+#define O_PATH 010000000
+#define O_SEARCH O_PATH
+#define O_EXEC O_PATH
+
+#define O_ACCMODE (03 | O_SEARCH)
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+
+#define O_CREAT 0100
+#define O_EXCL 0200
+#define O_NOCTTY 0400
+#define O_TRUNC 01000
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_DSYNC 010000
+#define O_SYNC 04010000
+#define O_RSYNC 04010000
+#define O_DIRECTORY 0200000
+#define O_NOFOLLOW 0400000
+#define O_CLOEXEC 02000000
+
+#define O_ASYNC 020000
+#define O_DIRECT 040000
+#define O_LARGEFILE 0
+#define O_NOATIME 01000000
+#define O_PATH 010000000
+#define O_TMPFILE 020200000
+#define O_NDELAY O_NONBLOCK
+
+#define S_IFMT 0170000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFBLK 0060000
+#define S_IFREG 0100000
+#define S_IFIFO 0010000
+#define S_IFLNK 0120000
+#define S_IFSOCK 0140000
+
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
+#define S_ISCHR(mode) (((mode)&S_IFMT) == S_IFCHR)
+#define S_ISBLK(mode) (((mode)&S_IFMT) == S_IFBLK)
+#define S_ISREG(mode) (((mode)&S_IFMT) == S_IFREG)
+#define S_ISFIFO(mode) (((mode)&S_IFMT) == S_IFIFO)
+#define S_ISLNK(mode) (((mode)&S_IFMT) == S_IFLNK)
+#define S_ISSOCK(mode) (((mode)&S_IFMT) == S_IFSOCK)
+
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#define AT_SYMLINK_NOFOLLOW 0x100
+#define AT_REMOVEDIR 0x200
+#define AT_SYMLINK_FOLLOW 0x400
+
+#define POLLIN 0x001
+#define POLLPRI 0x002
+#define POLLOUT 0x004
+#define POLLERR 0x008
+#define POLLHUP 0x010
+#define POLLNVAL 0x020
+#define POLLRDNORM 0x040
+#define POLLRDBAND 0x080
+#define POLLWRNORM 0x100
+#define POLLWRBAND 0x200
+
+#define FIONREAD 0x541B
+
+#define PATH_MAX 4096
+
+/* Special value used to indicate openat should use the current
+ working directory. */
+#define AT_FDCWD -100
+
+typedef long __syscall_slong_t;
+
+typedef unsigned long dev_t;
+typedef unsigned long ino_t;
+typedef unsigned mode_t;
+typedef unsigned long nlink_t;
+typedef unsigned socklen_t;
+typedef long blksize_t;
+typedef long blkcnt_t;
+
+typedef int pid_t;
+typedef unsigned gid_t;
+typedef unsigned uid_t;
+
+typedef unsigned long nfds_t;
+
+typedef uintptr_t DIR;
+
+struct dirent {
+ ino_t d_ino;
+ off_t d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[256];
+};
+
+struct stat {
+ dev_t st_dev;
+ ino_t st_ino;
+ nlink_t st_nlink;
+
+ mode_t st_mode;
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned int __pad0;
+ dev_t st_rdev;
+ off_t st_size;
+ blksize_t st_blksize;
+ blkcnt_t st_blocks;
+
+ struct timespec st_atim;
+ struct timespec st_mtim;
+ struct timespec st_ctim;
+ long __unused[3];
+};
+
+struct iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+int
+open(const char *pathname, int flags, ...);
+int
+openat(int dirfd, const char *pathname, int flags, ...);
+int
+close(int fd);
+
+DIR *
+fdopendir(int fd);
+int
+closedir(DIR *dirp);
+void
+rewinddir(DIR *dirp);
+void
+seekdir(DIR *dirp, long loc);
+struct dirent *
+readdir(DIR *dirp);
+long
+telldir(DIR *dirp);
+
+ssize_t
+read(int fd, void *buf, size_t count);
+ssize_t
+readv(int fd, const struct iovec *iov, int iovcnt);
+ssize_t
+writev(int fd, const struct iovec *iov, int iovcnt);
+ssize_t
+preadv(int fd, const struct iovec *iov, int iovcnt, off_t offset);
+ssize_t
+pwritev(int fd, const struct iovec *iov, int iovcnt, off_t offset);
+
+off_t
+lseek(int fd, off_t offset, int whence);
+int
+ftruncate(int fd, off_t length);
+
+int
+stat(const char *pathname, struct stat *statbuf);
+int
+fstat(int fd, struct stat *statbuf);
+int
+fstatat(int dirfd, const char *pathname, struct stat *statbuf, int flags);
+
+int
+fsync(int fd);
+int
+fdatasync(int fd);
+
+int
+mkdirat(int dirfd, const char *pathname, mode_t mode);
+int
+link(const char *oldpath, const char *newpath);
+int
+linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath,
+ int flags);
+int
+unlinkat(int dirfd, const char *pathname, int flags);
+ssize_t
+readlink(const char *pathname, char *buf, size_t bufsiz);
+ssize_t
+readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz);
+int
+symlinkat(const char *target, int newdirfd, const char *linkpath);
+int
+renameat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath);
+
+int
+ioctl(int fd, unsigned long request, ...);
+int
+fcntl(int fd, int cmd, ... /* arg */);
+
+int
+isatty(int fd);
+
+char *
+realpath(const char *path, char *resolved_path);
+
+int
+posix_fallocate(int fd, off_t offset, off_t len);
+
+int
+poll(struct pollfd *fds, nfds_t nfds, int timeout);
+
+int
+getopt(int argc, char *const argv[], const char *optstring);
+
+int
+sched_yield(void);
+
+ssize_t
+getrandom(void *buf, size_t buflen, unsigned int flags);
+
+int
+getentropy(void *buffer, size_t length);
+
+int
+get_errno(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _SGX_FILE_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.c
new file mode 100644
index 000000000..322688980
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#if WASM_ENABLE_SGX_IPFS != 0
+
+#include "ssp_config.h"
+#include "bh_platform.h"
+#include "sgx_ipfs.h"
+
+#include <errno.h>
+
+#include "sgx_tprotected_fs.h"
+
+#define SGX_ERROR_FILE_LOWEST_ERROR_ID SGX_ERROR_FILE_BAD_STATUS
+#define SGX_ERROR_FILE_HIGHEST_ERROR_ID SGX_ERROR_FILE_CLOSE_FAILED
+
+// Internal buffer filled with zeroes and used when extending the size of
+// protected files.
+#define ZEROES_PADDING_LENGTH (32 * 1024)
+char zeroes_padding[ZEROES_PADDING_LENGTH] = { 0 };
+
+// The mapping between file descriptors and IPFS file pointers.
+static HashMap *ipfs_file_list;
+
+// Converts an SGX error code to a POSIX error code.
+static __wasi_errno_t
+convert_sgx_errno(int error)
+{
+ if (error >= SGX_ERROR_FILE_LOWEST_ERROR_ID
+ && error <= SGX_ERROR_FILE_HIGHEST_ERROR_ID) {
+ switch (error) {
+ /* The file is in bad status */
+ case SGX_ERROR_FILE_BAD_STATUS:
+ return ENOTRECOVERABLE;
+ /* The Key ID field is all zeros, can't re-generate the encryption
+ * key */
+ case SGX_ERROR_FILE_NO_KEY_ID:
+ return EKEYREJECTED;
+ /* The current file name is different from the original file name
+ * (not allowed, substitution attack) */
+ case SGX_ERROR_FILE_NAME_MISMATCH:
+ return EIO;
+ /* The file is not an SGX file */
+ case SGX_ERROR_FILE_NOT_SGX_FILE:
+ return EEXIST;
+ /* A recovery file can't be opened, so flush operation can't
+ * continue (only used when no EXXX is returned) */
+ case SGX_ERROR_FILE_CANT_OPEN_RECOVERY_FILE:
+ return EIO;
+ /* A recovery file can't be written, so flush operation can't
+ * continue (only used when no EXXX is returned) */
+ case SGX_ERROR_FILE_CANT_WRITE_RECOVERY_FILE:
+ return EIO;
+ /* When opening the file, recovery is needed, but the recovery
+ * process failed */
+ case SGX_ERROR_FILE_RECOVERY_NEEDED:
+ return EIO;
+ /* fflush operation (to disk) failed (only used when no EXXX is
+ * returned) */
+ case SGX_ERROR_FILE_FLUSH_FAILED:
+ return EIO;
+ /* fclose operation (to disk) failed (only used when no EXXX is
+ * returned) */
+ case SGX_ERROR_FILE_CLOSE_FAILED:
+ return EIO;
+ }
+ }
+
+ return error;
+}
+
+static void *
+fd2file(int fd)
+{
+ return bh_hash_map_find(ipfs_file_list, (void *)(intptr_t)fd);
+}
+
+static void
+ipfs_file_destroy(void *sgx_file)
+{
+ sgx_fclose(sgx_file);
+}
+
+// Writes a given number of zeroes into the file at the current offset.
+// The return value is zero if successful; otherwise non-zero.
+static int
+ipfs_write_zeroes(void *sgx_file, size_t len)
+{
+ int min_count;
+
+ while (len > 0) {
+ min_count = len < ZEROES_PADDING_LENGTH ? len : ZEROES_PADDING_LENGTH;
+
+ if (sgx_fwrite(zeroes_padding, 1, min_count, sgx_file) == 0) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ len -= min_count;
+ }
+
+ return 0;
+}
+
+int
+ipfs_init()
+{
+ ipfs_file_list =
+ bh_hash_map_create(32, true, (HashFunc)fd_hash, (KeyEqualFunc)fd_equal,
+ NULL, (ValueDestroyFunc)ipfs_file_destroy);
+
+ return ipfs_file_list != NULL ? BHT_OK : BHT_ERROR;
+}
+
+void
+ipfs_destroy()
+{
+ bh_hash_map_destroy(ipfs_file_list);
+}
+
+int
+ipfs_posix_fallocate(int fd, off_t offset, size_t len)
+{
+ void *sgx_file = fd2file(fd);
+ if (!sgx_file) {
+ return EBADF;
+ }
+
+ // The wrapper for fseek takes care of extending the file if sought beyond
+ // the end
+ if (ipfs_lseek(fd, offset + len, SEEK_SET) == -1) {
+ return errno;
+ }
+
+ // Make sure the file is allocated by flushing it
+ if (sgx_fflush(sgx_file) != 0) {
+ return errno;
+ }
+
+ return 0;
+}
+
+size_t
+ipfs_read(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset)
+{
+ int i;
+ off_t original_offset = 0;
+ void *sgx_file = fd2file(fd);
+ size_t read_result, number_of_read_bytes = 0;
+
+ if (!sgx_file) {
+ errno = EBADF;
+ return -1;
+ }
+
+ if (has_offset) {
+ // Save the current offset, to restore it after the read operation
+ original_offset = (off_t)sgx_ftell(sgx_file);
+
+ if (original_offset == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ // Move to the desired location
+ if (sgx_fseek(sgx_file, offset, SEEK_SET) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+
+ // For each element in the vector
+ for (i = 0; i < iovcnt; i++) {
+ if (iov[i].iov_len == 0)
+ continue;
+
+ read_result = sgx_fread(iov[i].iov_base, 1, iov[i].iov_len, sgx_file);
+ number_of_read_bytes += read_result;
+
+ if (read_result != iov[i].iov_len) {
+ if (!sgx_feof(sgx_file)) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+ }
+
+ if (has_offset) {
+ // Restore the position of the cursor
+ if (sgx_fseek(sgx_file, original_offset, SEEK_SET) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+
+ return number_of_read_bytes;
+}
+
+size_t
+ipfs_write(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset)
+{
+ int i;
+ off_t original_offset = 0;
+ void *sgx_file = fd2file(fd);
+ size_t write_result, number_of_written_bytes = 0;
+
+ if (!sgx_file) {
+ errno = EBADF;
+ return -1;
+ }
+
+ if (has_offset) {
+ // Save the current offset, to restore it after the write operation
+ original_offset = (off_t)sgx_ftell(sgx_file);
+
+ if (original_offset == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ // Move to the desired location
+ if (sgx_fseek(sgx_file, offset, SEEK_SET) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+
+ // For each element in the vector
+ for (i = 0; i < iovcnt; i++) {
+ if (iov[i].iov_len == 0)
+ continue;
+
+ write_result = sgx_fwrite(iov[i].iov_base, 1, iov[i].iov_len, sgx_file);
+ number_of_written_bytes += write_result;
+
+ if (write_result != iov[i].iov_len) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+
+ if (has_offset) {
+ // Restore the position of the cursor
+ if (sgx_fseek(sgx_file, original_offset, SEEK_SET) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+ }
+
+ return number_of_written_bytes;
+}
+
+int
+ipfs_close(int fd)
+{
+ void *sgx_file;
+
+ if (!bh_hash_map_remove(ipfs_file_list, (void *)(intptr_t)fd, NULL,
+ &sgx_file)) {
+ errno = EBADF;
+ return -1;
+ }
+
+ if (sgx_fclose(sgx_file)) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ return 0;
+}
+
+void *
+ipfs_fopen(int fd, int flags)
+{
+ // Mapping back the mode
+ const char *mode;
+
+ bool must_create = (flags & O_CREAT) != 0;
+ bool must_truncate = (flags & O_TRUNC) != 0;
+ bool must_append = (flags & O_APPEND) != 0;
+ bool read_only = (flags & O_ACCMODE) == O_RDONLY;
+ bool write_only = (flags & O_ACCMODE) == O_WRONLY;
+ bool read_write = (flags & O_ACCMODE) == O_RDWR;
+
+ // The mapping of the mode is similar to the table in the official
+ // specifications:
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/fopen.html
+ // Note that a file descriptor has already been obtained by the earlier
+ // POSIX open.
+ // If opened with a destructive mode ("w" or "w+"), the truncate operation
+ // already occurred and must not be repeated because this will invalidate
+ // the file descriptor obtained by POSIX. Therefore, we do NOT map to the
+ // modes that truncate the file ("w" and "w+"). Instead, we map to a
+ // non-destructive mode ("r+").
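+ // For example, a file opened with O_WRONLY | O_CREAT | O_TRUNC has
+ // already been truncated by the earlier POSIX openat, so it is
+ // reopened here with "r+" rather than "w".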
+
+ if (read_only)
+ mode = "r";
+ else if (write_only && must_create && must_truncate)
+ // Rather than "w", we map to a non-destructive mode
+ mode = "r+";
+ else if (write_only && must_create && must_append)
+ mode = "a";
+ else if (read_write && must_create && must_append)
+ mode = "a+";
+ else if (read_write)
+ // Rather than "w+", we map to a non-destructive mode
+ mode = "r+";
+ else
+ mode = NULL;
+
+ // Cannot map the requested access to the SGX IPFS
+ if (mode == NULL) {
+ errno = __WASI_ENOTCAPABLE;
+ return NULL;
+ }
+
+ // Determine the symbolic link of the file descriptor, because IPFS does not
+ // support opening a path relative to a file descriptor (i.e., openat).
+ // Using the symbolic link in /proc/self makes it possible to retrieve the
+ // same path as opened by the initial openat and respects the chroot of WAMR.
+ size_t ret;
+ char symbolic_path[32];
+ ret =
+ snprintf(symbolic_path, sizeof(symbolic_path), "/proc/self/fd/%d", fd);
+ if (ret >= sizeof(symbolic_path)) {
+ errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ // Resolve the symbolic link to the real absolute path, because IPFS can
+ // only open a file under the same file name it was initially created with.
+ // Otherwise, IPFS raises SGX_ERROR_FILE_NAME_MISMATCH.
+ char real_path[PATH_MAX] = { 0 };
+ ret = readlink(symbolic_path, real_path, PATH_MAX - 1);
+ if (ret == -1)
+ return NULL;
+
+ // Opening the file using the real path
+ void *sgx_file = sgx_fopen_auto_key(real_path, mode);
+
+ if (sgx_file == NULL) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return NULL;
+ }
+
+ if (!bh_hash_map_insert(ipfs_file_list, (void *)(intptr_t)fd, sgx_file)) {
+ errno = __WASI_ECANCELED;
+ sgx_fclose(sgx_file);
+ os_printf("An error occurred while inserting the IPFS file pointer in "
+ "the map.");
+ return NULL;
+ }
+
+ return sgx_file;
+}
+
+int
+ipfs_fflush(int fd)
+{
+ void *sgx_file = fd2file(fd);
+
+ if (!sgx_file) {
+ errno = EBADF;
+ return EOF;
+ }
+
+ int ret = sgx_fflush(sgx_file);
+
+ if (ret == 1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return EOF;
+ }
+
+ return ret;
+}
+
+off_t
+ipfs_lseek(int fd, off_t offset, int nwhence)
+{
+ off_t cursor_current_location;
+ void *sgx_file = fd2file(fd);
+ if (!sgx_file) {
+ errno = EBADF;
+ return -1;
+ }
+
+ // Optimization: if the offset is 0 and the whence is SEEK_CUR,
+ // this is equivalent to a call to ftell.
+ if (offset == 0 && nwhence == SEEK_CUR) {
+ cursor_current_location = (off_t)sgx_ftell(sgx_file);
+
+ if (cursor_current_location == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ return cursor_current_location;
+ }
+
+ int fseek_result = sgx_fseek(sgx_file, offset, nwhence);
+
+ if (fseek_result == 0) {
+ off_t new_offset = (off_t)sgx_ftell(sgx_file);
+
+ if (new_offset == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ return new_offset;
+ }
+ else {
+ // In the case where sgx_fseek returned an error
+ int sgx_error = sgx_ferror(sgx_file);
+ if (sgx_error != EINVAL) {
+ errno = convert_sgx_errno(sgx_error);
+ return -1;
+ }
+
+ // We must account for a difference in behavior between sgx_fseek and
+ // POSIX fseek. If the cursor is moved beyond the end of the file,
+ // sgx_fseek returns an error, whereas POSIX fseek accepts the move and
+ // fills the gap with zeroes on the next write. This implementation
+ // performs the zero-filling and moves the cursor past the end of the
+ // file immediately (during the fseek), rather than deferring the
+ // zeroes to the next write as POSIX does. This spares the runtime
+ // from having to track the cursor manually.
+
+ // Assume the error is raised because the cursor is moved beyond the end
+ // of the file.
+
+ // If the whence is the current cursor location, retrieve it
+ if (nwhence == SEEK_CUR) {
+ cursor_current_location = (off_t)sgx_ftell(sgx_file);
+ }
+
+ // Move the cursor at the end of the file
+ if (sgx_fseek(sgx_file, 0, SEEK_END) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ // Compute the number of zeroes to append.
+ int64_t number_of_zeroes;
+ switch (nwhence) {
+ case SEEK_SET:
+ number_of_zeroes = offset - sgx_ftell(sgx_file);
+ break;
+ case SEEK_END:
+ number_of_zeroes = offset;
+ break;
+ case SEEK_CUR:
+ number_of_zeroes =
+ cursor_current_location + offset - sgx_ftell(sgx_file);
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ // Write the missing zeroes
+ if (ipfs_write_zeroes(sgx_file, number_of_zeroes) != 0) {
+ return -1;
+ }
+
+ // Move again at the end of the file
+ if (sgx_fseek(sgx_file, 0, SEEK_END) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ return offset;
+ }
+}
+
+// The official API does not provide a way to truncate files.
+// Only file extension is supported.
+int
+ipfs_ftruncate(int fd, off_t len)
+{
+ void *sgx_file = fd2file(fd);
+ if (!sgx_file) {
+ errno = EBADF;
+ return -1;
+ }
+
+ off_t original_offset = sgx_ftell(sgx_file);
+
+ // Optimization path: if the requested length is smaller than the current
+ // offset, the file would have to shrink, which IPFS does not support.
+ if (len < original_offset) {
+ os_printf(
+ "SGX IPFS does not support truncate files to smaller sizes.\n");
+ return __WASI_ECANCELED;
+ }
+
+ // Move to the end of the file to determine whether this is
+ // a file extension or reduction.
+ if (sgx_fseek(sgx_file, 0, SEEK_END) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ off_t file_size = sgx_ftell(sgx_file);
+
+ // Reducing the file space is not supported by IPFS.
+ if (len < file_size) {
+ os_printf(
+ "SGX IPFS does not support truncate files to smaller sizes.\n");
+ return __WASI_ECANCELED;
+ }
+
+ // Increasing the size is equivalent to writing null bytes from the
+ // end of the file.
+ if (ipfs_write_zeroes(sgx_file, len - file_size) != 0) {
+ return -1;
+ }
+
+ // Restore the position of the cursor
+ if (sgx_fseek(sgx_file, original_offset, SEEK_SET) == -1) {
+ errno = convert_sgx_errno(sgx_ferror(sgx_file));
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif /* end of WASM_ENABLE_SGX_IPFS */ \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.h
new file mode 100644
index 000000000..e4de90274
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_ipfs.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _LIBC_WASI_SGX_PFS_H
+#define _LIBC_WASI_SGX_PFS_H
+
+#include "bh_hashmap.h"
+#include "wasmtime_ssp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int
+ipfs_init();
+void
+ipfs_destroy();
+int
+ipfs_posix_fallocate(int fd, off_t offset, size_t len);
+size_t
+ipfs_read(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset);
+size_t
+ipfs_write(int fd, const struct iovec *iov, int iovcnt, bool has_offset,
+ off_t offset);
+int
+ipfs_close(int fd);
+void *
+ipfs_fopen(int fd, int flags);
+int
+ipfs_fflush(int fd);
+off_t
+ipfs_lseek(int fd, off_t offset, int nwhence);
+int
+ipfs_ftruncate(int fd, off_t len);
+
+/**
+ * Whether two file descriptors are equal.
+ */
+inline static bool
+fd_equal(int left, int right)
+{
+ return left == right ? true : false;
+}
+
+/**
+ * Returns the file descriptor as a hash value.
+ */
+inline static uint32
+fd_hash(int fd)
+{
+ return (uint32)fd;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _LIBC_WASI_SGX_PFS_H */ \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_platform.c
new file mode 100644
index 000000000..b40eaf79c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_platform.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+#include "sgx_rsrv_mem_mngr.h"
+
+#if WASM_ENABLE_SGX_IPFS != 0
+#include "sgx_ipfs.h"
+#endif
+
+static os_print_function_t print_function = NULL;
+
+int
+bh_platform_init()
+{
+ int ret = BHT_OK;
+
+#if WASM_ENABLE_SGX_IPFS != 0
+ ret = ipfs_init();
+#endif
+
+ return ret;
+}
+
+void
+bh_platform_destroy()
+{
+#if WASM_ENABLE_SGX_IPFS != 0
+ ipfs_destroy();
+#endif
+}
+
+void *
+os_malloc(unsigned size)
+{
+ return malloc(size);
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return realloc(ptr, size);
+}
+
+void
+os_free(void *ptr)
+{
+ free(ptr);
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+int
+putchar(int c)
+{
+ return 0;
+}
+
+int
+puts(const char *s)
+{
+ return 0;
+}
+
+void
+os_set_print_function(os_print_function_t pf)
+{
+ print_function = pf;
+}
+
+#define FIXED_BUFFER_SIZE 4096
+
+int
+os_printf(const char *message, ...)
+{
+ int bytes_written = 0;
+
+ if (print_function != NULL) {
+ char msg[FIXED_BUFFER_SIZE] = { '\0' };
+ va_list ap;
+ va_start(ap, message);
+ vsnprintf(msg, FIXED_BUFFER_SIZE, message, ap);
+ va_end(ap);
+ bytes_written += print_function(msg);
+ }
+
+ return bytes_written;
+}
+
+int
+os_vprintf(const char *format, va_list arg)
+{
+ int bytes_written = 0;
+
+ if (print_function != NULL) {
+ char msg[FIXED_BUFFER_SIZE] = { '\0' };
+ vsnprintf(msg, FIXED_BUFFER_SIZE, format, arg);
+ bytes_written += print_function(msg);
+ }
+
+ return bytes_written;
+}
+
+char *
+strcpy(char *dest, const char *src)
+{
+ const unsigned char *s = src;
+ unsigned char *d = dest;
+
+ while ((*d++ = *s++)) {
+ }
+ return dest;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ int mprot = 0;
+ uint64 aligned_size, page_size;
+ void *ret = NULL;
+ sgx_status_t st = 0;
+
+ page_size = getpagesize();
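+ /* Round the requested size up to a whole number of pages. */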
+ aligned_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (aligned_size >= UINT32_MAX)
+ return NULL;
+
+ ret = sgx_alloc_rsrv_mem(aligned_size);
+ if (ret == NULL) {
+ os_printf("os_mmap(size=%u, aligned size=%lu, prot=0x%x) failed.", size,
+ aligned_size, prot);
+ return NULL;
+ }
+
+ if (prot & MMAP_PROT_READ)
+ mprot |= SGX_PROT_READ;
+ if (prot & MMAP_PROT_WRITE)
+ mprot |= SGX_PROT_WRITE;
+ if (prot & MMAP_PROT_EXEC)
+ mprot |= SGX_PROT_EXEC;
+
+ st = sgx_tprotect_rsrv_mem(ret, aligned_size, mprot);
+ if (st != SGX_SUCCESS) {
+ os_printf("os_mmap(size=%u, prot=0x%x) failed to set protect.", size,
+ prot);
+ sgx_free_rsrv_mem(ret, aligned_size);
+ return NULL;
+ }
+
+ return ret;
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ uint64 aligned_size, page_size;
+
+ page_size = getpagesize();
+ aligned_size = (size + page_size - 1) & ~(page_size - 1);
+ sgx_free_rsrv_mem(addr, aligned_size);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ int mprot = 0;
+ sgx_status_t st = 0;
+ uint64 aligned_size, page_size;
+
+ page_size = getpagesize();
+ aligned_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (prot & MMAP_PROT_READ)
+ mprot |= SGX_PROT_READ;
+ if (prot & MMAP_PROT_WRITE)
+ mprot |= SGX_PROT_WRITE;
+ if (prot & MMAP_PROT_EXEC)
+ mprot |= SGX_PROT_EXEC;
+ st = sgx_tprotect_rsrv_mem(addr, aligned_size, mprot);
+ if (st != SGX_SUCCESS)
+ os_printf("os_mprotect(addr=0x%" PRIx64 ", size=%u, prot=0x%x) failed.",
+ (uintptr_t)addr, size, prot);
+
+ return (st == SGX_SUCCESS ? 0 : -1);
+}
+
+void
+os_dcache_flush(void)
+{}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.c
new file mode 100644
index 000000000..7801e3534
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "sgx_pthread.h"
+#include "sgx_error.h"
+
+#ifndef SGX_DISABLE_WASI
+
+#define TRACE_FUNC() os_printf("undefined %s\n", __FUNCTION__)
+#define TRACE_OCALL_FAIL() os_printf("ocall %s failed!\n", __FUNCTION__)
+
+#ifndef SGX_THREAD_LOCK_INITIALIZER /* defined since sgxsdk-2.11 */
+/* sgxsdk doesn't support pthread_rwlock related APIs until
+ version 2.11, so we implement them ourselves. */
+int
+ocall_pthread_rwlock_init(int *p_ret, void **rwlock, void *attr);
+
+int
+ocall_pthread_rwlock_destroy(int *p_ret, void **rwlock);
+
+int
+ocall_pthread_rwlock_rdlock(int *p_ret, void **rwlock);
+
+int
+ocall_pthread_rwlock_wrlock(int *p_ret, void **rwlock);
+
+int
+ocall_pthread_rwlock_unlock(int *p_ret, void **rwlock);
+
+int
+pthread_rwlock_init(pthread_rwlock_t *rwlock, void *attr)
+{
+ int ret = -1;
+
+ if (ocall_pthread_rwlock_init(&ret, (void **)rwlock, NULL) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ (void)attr;
+ return ret;
+}
+
+int
+pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
+{
+ int ret = -1;
+
+ if (ocall_pthread_rwlock_destroy(&ret, (void *)*rwlock) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return ret;
+}
+
+int
+pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+ int ret = -1;
+
+ if (ocall_pthread_rwlock_rdlock(&ret, (void *)*rwlock) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return ret;
+}
+
+int
+pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+ int ret = -1;
+
+ if (ocall_pthread_rwlock_wrlock(&ret, (void *)*rwlock) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return ret;
+}
+
+int
+pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+{
+ int ret = -1;
+
+ if (ocall_pthread_rwlock_unlock(&ret, (void *)*rwlock) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ }
+ return ret;
+}
+#endif /* end of SGX_THREAD_LOCK_INITIALIZER */
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.h
new file mode 100644
index 000000000..01a3ae044
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_pthread.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SGX_PTHREAD_H
+#define _SGX_PTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef SGX_THREAD_LOCK_INITIALIZER /* defined since sgxsdk-2.11 */
+/* sgxsdk doesn't support pthread_rwlock related APIs until
+ version 2.11, so we implement them ourselves. */
+typedef uintptr_t pthread_rwlock_t;
+
+int
+pthread_rwlock_init(pthread_rwlock_t *rwlock, void *attr);
+int
+pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
+
+int
+pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
+int
+pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
+int
+pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
+#endif /* end of SGX_THREAD_LOCK_INITIALIZER */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _SGX_PTHREAD_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_rsrv_mem_mngr.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_rsrv_mem_mngr.h
new file mode 100644
index 000000000..5555d4d9f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_rsrv_mem_mngr.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011-2019 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file is copied from
+ * https://github.com/intel/linux-sgx/blob/4589daddd58bec7367a6a9de3fe301e6de17671a/common/inc/internal/sgx_rsrv_mem_mngr.h
+ * The reason we copied it here is that the official SGX SDK release has
+ * not included this header file yet.
+ */
+
+#pragma once
+
+#ifndef _SGX_RSRV_MEM_MNGR_H_
+#define _SGX_RSRV_MEM_MNGR_H_
+
+#include "stdint.h"
+#include "sgx_error.h"
+
+#define SGX_PROT_READ 0x1 /* page can be read */
+#define SGX_PROT_WRITE 0x2 /* page can be written */
+#define SGX_PROT_EXEC 0x4 /* page can be executed */
+#define SGX_PROT_NONE 0x0 /* page can not be accessed */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Allocate a range of EPC memory from the reserved memory area with RW
+ * permission
+ *
+ * Parameters:
+ * Inputs: length [in]: Size of region to be allocated in bytes. Page aligned.
+ * Return: Starting address of the new allocated memory area on success;
+ * otherwise NULL
+ */
+void *
+sgx_alloc_rsrv_mem(size_t length);
+
+/* Free a range of EPC memory from the reserved memory area
+ *
+ * Parameters:
+ * Inputs: addr[in]: Starting address of region to be freed. Page aligned.
+ * length[in]: The length of the memory to be freed in bytes.
+ * Page aligned.
+ * Return: 0 on success; otherwise -1
+ */
+int
+sgx_free_rsrv_mem(void *addr, size_t length);
+
+/* Modify the access permissions of the pages in the reserved memory area.
+ *
+ * Parameters:
+ * Inputs: addr[in]: Starting address of region which needs to change access
+ * permission. Page aligned.
+ * length[in]: The length of the memory to be manipulated in bytes.
+ * Page aligned.
+ * prot[in]: The target memory protection.
+ * Return: sgx_status_t - SGX_SUCCESS or failure as defined in sgx_error.h
+ */
+sgx_status_t
+sgx_tprotect_rsrv_mem(void *addr, size_t len, int prot);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.c
new file mode 100644
index 000000000..b52c18821
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+#ifndef SGX_DISABLE_WASI
+
+#define TRACE_OCALL_FAIL() os_printf("ocall %s failed!\n", __FUNCTION__)
+
+int
+ocall_raise(int *p_ret, int sig);
+
+int
+raise(int sig)
+{
+ int ret;
+
+ if (ocall_raise(&ret, sig) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.h
new file mode 100644
index 000000000..494342be3
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_signal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SGX_SIGNAL_H
+#define _SGX_SIGNAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Signals. */
+#define SIGHUP 1 /* Hangup (POSIX). */
+#define SIGINT 2 /* Interrupt (ANSI). */
+#define SIGQUIT 3 /* Quit (POSIX). */
+#define SIGILL 4 /* Illegal instruction (ANSI). */
+#define SIGTRAP 5 /* Trace trap (POSIX). */
+#define SIGABRT 6 /* Abort (ANSI). */
+#define SIGIOT 6 /* IOT trap (4.2 BSD). */
+#define SIGBUS 7 /* BUS error (4.2 BSD). */
+#define SIGFPE 8 /* Floating-point exception (ANSI). */
+#define SIGKILL 9 /* Kill, unblockable (POSIX). */
+#define SIGUSR1 10 /* User-defined signal 1 (POSIX). */
+#define SIGSEGV 11 /* Segmentation violation (ANSI). */
+#define SIGUSR2 12 /* User-defined signal 2 (POSIX). */
+#define SIGPIPE 13 /* Broken pipe (POSIX). */
+#define SIGALRM 14 /* Alarm clock (POSIX). */
+#define SIGTERM 15 /* Termination (ANSI). */
+#define SIGSTKFLT 16 /* Stack fault. */
+#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
+#define SIGCHLD 17 /* Child status has changed (POSIX). */
+#define SIGCONT 18 /* Continue (POSIX). */
+#define SIGSTOP 19 /* Stop, unblockable (POSIX). */
+#define SIGTSTP 20 /* Keyboard stop (POSIX). */
+#define SIGTTIN 21 /* Background read from tty (POSIX). */
+#define SIGTTOU 22 /* Background write to tty (POSIX). */
+#define SIGURG 23 /* Urgent condition on socket (4.2 BSD). */
+#define SIGXCPU 24 /* CPU limit exceeded (4.2 BSD). */
+#define SIGXFSZ 25 /* File size limit exceeded (4.2 BSD). */
+#define SIGVTALRM 26 /* Virtual alarm clock (4.2 BSD). */
+#define SIGPROF 27 /* Profiling alarm clock (4.2 BSD). */
+#define SIGWINCH 28 /* Window size change (4.3 BSD, Sun). */
+#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
+#define SIGIO 29 /* I/O now possible (4.2 BSD). */
+#define SIGPWR 30 /* Power failure restart (System V). */
+#define SIGSYS 31 /* Bad system call. */
+#define SIGUNUSED 31
+
+int
+raise(int sig);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _SGX_SIGNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.c
new file mode 100644
index 000000000..afb6d6014
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.c
@@ -0,0 +1,1222 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#ifndef SGX_DISABLE_WASI
+
+#define TRACE_OCALL_FAIL() os_printf("ocall %s failed!\n", __FUNCTION__)
+
+/** OCALLs prototypes **/
+int
+ocall_accept(int *p_ret, int sockfd, void *addr, uint32_t *addrlen,
+ uint32_t addr_size);
+
+int
+ocall_bind(int *p_ret, int sockfd, const void *addr, uint32_t addrlen);
+
+int
+ocall_close(int *p_ret, int fd);
+
+int
+ocall_connect(int *p_ret, int sockfd, void *addr, uint32_t addrlen);
+
+int
+ocall_fcntl_long(int *p_ret, int fd, int cmd, long arg);
+
+int
+ocall_getsockname(int *p_ret, int sockfd, void *addr, uint32_t *addrlen,
+ uint32_t addr_size);
+
+int
+ocall_getpeername(int *p_ret, int sockfd, void *addr, uint32_t *addrlen,
+ uint32_t addr_size);
+
+int
+ocall_getsockopt(int *p_ret, int sockfd, int level, int optname, void *val_buf,
+ unsigned int val_buf_size, void *len_buf);
+
+int
+ocall_listen(int *p_ret, int sockfd, int backlog);
+
+int
+ocall_recv(int *p_ret, int sockfd, void *buf, size_t len, int flags);
+
+int
+ocall_recvfrom(ssize_t *p_ret, int sockfd, void *buf, size_t len, int flags,
+ void *src_addr, uint32_t *addrlen, uint32_t addr_size);
+
+int
+ocall_recvmsg(ssize_t *p_ret, int sockfd, void *msg_buf,
+ unsigned int msg_buf_size, int flags);
+
+int
+ocall_send(int *p_ret, int sockfd, const void *buf, size_t len, int flags);
+
+int
+ocall_sendto(ssize_t *p_ret, int sockfd, const void *buf, size_t len, int flags,
+ void *dest_addr, uint32_t addrlen);
+
+int
+ocall_sendmsg(ssize_t *p_ret, int sockfd, void *msg_buf,
+ unsigned int msg_buf_size, int flags);
+
+int
+ocall_setsockopt(int *p_ret, int sockfd, int level, int optname, void *optval,
+ unsigned int optlen);
+
+int
+ocall_shutdown(int *p_ret, int sockfd, int how);
+
+int
+ocall_socket(int *p_ret, int domain, int type, int protocol);
+/** OCALLs prototypes end **/
+
+/** In-enclave implementation of POSIX functions **/
+static bool
+is_little_endian()
+{
+ long i = 0x01020304;
+ unsigned char *c = (unsigned char *)&i;
+ return (*c == 0x04) ? true : false;
+}
+
+static void
+swap32(uint8 *pData)
+{
+ uint8 value = *pData;
+ *pData = *(pData + 3);
+ *(pData + 3) = value;
+
+ value = *(pData + 1);
+ *(pData + 1) = *(pData + 2);
+ *(pData + 2) = value;
+}
+
+static void
+swap16(uint8 *pData)
+{
+ uint8 value = *pData;
+ *(pData) = *(pData + 1);
+ *(pData + 1) = value;
+}
+
+uint32
+htonl(uint32 value)
+{
+ uint32 ret;
+ if (is_little_endian()) {
+ ret = value;
+ swap32((uint8 *)&ret);
+ return ret;
+ }
+
+ return value;
+}
+
+uint32
+ntohl(uint32 value)
+{
+ return htonl(value);
+}
+
+uint16
+htons(uint16 value)
+{
+ uint16 ret;
+ if (is_little_endian()) {
+ ret = value;
+ swap16((uint8 *)&ret);
+ return ret;
+ }
+
+ return value;
+}
+
+static uint16
+ntohs(uint16 value)
+{
+ return htons(value);
+}
+
+/* Coming from musl, under MIT license */
+static int
+hexval(unsigned c)
+{
+ if (c - '0' < 10)
+ return c - '0';
+ c |= 32;
+ if (c - 'a' < 6)
+ return c - 'a' + 10;
+ return -1;
+}
+
+/* Coming from musl, under MIT license */
+static int
+inet_pton(int af, const char *restrict s, void *restrict a0)
+{
+ uint16_t ip[8];
+ unsigned char *a = a0;
+ int i, j, v, d, brk = -1, need_v4 = 0;
+
+ if (af == AF_INET) {
+ for (i = 0; i < 4; i++) {
+ for (v = j = 0; j < 3 && isdigit(s[j]); j++)
+ v = 10 * v + s[j] - '0';
+ if (j == 0 || (j > 1 && s[0] == '0') || v > 255)
+ return 0;
+ a[i] = v;
+ if (s[j] == 0 && i == 3)
+ return 1;
+ if (s[j] != '.')
+ return 0;
+ s += j + 1;
+ }
+ return 0;
+ }
+ else if (af != AF_INET6) {
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+
+ if (*s == ':' && *++s != ':')
+ return 0;
+
+ for (i = 0;; i++) {
+ if (s[0] == ':' && brk < 0) {
+ brk = i;
+ ip[i & 7] = 0;
+ if (!*++s)
+ break;
+ if (i == 7)
+ return 0;
+ continue;
+ }
+ for (v = j = 0; j < 4 && (d = hexval(s[j])) >= 0; j++)
+ v = 16 * v + d;
+ if (j == 0)
+ return 0;
+ ip[i & 7] = v;
+ if (!s[j] && (brk >= 0 || i == 7))
+ break;
+ if (i == 7)
+ return 0;
+ if (s[j] != ':') {
+ if (s[j] != '.' || (i < 6 && brk < 0))
+ return 0;
+ need_v4 = 1;
+ i++;
+ break;
+ }
+ s += j + 1;
+ }
+ if (brk >= 0) {
+ memmove(ip + brk + 7 - i, ip + brk, 2 * (i + 1 - brk));
+ for (j = 0; j < 7 - i; j++)
+ ip[brk + j] = 0;
+ }
+ for (j = 0; j < 8; j++) {
+ *a++ = ip[j] >> 8;
+ *a++ = ip[j];
+ }
+ if (need_v4 && inet_pton(AF_INET, (void *)s, a - 4) <= 0)
+ return 0;
+ return 1;
+}
+
+static int
+inet_addr(const char *p)
+{
+ struct in_addr a;
+ if (!inet_pton(AF_INET, p, &a))
+ return -1;
+ return a.s_addr;
+}
+/** In-enclave implementation of POSIX functions end **/
+
+static int
+textual_addr_to_sockaddr(const char *textual, int port, struct sockaddr_in *out)
+{
+ assert(textual);
+
+ out->sin_family = AF_INET;
+ out->sin_port = htons(port);
+ out->sin_addr.s_addr = inet_addr(textual);
+
+ return BHT_OK;
+}
+
+static int
+sockaddr_to_bh_sockaddr(const struct sockaddr *sockaddr, socklen_t socklen,
+ bh_sockaddr_t *bh_sockaddr)
+{
+ switch (sockaddr->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
+
+ assert(socklen >= sizeof(struct sockaddr_in));
+
+ bh_sockaddr->port = ntohs(addr->sin_port);
+ bh_sockaddr->addr_bufer.ipv4 = ntohl(addr->sin_addr.s_addr);
+ bh_sockaddr->is_ipv4 = true;
+ return BHT_OK;
+ }
+ default:
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+ }
+}
+
+static int
+bh_sockaddr_to_sockaddr(const bh_sockaddr_t *bh_sockaddr,
+ struct sockaddr *sockaddr, socklen_t *socklen)
+{
+ if (bh_sockaddr->is_ipv4) {
+ struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
+ addr->sin_port = htons(bh_sockaddr->port);
+ addr->sin_family = AF_INET;
+ addr->sin_addr.s_addr = htonl(bh_sockaddr->addr_bufer.ipv4);
+ *socklen = sizeof(*addr);
+ return BHT_OK;
+ }
+ else {
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+ }
+}
+
+static int
+os_socket_setbooloption(bh_socket_t socket, int level, int optname,
+ bool is_enabled)
+{
+ int option = (int)is_enabled;
+ int ret;
+
+ if (ocall_setsockopt(&ret, socket, level, optname, &option, sizeof(option))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return BHT_ERROR;
+ }
+
+ if (ret != 0) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+static int
+os_socket_getbooloption(bh_socket_t socket, int level, int optname,
+ bool *is_enabled)
+{
+ assert(is_enabled);
+
+ int optval;
+ socklen_t optval_size = sizeof(optval);
+ int ret;
+ if (ocall_getsockopt(&ret, socket, level, optname, &optval, optval_size,
+ &optval_size)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return BHT_ERROR;
+ }
+
+ if (ret != 0) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ *is_enabled = (bool)optval;
+ return BHT_OK;
+}
+
+int
+socket(int domain, int type, int protocol)
+{
+ int ret;
+
+ if (ocall_socket(&ret, domain, type, protocol) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen)
+{
+ int ret;
+ unsigned int val_buf_size = *optlen;
+
+ if (ocall_getsockopt(&ret, sockfd, level, optname, optval, val_buf_size,
+ (void *)optlen)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+setsockopt(int sockfd, int level, int optname, const void *optval,
+ socklen_t optlen)
+{
+ int ret;
+
+ if (ocall_setsockopt(&ret, sockfd, level, optname, (void *)optval, optlen)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+ssize_t
+sendmsg(int sockfd, const struct msghdr *msg, int flags)
+{
+ ssize_t ret;
+ int i;
+ char *p;
+ struct msghdr *msg1;
+
+ uint64 total_size = sizeof(struct msghdr) + (uint64)msg->msg_namelen
+ + (uint64)msg->msg_controllen;
+
+ total_size += sizeof(struct iovec) * (msg->msg_iovlen);
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ total_size += msg->msg_iov[i].iov_len;
+ }
+
+ if (total_size >= UINT32_MAX)
+ return -1;
+
+ msg1 = BH_MALLOC((uint32)total_size);
+
+ if (msg1 == NULL)
+ return -1;
+
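+ /* Serialize the msghdr into one contiguous buffer: pointer fields inside
+ msg1 are stored as offsets from the buffer start, and the untrusted
+ proxy (ocall_sendmsg) rebases them to real pointers before sendmsg. */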
+ p = (char *)(uintptr_t)sizeof(struct msghdr);
+
+ if (msg->msg_name != NULL) {
+ msg1->msg_name = p;
+ memcpy((uintptr_t)p + (char *)msg1, msg->msg_name,
+ (size_t)msg->msg_namelen);
+ p += msg->msg_namelen;
+ }
+
+ if (msg->msg_control != NULL) {
+ msg1->msg_control = p;
+ memcpy((uintptr_t)p + (char *)msg1, msg->msg_control,
+ (size_t)msg->msg_controllen);
+ p += msg->msg_controllen;
+ }
+
+ if (msg->msg_iov != NULL) {
+ msg1->msg_iov = (struct iovec *)p;
+ p += (uintptr_t)(sizeof(struct iovec) * (msg->msg_iovlen));
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ msg1->msg_iov[i].iov_base = p;
+ msg1->msg_iov[i].iov_len = msg->msg_iov[i].iov_len;
+ memcpy((uintptr_t)p + (char *)msg1, msg->msg_iov[i].iov_base,
+ (size_t)(msg->msg_iov[i].iov_len));
+ p += msg->msg_iov[i].iov_len;
+ }
+ }
+
+ if (ocall_sendmsg(&ret, sockfd, (void *)msg1, (uint32)total_size, flags)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(msg1);
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ /* release the temporary serialization buffer */
+ BH_FREE(msg1);
+ return ret;
+}
+
+ssize_t
+recvmsg(int sockfd, struct msghdr *msg, int flags)
+{
+ ssize_t ret;
+ int i;
+ char *p;
+ struct msghdr *msg1;
+
+ uint64 total_size = sizeof(struct msghdr) + (uint64)msg->msg_namelen
+ + (uint64)msg->msg_controllen;
+
+ total_size += sizeof(struct iovec) * (msg->msg_iovlen);
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ total_size += msg->msg_iov[i].iov_len;
+ }
+
+ if (total_size >= UINT32_MAX)
+ return -1;
+
+ msg1 = BH_MALLOC((uint32)total_size);
+
+ if (msg1 == NULL)
+ return -1;
+
+ memset(msg1, 0, total_size);
+
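+ /* Reserve space for the name, control and iov buffers after the msghdr,
+ again recording offsets rather than pointers; ocall_recvmsg rebases
+ them on the untrusted side and fills the buffers. */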
+ p = (char *)(uintptr_t)sizeof(struct msghdr);
+
+ if (msg->msg_name != NULL) {
+ msg1->msg_name = p;
+ p += msg->msg_namelen;
+ }
+
+ if (msg->msg_control != NULL) {
+ msg1->msg_control = p;
+ p += msg->msg_controllen;
+ }
+
+ if (msg->msg_iov != NULL) {
+ msg1->msg_iov = (struct iovec *)p;
+ p += (uintptr_t)(sizeof(struct iovec) * (msg->msg_iovlen));
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ msg1->msg_iov[i].iov_base = p;
+ msg1->msg_iov[i].iov_len = msg->msg_iov[i].iov_len;
+ p += msg->msg_iov[i].iov_len;
+ }
+ }
+
+ if (ocall_recvmsg(&ret, sockfd, (void *)msg1, (uint32)total_size, flags)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ BH_FREE(msg1);
+ return -1;
+ }
+
+ p = (char *)(uintptr_t)(sizeof(struct msghdr));
+
+ if (msg1->msg_name != NULL) {
+ memcpy(msg->msg_name, (uintptr_t)p + (char *)msg1,
+ (size_t)msg1->msg_namelen);
+ p += msg1->msg_namelen;
+ }
+
+ if (msg1->msg_control != NULL) {
+ memcpy(msg->msg_control, (uintptr_t)p + (char *)msg1,
+ (size_t)msg1->msg_controllen);
+ p += msg->msg_controllen;
+ }
+
+ if (msg1->msg_iov != NULL) {
+ p += (uintptr_t)(sizeof(struct iovec) * (msg1->msg_iovlen));
+
+ for (i = 0; i < msg1->msg_iovlen; i++) {
+ memcpy(msg->msg_iov[i].iov_base, (uintptr_t)p + (char *)msg1,
+ (size_t)(msg1->msg_iov[i].iov_len));
+ p += msg1->msg_iov[i].iov_len;
+ }
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ /* release the temporary serialization buffer */
+ BH_FREE(msg1);
+ return ret;
+}
+
+int
+shutdown(int sockfd, int how)
+{
+ int ret;
+
+ if (ocall_shutdown(&ret, sockfd, how) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
+ unsigned int *addrlen)
+{
+ struct sockaddr addr_tmp;
+ unsigned int len = sizeof(struct sockaddr);
+
+ if (ocall_accept(sock, server_sock, &addr_tmp, &len, len) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (*sock < 0) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_bind(bh_socket_t socket, const char *host, int *port)
+{
+ struct sockaddr_in addr;
+ struct linger ling;
+ unsigned int socklen;
+ int ret;
+
+ assert(host);
+ assert(port);
+
+ ling.l_onoff = 1;
+ ling.l_linger = 0;
+
+ if (ocall_fcntl_long(&ret, socket, F_SETFD, FD_CLOEXEC) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret < 0) {
+ goto fail;
+ }
+
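+ /* SO_LINGER with l_onoff=1 and l_linger=0 makes close() drop the
+ connection immediately instead of waiting for unsent data. */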
+ if (ocall_setsockopt(&ret, socket, SOL_SOCKET, SO_LINGER, &ling,
+ sizeof(ling))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret < 0) {
+ goto fail;
+ }
+
+ addr.sin_addr.s_addr = inet_addr(host);
+ addr.sin_port = htons(*port);
+ addr.sin_family = AF_INET;
+
+ if (ocall_bind(&ret, socket, &addr, sizeof(addr)) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret < 0) {
+ goto fail;
+ }
+
+ socklen = sizeof(addr);
+
+ if (ocall_getsockname(&ret, socket, (void *)&addr, &socklen, socklen)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1) {
+ goto fail;
+ }
+
+ *port = ntohs(addr.sin_port);
+
+ return BHT_OK;
+
+fail:
+ errno = get_errno();
+ return BHT_ERROR;
+}
+
+int
+os_socket_close(bh_socket_t socket)
+{
+ int ret;
+
+ if (ocall_close(&ret, socket) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_connect(bh_socket_t socket, const char *addr, int port)
+{
+ struct sockaddr_in addr_in = { 0 };
+ socklen_t addr_len = sizeof(struct sockaddr_in);
+ int ret = 0;
+
+ if ((ret = textual_addr_to_sockaddr(addr, port, &addr_in)) < 0) {
+ return ret;
+ }
+
+ if (ocall_connect(&ret, socket, &addr_in, addr_len) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp)
+{
+ int af;
+
+ if (!sock) {
+ return BHT_ERROR;
+ }
+
+ if (is_ipv4) {
+ af = AF_INET;
+ }
+ else {
+ errno = ENOSYS;
+ return BHT_ERROR;
+ }
+
+ if (is_tcp) {
+ if (ocall_socket(sock, af, SOCK_STREAM, IPPROTO_TCP) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ }
+ else {
+ if (ocall_socket(sock, af, SOCK_DGRAM, 0) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+ }
+
+ if (*sock == -1) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out)
+{
+ if (!cp)
+ return BHT_ERROR;
+
+ if (is_ipv4) {
+ if (inet_pton(AF_INET, cp, &out->ipv4) != 1) {
+ return BHT_ERROR;
+ }
+ /* Note: ntohl(INADDR_NONE) == INADDR_NONE */
+ out->ipv4 = ntohl(out->ipv4);
+ }
+ else {
+ if (inet_pton(AF_INET6, cp, out->ipv6) != 1) {
+ return BHT_ERROR;
+ }
+ for (int i = 0; i < 8; i++) {
+ out->ipv6[i] = ntohs(out->ipv6[i]);
+ }
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_listen(bh_socket_t socket, int max_client)
+{
+ int ret;
+
+ if (ocall_listen(&ret, socket, max_client) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_recv(bh_socket_t socket, void *buf, unsigned int len)
+{
+ int ret;
+
+ if (ocall_recv(&ret, socket, buf, len, 0) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ errno = ENOSYS;
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_recv_from(bh_socket_t socket, void *buf, unsigned int len, int flags,
+ bh_sockaddr_t *src_addr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len = sizeof(addr);
+ ssize_t ret;
+
+ if (ocall_recvfrom(&ret, socket, buf, len, flags, &addr, &addr_len,
+ addr_len)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ errno = ENOSYS;
+ return -1;
+ }
+
+ if (ret < 0) {
+ errno = get_errno();
+ return ret;
+ }
+
+ if (src_addr && addr_len > 0) {
+ if (sockaddr_to_bh_sockaddr((struct sockaddr *)&addr, addr_len,
+ src_addr)
+ == BHT_ERROR) {
+ return -1;
+ }
+ }
+
+ return ret;
+}
+
+int
+os_socket_send(bh_socket_t socket, const void *buf, unsigned int len)
+{
+ int ret;
+
+ if (ocall_send(&ret, socket, buf, len, 0) != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ errno = ENOSYS;
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+os_socket_send_to(bh_socket_t socket, const void *buf, unsigned int len,
+ int flags, const bh_sockaddr_t *dest_addr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len;
+ ssize_t ret;
+
+ if (bh_sockaddr_to_sockaddr(dest_addr, (struct sockaddr *)&addr, &addr_len)
+ == BHT_ERROR) {
+ return -1;
+ }
+
+ if (ocall_sendto(&ret, socket, buf, len, flags, (struct sockaddr *)&addr,
+ addr_len)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ errno = ENOSYS;
+ return -1;
+ }
+
+ if (ret == -1) {
+ errno = get_errno();
+ }
+
+ return ret;
+}
+
+int
+os_socket_shutdown(bh_socket_t socket)
+{
+ return shutdown(socket, SHUT_RDWR);
+}
+
+int
+os_socket_addr_resolve(const char *host, const char *service,
+ uint8_t *hint_is_tcp, uint8_t *hint_is_ipv4,
+ bh_addr_info_t *addr_info, size_t addr_info_size,
+ size_t *max_info_size)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len = sizeof(addr);
+ int ret;
+
+ if (ocall_getsockname(&ret, socket, (struct sockaddr *)&addr, &addr_len,
+ addr_len)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return BHT_ERROR;
+ }
+
+ if (ret != BHT_OK) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr, addr_len,
+ sockaddr);
+}
+
+int
+os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ struct sockaddr_in addr;
+ socklen_t addr_len = sizeof(addr);
+ int ret;
+
+ if (ocall_getpeername(&ret, socket, (void *)&addr, &addr_len, addr_len)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret != BHT_OK) {
+ errno = get_errno();
+ return BHT_ERROR;
+ }
+
+ return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr, addr_len,
+ sockaddr);
+}
+
+int
+os_socket_set_send_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_send_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_recv_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_recv_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_send_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_send_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_recv_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_recv_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_keep_alive(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
+ is_enabled);
+}
+
+int
+os_socket_get_keep_alive(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
+ is_enabled);
+}
+
+int
+os_socket_set_reuse_addr(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
+ is_enabled);
+}
+
+int
+os_socket_get_reuse_addr(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
+ is_enabled);
+}
+
+int
+os_socket_set_reuse_port(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
+ is_enabled);
+}
+
+int
+os_socket_get_reuse_port(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
+ is_enabled);
+}
+
+int
+os_socket_set_linger(bh_socket_t socket, bool is_enabled, int linger_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_linger(bh_socket_t socket, bool *is_enabled, int *linger_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_no_delay(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
+ is_enabled);
+}
+
+int
+os_socket_get_tcp_no_delay(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
+ is_enabled);
+}
+
+int
+os_socket_set_tcp_quick_ack(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
+ is_enabled);
+}
+
+int
+os_socket_get_tcp_quick_ack(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
+ is_enabled);
+}
+
+int
+os_socket_set_tcp_keep_idle(bh_socket_t socket, uint32 time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_keep_idle(bh_socket_t socket, uint32 *time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_keep_intvl(bh_socket_t socket, uint32 time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_keep_intvl(bh_socket_t socket, uint32 *time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_fastopen_connect(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
+ is_enabled);
+}
+
+int
+os_socket_get_tcp_fastopen_connect(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
+ is_enabled);
+}
+
+int
+os_socket_set_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool is_enabled)
+{
+ if (ipv6) {
+ return os_socket_setbooloption(socket, IPPROTO_IPV6,
+ IPV6_MULTICAST_LOOP, is_enabled);
+ }
+ else {
+ return os_socket_setbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
+ is_enabled);
+ }
+}
+
+int
+os_socket_get_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool *is_enabled)
+{
+ if (ipv6) {
+ return os_socket_getbooloption(socket, IPPROTO_IPV6,
+ IPV6_MULTICAST_LOOP, is_enabled);
+ }
+ else {
+ return os_socket_getbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
+ is_enabled);
+ }
+}
+
+int
+os_socket_set_ip_add_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_drop_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ip_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_multicast_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ip_multicast_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ipv6_only(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
+ is_enabled);
+}
+
+int
+os_socket_get_ipv6_only(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
+ is_enabled);
+}
+
+int
+os_socket_set_broadcast(bh_socket_t socket, bool is_enabled)
+{
+ return os_socket_setbooloption(socket, SOL_SOCKET, SO_BROADCAST,
+ is_enabled);
+}
+
+int
+os_socket_get_broadcast(bh_socket_t socket, bool *is_enabled)
+{
+ return os_socket_getbooloption(socket, SOL_SOCKET, SO_BROADCAST,
+ is_enabled);
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.h
new file mode 100644
index 000000000..edf977dd6
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_socket.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SGX_SOCKET_H
+#define _SGX_SOCKET_H
+
+#include "sgx_file.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+#define SO_REUSEPORT 15
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO_OLD 20
+#define SO_SNDTIMEO_OLD 21
+
+/* User-settable options (used with setsockopt) */
+#define TCP_NODELAY 1 /* Don't delay send to coalesce packets */
+#define TCP_MAXSEG 2 /* Set maximum segment size */
+#define TCP_CORK 3 /* Control sending of partial frames */
+#define TCP_KEEPIDLE 4 /* Start keepalives after this period */
+#define TCP_KEEPINTVL 5 /* Interval between keepalives */
+#define TCP_KEEPCNT 6 /* Number of keepalives before death */
+#define TCP_SYNCNT 7 /* Number of SYN retransmits */
+#define TCP_LINGER2 8 /* Life time of orphaned FIN-WAIT-2 state */
+#define TCP_DEFER_ACCEPT 9 /* Wake up listener only when data arrive */
+#define TCP_WINDOW_CLAMP 10 /* Bound advertised window */
+#define TCP_INFO 11 /* Information about this connection. */
+#define TCP_QUICKACK 12 /* Block/reenable quick ACKs. */
+#define TCP_CONGESTION 13 /* Congestion control algorithm. */
+#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
+#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
+#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
+#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
+#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
+#define TCP_REPAIR 19 /* TCP sock is under repair right now */
+#define TCP_REPAIR_QUEUE 20 /* Set TCP queue to repair */
+#define TCP_QUEUE_SEQ 21 /* Set sequence number of repaired queue. */
+#define TCP_REPAIR_OPTIONS 22 /* Repair TCP connection options */
+#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
+#define TCP_TIMESTAMP 24 /* TCP time stamp */
+#define TCP_NOTSENT_LOWAT \
+ 25 /* Limit number of unsent bytes in write queue. \
+ */
+#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info. */
+#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections. */
+#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection. */
+#define TCP_REPAIR_WINDOW 29 /* Get/set window parameters. */
+#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect. */
+#define TCP_ULP 31 /* Attach a ULP to a TCP connection. */
+#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions. */
+#define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie). */
+#define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie. */
+#define TCP_ZEROCOPY_RECEIVE 35
+#define TCP_INQ 36 /* Notify bytes available to read as a cmsg on read. */
+#define TCP_CM_INQ TCP_INQ
+#define TCP_TX_DELAY 37 /* Delay outgoing packets by XX usec. */
+
+/* Standard well-defined IP protocols. */
+#define IPPROTO_IP 0 /* Dummy protocol for TCP. */
+#define IPPROTO_ICMP 1 /* Internet Control Message Protocol. */
+#define IPPROTO_IGMP 2 /* Internet Group Management Protocol. */
+#define IPPROTO_IPIP 4 /* IPIP tunnels (older KA9Q tunnels use 94). */
+#define IPPROTO_TCP 6 /* Transmission Control Protocol. */
+#define IPPROTO_EGP 8 /* Exterior Gateway Protocol. */
+#define IPPROTO_PUP 12 /* PUP protocol. */
+#define IPPROTO_UDP 17 /* User Datagram Protocol. */
+#define IPPROTO_IDP 22 /* XNS IDP protocol. */
+#define IPPROTO_TP 29 /* SO Transport Protocol Class 4. */
+#define IPPROTO_DCCP 33 /* Datagram Congestion Control Protocol. */
+#define IPPROTO_IPV6 41 /* IPv6 header. */
+#define IPPROTO_RSVP 46 /* Reservation Protocol. */
+#define IPPROTO_GRE 47 /* General Routing Encapsulation. */
+#define IPPROTO_ESP 50 /* encapsulating security payload. */
+#define IPPROTO_AH 51 /* authentication header. */
+#define IPPROTO_MTP 92 /* Multicast Transport Protocol. */
+#define IPPROTO_BEETPH 94 /* IP option pseudo header for BEET. */
+#define IPPROTO_ENCAP 98 /* Encapsulation Header. */
+#define IPPROTO_PIM 103 /* Protocol Independent Multicast. */
+#define IPPROTO_COMP 108 /* Compression Header Protocol. */
+#define IPPROTO_SCTP 132 /* Stream Control Transmission Protocol. */
+#define IPPROTO_UDPLITE 136 /* UDP-Lite protocol. */
+#define IPPROTO_MPLS 137 /* MPLS in IP. */
+#define IPPROTO_RAW 255 /* Raw IP packets. */
+
+#define IP_ROUTER_ALERT 5 /* bool */
+#define IP_PKTINFO 8 /* bool */
+#define IP_PKTOPTIONS 9
+#define IP_PMTUDISC 10 /* obsolete name? */
+#define IP_MTU_DISCOVER 10 /* int; see below */
+#define IP_RECVERR 11 /* bool */
+#define IP_RECVTTL 12 /* bool */
+#define IP_RECVTOS 13 /* bool */
+#define IP_MTU 14 /* int */
+#define IP_FREEBIND 15
+#define IP_IPSEC_POLICY 16
+#define IP_XFRM_POLICY 17
+#define IP_PASSSEC 18
+#define IP_TRANSPARENT 19
+#define IP_MULTICAST_ALL 49 /* bool */
+
+/* TProxy original addresses */
+#define IP_ORIGDSTADDR 20
+#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
+#define IP_MINTTL 21
+#define IP_NODEFRAG 22
+#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
+#define IP_RECVFRAGSIZE 25
+#define IP_PMTUDISC_DONT 0
+#define IP_PMTUDISC_WANT 1
+#define IP_PMTUDISC_DO 2
+#define IP_PMTUDISC_PROBE 3
+#define IP_PMTUDISC_INTERFACE 4
+#define IP_PMTUDISC_OMIT 5
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+#define IP_UNBLOCK_SOURCE 37
+#define IP_BLOCK_SOURCE 38
+#define IP_ADD_SOURCE_MEMBERSHIP 39
+#define IP_DROP_SOURCE_MEMBERSHIP 40
+#define IP_MSFILTER 41
+#define IP_MULTICAST_ALL 49
+#define IP_UNICAST_IF 50
+
+#define IPV6_ADDRFORM 1
+#define IPV6_2292PKTINFO 2
+#define IPV6_2292HOPOPTS 3
+#define IPV6_2292DSTOPTS 4
+#define IPV6_2292RTHDR 5
+#define IPV6_2292PKTOPTIONS 6
+#define IPV6_CHECKSUM 7
+#define IPV6_2292HOPLIMIT 8
+
+#define SCM_SRCRT IPV6_RXSRCRT
+
+#define IPV6_NEXTHOP 9
+#define IPV6_AUTHHDR 10
+#define IPV6_UNICAST_HOPS 16
+#define IPV6_MULTICAST_IF 17
+#define IPV6_MULTICAST_HOPS 18
+#define IPV6_MULTICAST_LOOP 19
+#define IPV6_JOIN_GROUP 20
+#define IPV6_LEAVE_GROUP 21
+#define IPV6_ROUTER_ALERT 22
+#define IPV6_MTU_DISCOVER 23
+#define IPV6_MTU 24
+#define IPV6_RECVERR 25
+#define IPV6_V6ONLY 26
+#define IPV6_JOIN_ANYCAST 27
+#define IPV6_LEAVE_ANYCAST 28
+#define IPV6_MULTICAST_ALL 29
+#define IPV6_ROUTER_ALERT_ISOLATE 30
+#define IPV6_IPSEC_POLICY 34
+#define IPV6_XFRM_POLICY 35
+#define IPV6_HDRINCL 36
+
+/* Advanced API (RFC3542) (1). */
+#define IPV6_RECVPKTINFO 49
+#define IPV6_PKTINFO 50
+#define IPV6_RECVHOPLIMIT 51
+#define IPV6_HOPLIMIT 52
+#define IPV6_RECVHOPOPTS 53
+#define IPV6_HOPOPTS 54
+#define IPV6_RTHDRDSTOPTS 55
+#define IPV6_RECVRTHDR 56
+#define IPV6_RTHDR 57
+#define IPV6_RECVDSTOPTS 58
+#define IPV6_DSTOPTS 59
+#define IPV6_RECVPATHMTU 60
+#define IPV6_PATHMTU 61
+#define IPV6_DONTFRAG 62
+
+/* Advanced API (RFC3542) (2). */
+#define IPV6_RECVTCLASS 66
+#define IPV6_TCLASS 67
+
+#define IPV6_AUTOFLOWLABEL 70
+
+/* RFC5014. */
+#define IPV6_ADDR_PREFERENCES 72
+
+/* RFC5082. */
+#define IPV6_MINHOPCOUNT 73
+
+#define IPV6_ORIGDSTADDR 74
+#define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR
+#define IPV6_TRANSPARENT 75
+#define IPV6_UNICAST_IF 76
+#define IPV6_RECVFRAGSIZE 77
+#define IPV6_FREEBIND 78
+
+#define SOCK_STREAM 1
+#define SOCK_DGRAM 2
+
+#define MSG_OOB 0x0001
+#define MSG_PEEK 0x0002
+#define MSG_DONTROUTE 0x0004
+#define MSG_CTRUNC 0x0008
+#define MSG_PROXY 0x0010
+#define MSG_TRUNC 0x0020
+#define MSG_DONTWAIT 0x0040
+#define MSG_EOR 0x0080
+#define MSG_WAITALL 0x0100
+#define MSG_FIN 0x0200
+#define MSG_SYN 0x0400
+#define MSG_CONFIRM 0x0800
+#define MSG_RST 0x1000
+#define MSG_ERRQUEUE 0x2000
+#define MSG_NOSIGNAL 0x4000
+#define MSG_MORE 0x8000
+#define MSG_WAITFORONE 0x10000
+#define MSG_BATCH 0x40000
+#define MSG_FASTOPEN 0x20000000
+#define MSG_CMSG_CLOEXEC 0x40000000
+
+#define SHUT_RD 0
+#define SHUT_WR 1
+#define SHUT_RDWR 2
+
+/* Address families. */
+#define AF_INET 2 /* IP protocol family. */
+#define AF_INET6 10 /* IP version 6. */
+
+/* Standard well-defined IP protocols. */
+#define IPPROTO_TCP 6 /* Transmission Control Protocol. */
+
+/* Types of sockets. */
+#define SOCK_DGRAM \
+ 2 /* Connectionless, unreliable datagrams of fixed maximum length. */
+
+struct msghdr {
+ void *msg_name;
+ socklen_t msg_namelen;
+ struct iovec *msg_iov;
+ int msg_iovlen;
+ void *msg_control;
+ socklen_t msg_controllen;
+ int msg_flags;
+};
+
+/* Internet address. */
+struct in_addr {
+ uint32_t s_addr;
+};
+typedef struct in_addr in_addr_t;
+
+/* Structure describing an Internet socket address. */
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ uint16_t sin_family;
+ uint16_t sin_port; /* Port number. */
+ struct in_addr sin_addr; /* Internet address. */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(uint16_t) - sizeof(uint16_t)
+ - sizeof(struct in_addr)];
+};
+
+/* Structure used to manipulate the SO_LINGER option. */
+struct linger {
+ int l_onoff; /* Nonzero to linger on close. */
+ int l_linger; /* Time to linger. */
+};
+
+/* Structure describing a generic socket address. */
+struct sockaddr {
+ unsigned short int sa_family; /* Common data: address family and length. */
+ char sa_data[14]; /* Address data. */
+};
+
+uint32_t
+ntohl(uint32_t value);
+
+uint32_t
+htonl(uint32_t value);
+
+uint16_t
+htons(uint16_t value);
+
+int
+socket(int domain, int type, int protocol);
+
+int
+getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen);
+
+int
+setsockopt(int sockfd, int level, int optname, const void *optval,
+ socklen_t optlen);
+
+ssize_t
+sendmsg(int sockfd, const struct msghdr *msg, int flags);
+
+ssize_t
+recvmsg(int sockfd, struct msghdr *msg, int flags);
+
+int
+shutdown(int sockfd, int how);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _SGX_SOCKET_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_thread.c
new file mode 100644
index 000000000..1cb2f5d09
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_thread.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#ifndef SGX_DISABLE_PTHREAD
+typedef struct {
+ thread_start_routine_t start;
+ void *arg;
+} thread_wrapper_arg;
+
+static void *
+os_thread_wrapper(void *arg)
+{
+ thread_wrapper_arg *targ = arg;
+ thread_start_routine_t start_func = targ->start;
+ void *thread_arg = targ->arg;
+
+#if 0
+ os_printf("THREAD CREATED %p\n", &targ);
+#endif
+ BH_FREE(targ);
+ start_func(thread_arg);
+ return NULL;
+}
+
+int
+os_thread_create_with_prio(korp_tid *tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ thread_wrapper_arg *targ;
+
+ assert(tid);
+ assert(start);
+
+ targ = (thread_wrapper_arg *)BH_MALLOC(sizeof(*targ));
+ if (!targ) {
+ return BHT_ERROR;
+ }
+
+ targ->start = start;
+ targ->arg = arg;
+
+ if (pthread_create(tid, NULL, os_thread_wrapper, targ) != 0) {
+ BH_FREE(targ);
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_thread_create(korp_tid *tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+#endif
+
+korp_tid
+os_self_thread()
+{
+#ifndef SGX_DISABLE_PTHREAD
+ return pthread_self();
+#else
+ return 0;
+#endif
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
+ *mutex = m;
+#endif
+ return BHT_OK;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ pthread_mutex_destroy(mutex);
+#endif
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ return pthread_mutex_lock(mutex);
+#else
+ return 0;
+#endif
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ return pthread_mutex_unlock(mutex);
+#else
+ return 0;
+#endif
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ pthread_cond_t c = PTHREAD_COND_INITIALIZER;
+ *cond = c;
+#endif
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ pthread_cond_destroy(cond);
+#endif
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ assert(cond);
+ assert(mutex);
+
+ if (pthread_cond_wait(cond, mutex) != BHT_OK)
+ return BHT_ERROR;
+
+#endif
+ return BHT_OK;
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ os_printf("warning: SGX pthread_cond_timedwait isn't supported, "
+ "calling pthread_cond_wait instead!\n");
+ return BHT_ERROR;
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ assert(cond);
+
+ if (pthread_cond_signal(cond) != BHT_OK)
+ return BHT_ERROR;
+
+#endif
+ return BHT_OK;
+}
+
+int
+os_cond_broadcast(korp_cond *cond)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ assert(cond);
+
+ if (pthread_cond_broadcast(cond) != BHT_OK)
+ return BHT_ERROR;
+
+#endif
+ return BHT_OK;
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ return pthread_join(thread, value_ptr);
+#else
+ return 0;
+#endif
+}
+
+int
+os_thread_detach(korp_tid thread)
+{
+ /* SGX pthread_detach isn't provided, return directly. */
+ return 0;
+}
+
+void
+os_thread_exit(void *retval)
+{
+#ifndef SGX_DISABLE_PTHREAD
+ pthread_exit(retval);
+#else
+ return;
+#endif
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+ /* TODO: get sgx stack boundary */
+ return NULL;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.c
new file mode 100644
index 000000000..d090083ef
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+#define TRACE_FUNC() os_printf("undefined %s\n", __FUNCTION__)
+#define TRACE_OCALL_FAIL() os_printf("ocall %s failed!\n", __FUNCTION__)
+
+int
+ocall_clock_gettime(int *p_ret, unsigned clock_id, void *tp_buf,
+ unsigned int tp_buf_size);
+int
+ocall_clock_getres(int *p_ret, int clock_id, void *res_buf,
+ unsigned int res_buf_size);
+int
+ocall_utimensat(int *p_ret, int dirfd, const char *pathname,
+ const void *times_buf, unsigned int times_buf_size, int flags);
+int
+ocall_futimens(int *p_ret, int fd, const void *times_buf,
+ unsigned int times_buf_size);
+int
+ocall_clock_nanosleep(int *p_ret, unsigned clock_id, int flags,
+ const void *req_buf, unsigned int req_buf_size,
+ const void *rem_buf, unsigned int rem_buf_size);
+
+uint64
+os_time_get_boot_microsecond()
+{
+#ifndef SGX_DISABLE_WASI
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+ return 0;
+ }
+
+ return ((uint64)ts.tv_sec) * 1000 * 1000 + ((uint64)ts.tv_nsec) / 1000;
+#else
+ return 0;
+#endif
+}
+
+#ifndef SGX_DISABLE_WASI
+
+int
+clock_getres(int clock_id, struct timespec *res)
+{
+ int ret;
+
+ if (ocall_clock_getres(&ret, clock_id, (void *)res, sizeof(struct timespec))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+clock_gettime(clockid_t clock_id, struct timespec *tp)
+{
+ int ret;
+
+ if (ocall_clock_gettime(&ret, clock_id, (void *)tp, sizeof(struct timespec))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+utimensat(int dirfd, const char *pathname, const struct timespec times[2],
+ int flags)
+{
+ int ret;
+
+ if (ocall_utimensat(&ret, dirfd, pathname, (void *)times,
+ sizeof(struct timespec) * 2, flags)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+futimens(int fd, const struct timespec times[2])
+{
+ int ret;
+
+ if (ocall_futimens(&ret, fd, (void *)times, sizeof(struct timespec) * 2)
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+int
+clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *request,
+ struct timespec *remain)
+{
+ int ret;
+
+ if (ocall_clock_nanosleep(&ret, clock_id, flags, (void *)request,
+ sizeof(struct timespec), (void *)remain,
+ sizeof(struct timespec))
+ != SGX_SUCCESS) {
+ TRACE_OCALL_FAIL();
+ return -1;
+ }
+
+ if (ret == -1)
+ errno = get_errno();
+
+ return ret;
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.h
new file mode 100644
index 000000000..8267f1fa5
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_time.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _SGX_TIME_H
+#define _SGX_TIME_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
+
+#define UTIME_NOW 0x3fffffff
+#define UTIME_OMIT 0x3ffffffe
+#define TIMER_ABSTIME 1
+
+typedef long int time_t;
+
+typedef int clockid_t;
+
+struct timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+
+int
+clock_getres(int clock_id, struct timespec *res);
+
+int
+clock_gettime(clockid_t clock_id, struct timespec *tp);
+
+int
+utimensat(int dirfd, const char *pathname, const struct timespec times[2],
+ int flags);
+int
+futimens(int fd, const struct timespec times[2]);
+int
+clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *request,
+ struct timespec *remain);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _SGX_TIME_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_wamr.edl b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_wamr.edl
new file mode 100644
index 000000000..7cb4817fd
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/sgx_wamr.edl
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+enclave {
+ include "stdint.h"
+ include "stdbool.h"
+ include "unistd.h"
+
+ untrusted {
+ int ocall_open([in, string]const char *pathname, int flags,
+ bool has_mode, unsigned mode);
+ int ocall_openat(int dirfd,
+ [in, string]const char *pathname, int flags,
+ bool has_mode, unsigned mode);
+ int ocall_close(int fd);
+ ssize_t ocall_read(int fd, [out, size=read_size]void *buf,
+ size_t read_size);
+ off_t ocall_lseek(int fd, off_t offset, int whence);
+ int ocall_ftruncate(int fd, off_t length);
+ int ocall_fsync(int fd);
+ int ocall_fdatasync(int fd);
+ int ocall_isatty(int fd);
+ void ocall_fdopendir(int fd, [out]void **p_dirp);
+ /* implementation related to multiple threads */
+ void *ocall_readdir([user_check]void *dirp);
+ void ocall_rewinddir([user_check]void *dirp);
+ void ocall_seekdir([user_check]void *dirp, long loc);
+ long ocall_telldir([user_check]void *dirp);
+ int ocall_closedir([user_check]void *dirp);
+
+ int ocall_stat([in, string]const char *pathname,
+ [out, size=buf_len]void *buf,
+ unsigned int buf_len);
+ int ocall_fstat(int fd, [out, size=buf_len]void *buf,
+ unsigned int buf_len);
+ int ocall_fstatat(int dirfd, [in, string]const char *pathname,
+ [out, size=buf_len]void *buf,
+ unsigned int buf_len, int flags);
+
+ int ocall_mkdirat(int dirfd, [in, string]const char *pathname,
+ unsigned mode);
+ int ocall_link([in, string] const char *oldpath,
+ [in, string] const char *newpath);
+ int ocall_linkat(int olddirfd, [in, string]const char *oldpath,
+ int newdirfd, [in, string]const char *newpath,
+ int flags);
+ int ocall_unlinkat(int dirfd, [in, string]const char *pathname,
+ int flags);
+ ssize_t ocall_readlink([in, string]const char *pathname,
+ [out, size=bufsiz]char *buf,
+ size_t bufsiz);
+ ssize_t ocall_readlinkat(int dirfd,
+ [in, string]const char *pathname,
+ [out, size=bufsiz]char *buf,
+ size_t bufsiz);
+ int ocall_renameat(int olddirfd,
+ [in, string]const char *oldpath,
+ int newdirfd,
+ [in, string]const char *newpath);
+ int ocall_symlinkat([in ,string]const char *target,
+ int newdirfd,
+ [in, string]const char *linkpath);
+
+ int ocall_ioctl(int fd, unsigned long request,
+ [out, size=arg_len]void *arg,
+ unsigned int arg_len);
+ int ocall_fcntl(int fd, int cmd);
+ int ocall_fcntl_long(int fd, int cmd, long arg);
+
+ int ocall_realpath([in, string]const char *path,
+ [out, size=buf_len]char *buf,
+ unsigned int buf_len);
+ int ocall_posix_fallocate(int fd, off_t offset, off_t len);
+ int ocall_poll([in, out, size=fds_len]void *fds, unsigned nfds,
+ int timeout, unsigned int fds_len);
+
+ int ocall_getopt(int argc,
+ [in, size=argv_buf_len]char *argv_buf,
+ unsigned int argv_buf_len,
+ [in, string]const char *optstring);
+ ssize_t ocall_readv(int fd,
+ [in, out, size=buf_size]char *iov_buf,
+ unsigned int buf_size, int iovcnt,
+ bool has_offset, off_t offset);
+ ssize_t ocall_writev(int fd,
+ [in, size=buf_size]char *iov_buf,
+ unsigned int buf_size, int iovcnt,
+ bool has_offset, off_t offset);
+
+ /* time clock */
+ int ocall_clock_gettime(unsigned clock_id,
+ [out, size=tp_buf_size]void *tp_buf,
+ unsigned int tp_buf_size);
+ int ocall_clock_getres(int clock_id,
+ [out, size=res_buf_size]void *res_buf,
+ unsigned int res_buf_size);
+ int ocall_utimensat(int dirfd, [in, string]const char *pathname,
+ [in, size=times_buf_size]const void *times_buf,
+ unsigned int times_buf_size, int flags);
+ int ocall_futimens(int fd, [in, size=times_buf_size]const void *times_buf,
+ unsigned int times_buf_size);
+ int ocall_clock_nanosleep(unsigned clock_id, int flags,
+ [in, size=req_buf_size]const void *req_buf,
+ unsigned int req_buf_size,
+ [out, size=rem_buf_size]void *rem_buf,
+ unsigned int rem_buf_size);
+
+ int ocall_raise(int sig);
+
+ int ocall_sched_yield();
+
+ int ocall_pthread_rwlock_init([out]void **rwlock, [user_check]void *attr);
+ int ocall_pthread_rwlock_destroy([user_check]void *rwlock);
+ int ocall_pthread_rwlock_rdlock([user_check]void *rwlock);
+ int ocall_pthread_rwlock_wrlock([user_check]void *rwlock);
+ int ocall_pthread_rwlock_unlock([user_check]void *rwlock);
+
+ int ocall_get_errno();
+
+ /* sockets */
+ int ocall_accept(int sockfd, [in, size=addr_size]void *addr,
+ [in, size=4] uint32_t *addrlen, uint32_t addr_size);
+ int ocall_bind(int sockfd, [in, size=addrlen]const void *addr,
+ uint32_t addrlen);
+ int ocall_connect(int sockfd, [in, size=addrlen]void *addr, uint32_t addrlen);
+ int ocall_getsockname(int sockfd, [out, size=addr_size]void *addr,
+ [in, out, size=4]uint32_t *addrlen, uint32_t addr_size);
+ int ocall_getpeername(int sockfd, [out, size=addr_size]void *addr,
+ [in, out, size=4]uint32_t *addrlen, uint32_t addr_size);
+ int ocall_getsockopt(int sockfd, int level, int optname,
+ [out, size=val_buf_size]void *val_buf,
+ unsigned int val_buf_size,
+ [in, out, size=4]void *len_buf);
+ int ocall_listen(int sockfd, int backlog);
+ int ocall_recv(int sockfd, [out, size=len]void *buf, size_t len, int flags);
+ ssize_t ocall_recvfrom(int sockfd, [out, size=len]void *buf, size_t len, int flags,
+ [out, size=addr_size]void *src_addr,
+ [in, out, size=4]uint32_t *addrlen, uint32_t addr_size);
+ ssize_t ocall_recvmsg(int sockfd,
+ [in, out, size=msg_buf_size]void *msg_buf,
+ unsigned int msg_buf_size,
+ int flags);
+ int ocall_send(int sockfd, [in, size=len]const void *buf, size_t len, int flags);
+ ssize_t ocall_sendto(int sockfd, [in, size=len]const void *buf, size_t len, int flags,
+ [in, size=addrlen]void *dest_addr, uint32_t addrlen);
+ ssize_t ocall_sendmsg(int sockfd,
+ [in, size=msg_buf_size]void *msg_buf,
+ unsigned int msg_buf_size,
+ int flags);
+ int ocall_setsockopt(int sockfd, int level, int optname,
+ [in, size=optlen]void *optval,
+ unsigned int optlen);
+ int ocall_shutdown(int sockfd, int how);
+ int ocall_socket(int domain, int type, int protocol);
+ };
+};
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/shared_platform.cmake
new file mode 100644
index 000000000..b2de1ab06
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/shared_platform.cmake
@@ -0,0 +1,38 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_LINUX_SGX)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+if ("$ENV{SGX_SDK}" STREQUAL "")
+ set (SGX_SDK_DIR "/opt/intel/sgxsdk")
+else()
+ set (SGX_SDK_DIR $ENV{SGX_SDK})
+endif()
+
+include_directories (${SGX_SDK_DIR}/include)
+if (NOT BUILD_UNTRUST_PART EQUAL 1)
+ include_directories (${SGX_SDK_DIR}/include/tlibc
+ ${SGX_SDK_DIR}/include/libcxx)
+endif ()
+
+if (NOT WAMR_BUILD_LIBC_WASI EQUAL 1)
+ add_definitions(-DSGX_DISABLE_WASI)
+endif ()
+
+if (NOT WAMR_BUILD_THREAD_MGR EQUAL 1)
+ add_definitions(-DSGX_DISABLE_PTHREAD)
+endif ()
+
+file (GLOB source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+file (GLOB source_all_untrusted ${PLATFORM_SHARED_DIR}/untrusted/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all})
+
+set (PLATFORM_SHARED_SOURCE_UNTRUSTED ${source_all_untrusted})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/file.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/file.c
new file mode 100644
index 000000000..cb9bf6a21
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/file.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/uio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <sched.h>
+#include <poll.h>
+#include <errno.h>
+
+int
+ocall_open(const char *pathname, int flags, bool has_mode, unsigned mode)
+{
+ if (has_mode) {
+ return open(pathname, flags, (mode_t)mode);
+ }
+ else {
+ return open(pathname, flags);
+ }
+}
+
+int
+ocall_openat(int dirfd, const char *pathname, int flags, bool has_mode,
+ unsigned mode)
+{
+ if (has_mode) {
+ return openat(dirfd, pathname, flags, (mode_t)mode);
+ }
+ else {
+ return openat(dirfd, pathname, flags);
+ }
+}
+
+int
+ocall_close(int fd)
+{
+ return close(fd);
+}
+
+ssize_t
+ocall_read(int fd, void *buf, size_t read_size)
+{
+ if (buf != NULL) {
+ return read(fd, buf, read_size);
+ }
+ else {
+ return -1;
+ }
+}
+
+off_t
+ocall_lseek(int fd, off_t offset, int whence)
+{
+ return lseek(fd, offset, whence);
+}
+
+int
+ocall_ftruncate(int fd, off_t length)
+{
+ return ftruncate(fd, length);
+}
+
+int
+ocall_fsync(int fd)
+{
+ return fsync(fd);
+}
+
+int
+ocall_fdatasync(int fd)
+{
+ return fdatasync(fd);
+}
+
+int
+ocall_isatty(int fd)
+{
+ return isatty(fd);
+}
+
+void
+ocall_fdopendir(int fd, void **dirp)
+{
+ if (dirp) {
+ *(DIR **)dirp = fdopendir(fd);
+ }
+}
+
+void *
+ocall_readdir(void *dirp)
+{
+ DIR *p_dirp = (DIR *)dirp;
+ return readdir(p_dirp);
+}
+
+void
+ocall_rewinddir(void *dirp)
+{
+ DIR *p_dirp = (DIR *)dirp;
+ if (p_dirp) {
+ rewinddir(p_dirp);
+ }
+}
+
+void
+ocall_seekdir(void *dirp, long loc)
+{
+ DIR *p_dirp = (DIR *)dirp;
+
+ if (p_dirp) {
+ seekdir(p_dirp, loc);
+ }
+}
+
+long
+ocall_telldir(void *dirp)
+{
+ DIR *p_dirp = (DIR *)dirp;
+ if (p_dirp) {
+ return telldir(p_dirp);
+ }
+ return -1;
+}
+
+int
+ocall_closedir(void *dirp)
+{
+ DIR *p_dirp = (DIR *)dirp;
+ if (p_dirp) {
+ return closedir(p_dirp);
+ }
+ return -1;
+}
+
+int
+ocall_stat(const char *pathname, void *buf, unsigned int buf_len)
+{
+ return stat(pathname, (struct stat *)buf);
+}
+
+int
+ocall_fstat(int fd, void *buf, unsigned int buf_len)
+{
+ return fstat(fd, (struct stat *)buf);
+}
+
+int
+ocall_fstatat(int dirfd, const char *pathname, void *buf, unsigned int buf_len,
+ int flags)
+{
+ return fstatat(dirfd, pathname, (struct stat *)buf, flags);
+}
+
+int
+ocall_mkdirat(int dirfd, const char *pathname, unsigned mode)
+{
+ return mkdirat(dirfd, pathname, (mode_t)mode);
+}
+
+int
+ocall_link(const char *oldpath, const char *newpath)
+{
+ return link(oldpath, newpath);
+}
+
+int
+ocall_linkat(int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath, int flags)
+{
+ return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
+}
+
+int
+ocall_unlinkat(int dirfd, const char *pathname, int flags)
+{
+ return unlinkat(dirfd, pathname, flags);
+}
+
+ssize_t
+ocall_readlink(const char *pathname, char *buf, size_t bufsiz)
+{
+ return readlink(pathname, buf, bufsiz);
+}
+
+ssize_t
+ocall_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
+{
+ return readlinkat(dirfd, pathname, buf, bufsiz);
+}
+
+int
+ocall_renameat(int olddirfd, const char *oldpath, int newdirfd,
+ const char *newpath)
+{
+ return renameat(olddirfd, oldpath, newdirfd, newpath);
+}
+
+int
+ocall_symlinkat(const char *target, int newdirfd, const char *linkpath)
+{
+ return symlinkat(target, newdirfd, linkpath);
+}
+
+int
+ocall_ioctl(int fd, unsigned long request, void *arg, unsigned int arg_len)
+{
+ /* support just int *arg temporarily */
+ return ioctl(fd, request, (int *)arg);
+}
+
+int
+ocall_fcntl(int fd, int cmd)
+{
+ return fcntl(fd, cmd);
+}
+
+int
+ocall_fcntl_long(int fd, int cmd, long arg)
+{
+ return fcntl(fd, cmd, arg);
+}
+
+ssize_t
+ocall_readv(int fd, char *iov_buf, unsigned int buf_size, int iovcnt,
+ bool has_offset, off_t offset)
+{
+ struct iovec *iov = (struct iovec *)iov_buf;
+ ssize_t ret;
+ int i;
+
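+ /* iov_base fields arrive as offsets into iov_buf; rebase them to host
+ pointers before calling readv/preadv. */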
+ for (i = 0; i < iovcnt; i++) {
+ iov[i].iov_base = iov_buf + (unsigned)(uintptr_t)iov[i].iov_base;
+ }
+
+ if (has_offset)
+ ret = preadv(fd, iov, iovcnt, offset);
+ else
+ ret = readv(fd, iov, iovcnt);
+
+ return ret;
+}
+
+ssize_t
+ocall_writev(int fd, char *iov_buf, unsigned int buf_size, int iovcnt,
+ bool has_offset, off_t offset)
+{
+ struct iovec *iov = (struct iovec *)iov_buf;
+ int i;
+ ssize_t ret;
+
+ for (i = 0; i < iovcnt; i++) {
+ iov[i].iov_base = iov_buf + (unsigned)(uintptr_t)iov[i].iov_base;
+ }
+
+ if (has_offset)
+ ret = pwritev(fd, iov, iovcnt, offset);
+ else
+ ret = writev(fd, iov, iovcnt);
+
+ return ret;
+}
+
+int
+ocall_realpath(const char *path, char *buf, unsigned int buf_len)
+{
+ char *val = NULL;
+ val = realpath(path, buf);
+ if (val != NULL) {
+ return 0;
+ }
+ return -1;
+}
+
+int
+ocall_posix_fallocate(int fd, off_t offset, off_t len)
+{
+ return posix_fallocate(fd, offset, len);
+}
+
+int
+ocall_poll(void *fds, unsigned nfds, int timeout, unsigned int fds_len)
+{
+ return poll((struct pollfd *)fds, (nfds_t)nfds, timeout);
+}
+
+int
+ocall_getopt(int argc, char *argv_buf, unsigned int argv_buf_len,
+ const char *optstring)
+{
+ int ret;
+ int i;
+ char **argv = (char **)argv_buf;
+
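+ /* argv entries are serialized as offsets into argv_buf; rebase them to
+ host pointers before calling getopt. */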
+ for (i = 0; i < argc; i++) {
+ argv[i] = argv_buf + (uintptr_t)argv[i];
+ }
+
+ return getopt(argc, argv, optstring);
+}
+
+int
+ocall_sched_yield()
+{
+ return sched_yield();
+}
+
+int
+ocall_get_errno()
+{
+ return errno;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/pthread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/pthread.c
new file mode 100644
index 000000000..890ef754c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/pthread.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+
+int
+ocall_pthread_rwlock_init(void **rwlock, void *attr)
+{
+ int ret = 0;
+
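+ /* The rwlock object lives in untrusted memory; the enclave only keeps the
+ opaque pointer returned through *rwlock. */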
+ *rwlock = malloc(sizeof(pthread_rwlock_t));
+ if (*rwlock == NULL)
+ return -1;
+
+ ret = pthread_rwlock_init((pthread_rwlock_t *)*rwlock, NULL);
+ if (ret != 0) {
+ free(*rwlock);
+ *rwlock = NULL;
+ }
+ (void)attr;
+ return ret;
+}
+
+int
+ocall_pthread_rwlock_destroy(void *rwlock)
+{
+ pthread_rwlock_t *lock = (pthread_rwlock_t *)rwlock;
+ int ret;
+
+ ret = pthread_rwlock_destroy(lock);
+ free(lock);
+ return ret;
+}
+
+int
+ocall_pthread_rwlock_rdlock(void *rwlock)
+{
+ return pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock);
+}
+
+int
+ocall_pthread_rwlock_wrlock(void *rwlock)
+{
+ return pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock);
+}
+
+int
+ocall_pthread_rwlock_unlock(void *rwlock)
+{
+ return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/signal.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/signal.c
new file mode 100644
index 000000000..b2eecfb7a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/signal.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+#include <signal.h>
+
+int
+ocall_raise(int sig)
+{
+ return raise(sig);
+}
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/socket.c
new file mode 100644
index 000000000..6f598ab8f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/socket.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+int
+ocall_socket(int domain, int type, int protocol)
+{
+ return socket(domain, type, protocol);
+}
+
+int
+ocall_getsockopt(int sockfd, int level, int optname, void *val_buf,
+ unsigned int val_buf_size, void *len_buf)
+{
+ return getsockopt(sockfd, level, optname, val_buf, (socklen_t *)len_buf);
+}
+
+ssize_t
+ocall_sendmsg(int sockfd, void *msg_buf, unsigned int msg_buf_size, int flags)
+{
+ struct msghdr *msg = (struct msghdr *)msg_buf;
+ int i;
+ ssize_t ret;
+
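+ /* The enclave serialized every msghdr pointer as an offset into msg_buf;
+ rebase them to real pointers before handing the msghdr to sendmsg. */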
+ if (msg->msg_name != NULL)
+ msg->msg_name = msg_buf + (unsigned)(uintptr_t)msg->msg_name;
+
+ if (msg->msg_control != NULL)
+ msg->msg_control = msg_buf + (unsigned)(uintptr_t)msg->msg_control;
+
+ if (msg->msg_iov != NULL) {
+ msg->msg_iov = msg_buf + (unsigned)(uintptr_t)msg->msg_iov;
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ msg->msg_iov[i].iov_base =
+ msg_buf + (unsigned)(uintptr_t)msg->msg_iov[i].iov_base;
+ }
+ }
+
+ return sendmsg(sockfd, msg, flags);
+}
+
+ssize_t
+ocall_recvmsg(int sockfd, void *msg_buf, unsigned int msg_buf_size, int flags)
+{
+ struct msghdr *msg = (struct msghdr *)msg_buf;
+ int i;
+ ssize_t ret;
+
+ if (msg->msg_name != NULL)
+ msg->msg_name = msg_buf + (unsigned)(uintptr_t)msg->msg_name;
+
+ if (msg->msg_control != NULL)
+ msg->msg_control = msg_buf + (unsigned)(uintptr_t)msg->msg_control;
+
+ if (msg->msg_iov != NULL) {
+ msg->msg_iov = msg_buf + (unsigned)(uintptr_t)msg->msg_iov;
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ msg->msg_iov[i].iov_base =
+ msg_buf + (unsigned)(uintptr_t)msg->msg_iov[i].iov_base;
+ }
+ }
+
+ return recvmsg(sockfd, msg, flags);
+}
+
+int
+ocall_shutdown(int sockfd, int how)
+{
+ return shutdown(sockfd, how);
+}
+
+int
+ocall_setsockopt(int sockfd, int level, int optname, void *optval,
+ unsigned int optlen)
+{
+ return setsockopt(sockfd, level, optname, optval, optlen);
+}
+
+int
+ocall_bind(int sockfd, const void *addr, uint32_t addrlen)
+{
+ return bind(sockfd, (const struct sockaddr *)addr, addrlen);
+}
+
+int
+ocall_getsockname(int sockfd, void *addr, uint32_t *addrlen, uint32_t addr_size)
+{
+ return getsockname(sockfd, (struct sockaddr *)addr, addrlen);
+}
+
+int
+ocall_getpeername(int sockfd, void *addr, uint32_t *addrlen, uint32_t addr_size)
+{
+ return getpeername(sockfd, (struct sockaddr *)addr, addrlen);
+}
+
+int
+ocall_listen(int sockfd, int backlog)
+{
+ return listen(sockfd, backlog);
+}
+
+int
+ocall_accept(int sockfd, void *addr, uint32_t *addrlen, uint32_t addr_size)
+{
+ return accept(sockfd, (struct sockaddr *)addr, addrlen);
+}
+
+int
+ocall_recv(int sockfd, void *buf, size_t len, int flags)
+{
+ return recv(sockfd, buf, len, flags);
+}
+
+ssize_t
+ocall_recvfrom(int sockfd, void *buf, size_t len, int flags, void *src_addr,
+ uint32_t *addrlen, uint32_t addr_size)
+{
+ return recvfrom(sockfd, buf, len, flags, (struct sockaddr *)src_addr,
+ addrlen);
+}
+
+int
+ocall_send(int sockfd, const void *buf, size_t len, int flags)
+{
+ return send(sockfd, buf, len, flags);
+}
+
+ssize_t
+ocall_sendto(int sockfd, const void *buf, size_t len, int flags,
+ void *dest_addr, uint32_t addrlen)
+{
+ return sendto(sockfd, buf, len, flags, (struct sockaddr *)dest_addr,
+ addrlen);
+}
+
+int
+ocall_connect(int sockfd, void *addr, uint32_t addrlen)
+{
+ return connect(sockfd, (const struct sockaddr *)addr, addrlen);
+}
\ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/time.c
new file mode 100644
index 000000000..5fa387b0c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux-sgx/untrusted/time.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <fcntl.h>
+
+/** time clock **/
+int
+ocall_clock_gettime(unsigned clock_id, void *tp_buf, unsigned int tp_buf_size)
+{
+ return clock_gettime((clockid_t)clock_id, (struct timespec *)tp_buf);
+}
+
+int
+ocall_clock_getres(int clock_id, void *res_buf, unsigned int res_buf_size)
+{
+ return clock_getres(clock_id, (struct timespec *)res_buf);
+}
+
+int
+ocall_utimensat(int dirfd, const char *pathname, const void *times_buf,
+ unsigned int times_buf_size, int flags)
+{
+ return utimensat(dirfd, pathname, (struct timespec *)times_buf, flags);
+}
+
+int
+ocall_futimens(int fd, const void *times_buf, unsigned int times_buf_size)
+{
+ return futimens(fd, (struct timespec *)times_buf);
+}
+
+int
+ocall_clock_nanosleep(unsigned clock_id, int flags, const void *req_buf,
+ unsigned int req_buf_size, const void *rem_buf,
+ unsigned int rem_buf_size)
+{
+ return clock_nanosleep((clockid_t)clock_id, flags,
+ (struct timespec *)req_buf,
+ (struct timespec *)rem_buf);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_init.c
new file mode 100644
index 000000000..2aae13fa1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_init.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
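
os_printf and os_vprintf fall back to vprintf unless the embedder defines BH_VPRINTF, which redirects all runtime output without touching this file. A minimal sketch of such an override, assuming a hypothetical my_log_vprintf sink (the macro must be visible wherever platform_init.c is compiled, e.g. via a shared config header or a compiler definition):

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical sink: BH_VPRINTF must accept (const char *, va_list)
       and return an int, matching how it is invoked above. */
    static int
    my_log_vprintf(const char *format, va_list ap)
    {
        int n = fprintf(stderr, "[wamr] ");
        return n + vfprintf(stderr, format, ap);
    }

    #define BH_VPRINTF my_log_vprintf
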
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_internal.h
new file mode 100644
index 000000000..0ac63cf5e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/platform_internal.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <pthread.h>
+#include <signal.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sched.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_LINUX
+#define BH_PLATFORM_LINUX
+#endif
+
+/* Stack size of an applet thread's native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define os_thread_local_attribute __thread
+
+#define bh_socket_t int
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
+ || defined(BUILD_TARGET_AARCH64) || defined(BUILD_TARGET_RISCV64_LP64D) \
+ || defined(BUILD_TARGET_RISCV64_LP64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+#define os_alloca alloca
+
+#define os_getpagesize getpagesize
+
+typedef void (*os_signal_handler)(void *sig_addr);
+
+int
+os_thread_signal_init(os_signal_handler handler);
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+void
+os_signal_unmask();
+
+void
+os_sigreturn();
+#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64/RISCV64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
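
On the listed 64-bit targets this header turns on OS_ENABLE_HW_BOUND_CHECK and maps os_setjmp/os_longjmp onto plain setjmp/longjmp: the runtime saves a jump buffer before running wasm code, and the signal handler installed through os_thread_signal_init() jumps back when a guard-page access faults. A stripped-down sketch of that recovery pattern (plain signal()/longjmp(), not WAMR's actual handler, which also validates the faulting address and restores the signal mask via os_signal_unmask()):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static jmp_buf recovery_buf; /* korp_jmpbuf in the header above */

    static void
    segv_handler(int sig)
    {
        (void)sig;
        longjmp(recovery_buf, 1); /* simplified; see note above */
    }

    int
    main(void)
    {
        signal(SIGSEGV, segv_handler);

        if (setjmp(recovery_buf) == 0) {
            volatile int *p = NULL;
            *p = 42; /* faults; the handler longjmps back here */
        }
        else {
            printf("out-of-bounds access trapped\n");
        }
        return 0;
    }
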
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/shared_platform.cmake
new file mode 100644
index 000000000..9a8726016
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/linux/shared_platform.cmake
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_LINUX)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_POSIX_SOURCE})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/nuttx_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/nuttx_platform.c
new file mode 100644
index 000000000..9cb123e01
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/nuttx_platform.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2020 XiaoMi Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_extension.h"
+#include "platform_api_vmcore.h"
+
+#if defined(CONFIG_ARCH_USE_TEXT_HEAP)
+#include <nuttx/arch.h>
+#endif
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+void *
+os_malloc(unsigned size)
+{
+ return malloc(size);
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return realloc(ptr, size);
+}
+
+void
+os_free(void *ptr)
+{
+ free(ptr);
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+#if defined(CONFIG_ARCH_USE_TEXT_HEAP)
+ if ((prot & MMAP_PROT_EXEC) != 0) {
+ return up_textheap_memalign(sizeof(void *), size);
+ }
+#endif
+
+ if ((uint64)size >= UINT32_MAX)
+ return NULL;
+ return malloc((uint32)size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+#if defined(CONFIG_ARCH_USE_TEXT_HEAP)
+ if (up_textheap_heapmember(addr)) {
+ up_textheap_free(addr);
+ return;
+ }
+#endif
+ free(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush()
+{}
+
+/* If AT_FDCWD is provided, maybe we have openat family */
+#if !defined(AT_FDCWD)
+
+int
+openat(int fd, const char *path, int oflags, ...)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+fstatat(int fd, const char *path, struct stat *buf, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+mkdirat(int fd, const char *path, mode_t mode)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t
+readlinkat(int fd, const char *path, char *buf, size_t bufsize)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+linkat(int fd1, const char *path1, int fd2, const char *path2, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int
+renameat(int fromfd, const char *from, int tofd, const char *to)
+{
+ errno = ENOSYS;
+ return -1;
+}
+int
+symlinkat(const char *target, int fd, const char *path)
+{
+ errno = ENOSYS;
+ return -1;
+}
+int
+unlinkat(int fd, const char *path, int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+int
+utimensat(int fd, const char *path, const struct timespec ts[2], int flag)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+#endif /* !defined(AT_FDCWD) */
+
+#ifndef CONFIG_NET
+
+#include <netdb.h>
+
+int
+accept(int sockfd, FAR struct sockaddr *addr, FAR socklen_t *addrlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+bind(int sockfd, FAR const struct sockaddr *addr, socklen_t addrlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+listen(int sockfd, int backlog)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+connect(int sockfd, FAR const struct sockaddr *addr, socklen_t addrlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+ssize_t
+recvfrom(int sockfd, FAR void *buf, size_t len, int flags,
+ FAR struct sockaddr *from, FAR socklen_t *fromlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+ssize_t
+send(int sockfd, FAR const void *buf, size_t len, int flags)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+ssize_t
+sendto(int sockfd, FAR const void *buf, size_t len, int flags,
+ FAR const struct sockaddr *to, socklen_t tolen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+socket(int domain, int type, int protocol)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+shutdown(int sockfd, int how)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+getaddrinfo(FAR const char *nodename, FAR const char *servname,
+ FAR const struct addrinfo *hints, FAR struct addrinfo **res)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+void
+freeaddrinfo(FAR struct addrinfo *ai)
+{}
+
+int
+setsockopt(int sockfd, int level, int option, FAR const void *value,
+ socklen_t value_len)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+getsockopt(int sockfd, int level, int option, FAR void *value,
+ FAR socklen_t *value_len)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+getpeername(int sockfd, FAR struct sockaddr *addr, FAR socklen_t *addrlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+int
+getsockname(int sockfd, FAR struct sockaddr *addr, FAR socklen_t *addrlen)
+{
+ errno = ENOTSUP;
+ return -1;
+}
+
+#endif
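
On NuttX os_mmap is essentially a malloc wrapper, except that executable mappings come from the architecture text heap when CONFIG_ARCH_USE_TEXT_HEAP is set, and os_munmap hands such blocks back through up_textheap_free. A caller-side sketch using the MMAP_PROT_* constants from platform_api_vmcore.h, as the file above does (the helper names are illustrative, not WAMR APIs):

    #include "platform_api_vmcore.h"

    /* Allocate a buffer that generated code could be placed into;
       falls back to plain malloc() when no text heap is configured. */
    static void *
    alloc_code_buffer(size_t size)
    {
        return os_mmap(NULL, size,
                       MMAP_PROT_READ | MMAP_PROT_WRITE | MMAP_PROT_EXEC, 0);
    }

    static void
    free_code_buffer(void *buf, size_t size)
    {
        os_munmap(buf, size);
    }
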
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/platform_internal.h
new file mode 100644
index 000000000..b5bbdacd0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/platform_internal.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2020 XiaoMi Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <assert.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <poll.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <math.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <semaphore.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_NUTTX
+#define BH_PLATFORM_NUTTX
+#endif
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 100
+
+#define os_printf printf
+#define os_vprintf vprintf
+
+#if defined(CONFIG_LIBC_DLFCN)
+#define BH_HAS_DLFCN 1
+#else
+#define BH_HAS_DLFCN 0
+#endif
+
+/* On NuttX, time_t is uint32_t */
+#define BH_TIME_T_MAX 0xffffffff
+
+/*
+ * NuttX doesn't have O_DIRECTORY or directory open.
+ * REVISIT: maybe it is safer to disable this at a higher level.
+ */
+#if !defined(O_DIRECTORY)
+#define O_DIRECTORY 0
+#endif
+
+#if !defined(O_NOFOLLOW)
+#define O_NOFOLLOW 0
+#endif
+
+#undef CONFIG_HAS_ISATTY
+#ifdef CONFIG_SERIAL_TERMIOS
+#define CONFIG_HAS_ISATTY 1
+#else
+#define CONFIG_HAS_ISATTY 0
+#endif
+
+#define BUILTIN_LIBC_BUFFERED_PRINTF 1
+#define BUILTIN_LIBC_BUFFERED_PRINT_SIZE 128
+#define BUILTIN_LIBC_BUFFERED_PRINT_PREFIX
+
+/*
+ * NuttX doesn't have openat family.
+ */
+
+/* If AT_FDCWD is provided, maybe we have openat family */
+#if !defined(AT_FDCWD)
+
+int
+openat(int fd, const char *path, int oflags, ...);
+int
+fstatat(int fd, const char *path, struct stat *buf, int flag);
+int
+mkdirat(int fd, const char *path, mode_t mode);
+ssize_t
+readlinkat(int fd, const char *path, char *buf, size_t bufsize);
+int
+linkat(int fd1, const char *path1, int fd2, const char *path2, int flag);
+int
+renameat(int fromfd, const char *from, int tofd, const char *to);
+int
+symlinkat(const char *target, int fd, const char *path);
+int
+unlinkat(int fd, const char *path, int flag);
+int
+utimensat(int fd, const char *path, const struct timespec ts[2], int flag);
+#define AT_SYMLINK_NOFOLLOW 0
+#define AT_SYMLINK_FOLLOW 0
+#define AT_REMOVEDIR 0
+
+#endif /* !defined(AT_FDCWD) */
+
+/*
+ * NuttX doesn't have fdopendir.
+ */
+
+DIR *
+fdopendir(int fd);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/shared_platform.cmake
new file mode 100644
index 000000000..7b29b5f09
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/nuttx/shared_platform.cmake
@@ -0,0 +1,14 @@
+# Copyright (C) 2020 XiaoMi Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_NUTTX)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/platform_internal.h
new file mode 100644
index 000000000..8fec6dd0b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/platform_internal.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+/* Riot includes core */
+#include <sched.h>
+#include <thread.h>
+#include <mutex.h>
+
+/* Riot includes sys */
+#include <sema.h>
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#ifndef BH_PLATFORM_RIOT
+#define BH_PLATFORM_RIOT
+#endif
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 7
+
+typedef thread_t korp_thread;
+typedef kernel_pid_t korp_tid;
+typedef mutex_t korp_mutex;
+typedef unsigned int korp_sem;
+
+/* typedef sema_t korp_sem; */
+
+struct os_thread_wait_node;
+typedef struct os_thread_wait_node *os_thread_wait_list;
+typedef struct korp_cond {
+ mutex_t wait_list_lock;
+ os_thread_wait_list thread_wait_list;
+} korp_cond;
+
+#define os_printf printf
+#define os_vprintf vprintf
+
+#if WA_MATH
+/* clang-format off */
+/* math functions which are not provided by the OS */
+double sqrt(double x);
+double floor(double x);
+double ceil(double x);
+double fmin(double x, double y);
+double fmax(double x, double y);
+double rint(double x);
+double fabs(double x);
+double trunc(double x);
+float sqrtf(float x);
+float floorf(float x);
+float ceilf(float x);
+float fminf(float x, float y);
+float fmaxf(float x, float y);
+float rintf(float x);
+float fabsf(float x);
+float truncf(float x);
+int signbit(double x);
+int isnan(double x);
+/* clang-format on */
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_platform.c
new file mode 100644
index 000000000..a0c38e8c9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_platform.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+int
+os_thread_sys_init(void);
+
+void
+os_thread_sys_destroy(void);
+
+int
+bh_platform_init(void)
+{
+ return os_thread_sys_init();
+}
+
+void
+bh_platform_destroy(void)
+{
+ os_thread_sys_destroy();
+}
+
+void *
+os_malloc(unsigned size)
+{
+ return malloc(size);
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return realloc(ptr, size);
+}
+
+void
+os_free(void *ptr)
+{
+ free(ptr);
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ if (size > ((unsigned)~0))
+ return NULL;
+ return BH_MALLOC((unsigned)size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ BH_FREE(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush(void)
+{
+#if defined(CONFIG_CPU_CORTEX_M7) && defined(CONFIG_ARM_MPU)
+ uint32 key;
+ key = irq_lock();
+ SCB_CleanDCache();
+ irq_unlock(key);
+#endif
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_thread.c
new file mode 100644
index 000000000..0ebcf30e0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_thread.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#include <panic.h>
+#include <sema.h>
+#include <ztimer.h>
+
+/* clang-format off */
+#define bh_assert(v) do { \
+ if (!(v)) { \
+ printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
+ #v, __FILE__, __LINE__); \
+ core_panic(0, 0/*expr_string*/); \
+ while (1); \
+ } \
+} while (0)
+/* clang-format on */
+
+struct os_thread_data;
+typedef struct os_thread_wait_node {
+ sema_t sem;
+ void *ret;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+// all information a thread needs to clean itself up
+typedef struct os_thread_data {
+ /* Next thread data */
+ struct os_thread_data *next;
+ /* thread handle */
+ kernel_pid_t tid;
+ /* Thread start routine */
+ thread_start_routine_t start_routine;
+ /* Thread start routine argument */
+ void *arg;
+ /* thread local root */
+ void *tlr;
+ /* Lock for waiting list */
+ mutex_t wait_list_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+ /* Thread stack size */
+ unsigned stack_size;
+ /* Thread stack */
+ char stack[1];
+} os_thread_data;
+
+typedef struct os_thread_obj {
+ korp_tid thread;
+ /* Whether the thread is terminated and this thread object is to
+ be freed in the future. */
+ bool to_be_freed;
+ struct os_thread_obj *next;
+} os_thread_obj;
+
+static bool is_thread_sys_inited = false;
+
+/* Lock for thread data list */
+static mutex_t thread_data_lock;
+
+/* Thread data list */
+static os_thread_data *thread_data_list = NULL;
+
+static void
+thread_data_list_add(os_thread_data *thread_data)
+{
+ mutex_lock(&thread_data_lock);
+ if (!thread_data_list)
+ thread_data_list = thread_data;
+ else {
+ /* If already in list, just return */
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p == thread_data) {
+ mutex_unlock(&thread_data_lock);
+ return;
+ }
+ p = p->next;
+ }
+
+ /* Set as head of list */
+ thread_data->next = thread_data_list;
+ thread_data_list = thread_data;
+ }
+ mutex_unlock(&thread_data_lock);
+}
+
+static void
+thread_data_list_remove(os_thread_data *thread_data)
+{
+ mutex_lock(&thread_data_lock);
+ if (thread_data_list) {
+ if (thread_data_list == thread_data)
+ thread_data_list = thread_data_list->next;
+ else {
+ /* Search and remove it from list */
+ os_thread_data *p = thread_data_list;
+ while (p && p->next != thread_data)
+ p = p->next;
+ if (p && p->next == thread_data)
+ p->next = p->next->next;
+ }
+ }
+ mutex_unlock(&thread_data_lock);
+}
+
+static os_thread_data *
+thread_data_list_lookup(korp_tid tid)
+{
+ mutex_lock(&thread_data_lock);
+ if (thread_data_list) {
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p->tid == tid) {
+ /* Found */
+ mutex_unlock(&thread_data_lock);
+ return p;
+ }
+ p = p->next;
+ }
+ }
+ mutex_unlock(&thread_data_lock);
+ return NULL;
+}
+
+int
+os_thread_sys_init()
+{
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+ mutex_init(&thread_data_lock);
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+}
+
+void
+os_thread_sys_destroy()
+{
+ if (is_thread_sys_inited) {
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ kernel_pid_t tid = thread_getpid();
+ return thread_data_list_lookup(tid);
+}
+
+static void
+os_thread_cleanup(void)
+{
+ // TODO Check this (Join sema trigger, cleanup of thread_data)
+ os_thread_data *thread_data = thread_data_current();
+ bh_assert(thread_data != NULL);
+ mutex_lock(&thread_data->wait_list_lock);
+ if (thread_data->thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_data->thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ head->ret = thread_data->arg;
+ sema_post(&head->sem);
+ head = next;
+ }
+ thread_data->thread_wait_list = NULL;
+ }
+ mutex_unlock(&thread_data->wait_list_lock);
+
+ thread_data_list_remove(thread_data);
+}
+
+static void *
+os_thread_wrapper(void *thread_data)
+{
+ /* Set thread custom data */
+ os_thread_data *t = (os_thread_data *)thread_data;
+ t->tid = thread_getpid();
+ thread_data_list_add(t);
+
+ // save the return value into arg, since arg is no longer needed after the call
+ t->arg = (t->start_routine)(t->arg);
+
+ os_thread_cleanup(); // internal structures and joiners
+
+ BH_FREE(thread_data);
+ sched_task_exit(); // stop and clean up the current thread
+ return NULL; // never reached
+}
+
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(p_tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ kernel_pid_t tid;
+ os_thread_data *thread_data;
+ unsigned thread_data_size;
+
+ if (!p_tid || !stack_size)
+ return BHT_ERROR;
+
+ /* Create and initialize thread data */
+ thread_data_size = offsetof(os_thread_data, stack) + stack_size;
+ if (!(thread_data = BH_MALLOC(thread_data_size))) {
+ return BHT_ERROR;
+ }
+
+ memset(thread_data, 0, thread_data_size);
+ mutex_init(&thread_data->wait_list_lock);
+ thread_data->stack_size = stack_size;
+ thread_data->start_routine = start;
+ thread_data->arg = arg;
+
+ /* Create the thread */
+ if (!((tid = thread_create(thread_data->stack, stack_size, prio, 0,
+ os_thread_wrapper, thread_data, "WASM")))) {
+ BH_FREE(thread_data);
+ return BHT_ERROR;
+ }
+
+ thread_data->tid = tid;
+
+ /* Set thread custom data */
+ thread_data_list_add(thread_data);
+ *p_tid = tid;
+ return BHT_OK;
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)thread_getpid();
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ // check whether the thread is still running,
+ // and wait for it to finish if it is
+ os_thread_data *thread_data;
+ os_thread_wait_node node;
+
+ sema_create(&node.sem, 0);
+ node.next = NULL;
+
+ /* Get thread data */
+ thread_data = thread_data_list_lookup(thread);
+ if (thread_data == NULL) {
+ // thread not found
+ sema_destroy(&node.sem);
+ return BHT_ERROR;
+ }
+ bh_assert(thread_data != NULL);
+
+ mutex_lock(&thread_data->wait_list_lock);
+ if (!thread_data->thread_wait_list)
+ thread_data->thread_wait_list = &node;
+ else {
+ /* Add to end of waiting list */
+ os_thread_wait_node *p = thread_data->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = &node;
+ }
+ mutex_unlock(&thread_data->wait_list_lock);
+
+ sema_wait(&node.sem);
+ // fetch the return value now; the wait node's contents may not be available after return
+ if (value_ptr)
+ (*value_ptr) = node.ret;
+ /* Wait some time for the thread to be actually terminated */
+ // TODO: k_sleep(100);
+
+ // TODO: bump target prio to make it finish and free its resources
+ thread_yield();
+
+ // node has done its job
+ sema_destroy(&node.sem);
+
+ return BHT_OK;
+}
+
+// int vm_mutex_trylock(korp_mutex *mutex)
+// {
+// return mutex_trylock(mutex);
+// }
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ mutex_init(mutex);
+ return BHT_OK;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ (void)mutex;
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ mutex_lock(mutex);
+ return 0; // RIOT's mutex_lock() blocks until the mutex is acquired
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ mutex_unlock(mutex);
+ return 0; // RIOT's mutex_unlock() always succeeds
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ mutex_init(&cond->wait_list_lock);
+ cond->thread_wait_list = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ (void)cond;
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed,
+ uint64 useconds)
+{
+ os_thread_wait_node *node;
+
+ /* Create wait node and append it to wait list */
+ if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
+ return BHT_ERROR;
+
+ sema_create(&node->sem, 0);
+ node->next = NULL;
+
+ mutex_lock(&cond->wait_list_lock);
+ if (!cond->thread_wait_list)
+ cond->thread_wait_list = node;
+ else {
+ /* Add to end of wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ mutex_unlock(&cond->wait_list_lock);
+
+ /* Unlock mutex, wait sem and lock mutex again */
+ mutex_unlock(mutex);
+ if (!timed)
+ sema_wait(&node->sem);
+ else
+ sema_wait_timed_ztimer(&node->sem, ZTIMER_USEC, useconds);
+ mutex_lock(mutex);
+
+ /* Remove wait node from wait list */
+ mutex_lock(&cond->wait_list_lock);
+ if (cond->thread_wait_list == node)
+ cond->thread_wait_list = node->next;
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+ }
+ BH_FREE(node);
+ mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ return os_cond_wait_internal(cond, mutex, (useconds != BHT_WAIT_FOREVER),
+ useconds);
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ mutex_lock(&cond->wait_list_lock);
+ if (cond->thread_wait_list)
+ sema_post(&cond->thread_wait_list->sem);
+ mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+#if defined(DEVELHELP) || defined(SCHED_TEST_STACK) \
+ || defined(MODULE_MPU_STACK_GUARD)
+ return (uint8 *)thread_get_active()->stack_start;
+#else
+ return NULL;
+#endif
+}
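
os_cond_wait/os_cond_signal above implement a condition variable as a mutex-protected list of per-thread wait semaphores, so the usual predicate-loop pattern applies on the caller side. A small usage sketch (queue_ready and the function names are illustrative, not part of WAMR):

    #include <stdbool.h>
    #include "platform_api_vmcore.h"
    #include "platform_api_extension.h"

    static korp_mutex lock;
    static korp_cond cond;
    static bool queue_ready = false; /* illustrative shared state */

    static void
    setup(void)
    {
        os_mutex_init(&lock);
        os_cond_init(&cond);
    }

    static void
    consumer_wait(void)
    {
        os_mutex_lock(&lock);
        /* Re-check the predicate: os_cond_signal only wakes the head
           wait node, and spurious wakeups are always possible. */
        while (!queue_ready)
            os_cond_wait(&cond, &lock);
        queue_ready = false;
        os_mutex_unlock(&lock);
    }

    static void
    producer_notify(void)
    {
        os_mutex_lock(&lock);
        queue_ready = true;
        os_cond_signal(&cond);
        os_mutex_unlock(&lock);
    }
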
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_time.c
new file mode 100644
index 000000000..1503495c1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/riot_time.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include <ztimer64.h>
+#include <kernel_defines.h>
+
+#if IS_USED(MODULE_ZTIMER64_USEC)
+uint64
+os_time_get_boot_microsecond()
+{
+ return ztimer64_now(ZTIMER64_USEC);
+}
+#elif IS_USED(MODULE_ZTIMER64_MSEC)
+uint64
+os_time_get_boot_microsecond()
+{
+ return ztimer64_now(ZTIMER64_MSEC) * 1000;
+}
+#else
+#ifdef __GNUC__
+__attribute__((weak)) uint64
+os_time_get_boot_microsecond();
+#endif
+uint64
+os_time_get_boot_microsecond()
+{
+ static uint64_t times;
+ return ++times;
+}
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/shared_platform.cmake
new file mode 100644
index 000000000..52cf90463
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/riot/shared_platform.cmake
@@ -0,0 +1,17 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_RIOT)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+# include (${CMAKE_CURRENT_LIST_DIR}/../common/math/platform_api_math.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/SConscript b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/SConscript
new file mode 100644
index 000000000..1e93f4755
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/SConscript
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2021, RT-Thread Development Team
+#
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+
+from building import *
+import os
+
+cwd = GetCurrentDir()
+
+src = Split('''
+''')
+
+
+def addSrcFiles(arr, path):
+ for f in os.listdir(path):
+ fpath = os.path.join(path, f)
+ if os.path.isfile(fpath):
+ ext = os.path.splitext(fpath)[-1]
+ if ext == '.c' or ext == '.cpp':
+ arr += [fpath]
+ elif os.path.isdir(fpath):
+ addSrcFiles(arr, fpath)
+
+
+
+addSrcFiles(src, cwd)
+CPPPATH = [cwd, cwd+'/../include']
+
+group = DefineGroup('iwasm_platform_core', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/platform_internal.h
new file mode 100644
index 000000000..5f9cc8bc8
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/platform_internal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef RTTHREAD_PLATFORM_INTERNAL_H
+#define RTTHREAD_PLATFORM_INTERNAL_H
+
+#include <rtthread.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdint.h>
+#include <ctype.h>
+
+#if defined(WASM_ENABLE_AOT)
+#if defined(RTT_WAMR_BUILD_TARGET_THUMB)
+#define BUILD_TARGET "thumbv4t"
+#elif defined(RTT_WAMR_BUILD_TARGET_ARMV7)
+#define BUILD_TARGET "armv7"
+#elif defined(RTT_WAMR_BUILD_TARGET_ARMV6)
+#define BUILD_TARGET "armv6"
+#elif defined(RTT_WAMR_BUILD_TARGET_ARMV4)
+#define BUILD_TARGET "armv4"
+#elif defined(RTT_WAMR_BUILD_TARGET_X86_32)
+#define BUILD_TARGET "X86_32"
+#else
+#error "unsupported aot platform."
+#endif
+#endif /* WASM_ENABLE_AOT */
+
+typedef rt_thread_t korp_tid;
+typedef struct rt_mutex korp_mutex;
+typedef struct rt_thread korp_cond;
+typedef struct rt_thread korp_thread;
+typedef unsigned int korp_sem;
+
+typedef rt_uint8_t uint8_t;
+typedef rt_int8_t int8_t;
+typedef rt_uint16_t uint16_t;
+typedef rt_int16_t int16_t;
+typedef rt_uint64_t uint64_t;
+typedef rt_int64_t int64_t;
+
+#endif /* RTTHREAD_PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/rtt_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/rtt_platform.c
new file mode 100644
index 000000000..4685e1ea3
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/rtt_platform.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2021, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <platform_api_vmcore.h>
+#include <platform_api_extension.h>
+
+typedef struct os_malloc_list {
+ void *real;
+ void *used;
+ rt_list_t node;
+} os_malloc_list_t;
+
+int
+bh_platform_init(void)
+{
+ return 0;
+}
+
+void
+bh_platform_destroy(void)
+{}
+
+void *
+os_malloc(unsigned size)
+{
+ void *buf_origin;
+ void *buf_fixed;
+ rt_ubase_t *addr_field;
+
+ buf_origin = rt_malloc(size + 8 + sizeof(rt_ubase_t));
+ buf_fixed = buf_origin + sizeof(void *);
+ if ((rt_ubase_t)buf_fixed & 0x7) {
+ buf_fixed = (void *)((rt_ubase_t)(buf_fixed + 8) & (~7));
+ }
+
+ addr_field = buf_fixed - sizeof(rt_ubase_t);
+ *addr_field = (rt_ubase_t)buf_origin;
+
+ return buf_fixed;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+
+ void *mem_origin;
+ void *mem_new;
+ void *mem_new_fixed;
+ rt_ubase_t *addr_field;
+
+ if (!ptr) {
+ return RT_NULL;
+ }
+
+ addr_field = ptr - sizeof(rt_ubase_t);
+ mem_origin = (void *)(*addr_field);
+ mem_new = rt_realloc(mem_origin, size + 8 + sizeof(rt_ubase_t));
+
+ if (mem_origin != mem_new) {
+ mem_new_fixed = mem_new + sizeof(rt_ubase_t);
+ if ((rt_ubase_t)mem_new_fixed & 0x7) {
+ mem_new_fixed = (void *)((rt_ubase_t)(mem_new_fixed + 8) & (~7));
+ }
+
+ addr_field = mem_new_fixed - sizeof(rt_ubase_t);
+ *addr_field = (rt_ubase_t)mem_new;
+
+ return mem_new_fixed;
+ }
+
+ return ptr;
+}
+
+void
+os_free(void *ptr)
+{
+ void *mem_origin;
+ rt_ubase_t *addr_field;
+
+ if (ptr) {
+ addr_field = ptr - sizeof(rt_ubase_t);
+ mem_origin = (void *)(*addr_field);
+
+ rt_free(mem_origin);
+ }
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+static char wamr_vprint_buf[RT_CONSOLEBUF_SIZE * 2];
+
+int
+os_printf(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ rt_size_t len =
+ vsnprintf(wamr_vprint_buf, sizeof(wamr_vprint_buf) - 1, format, ap);
+ wamr_vprint_buf[len] = 0x00;
+ rt_kputs(wamr_vprint_buf);
+ va_end(ap);
+ return 0;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+ rt_size_t len =
+ vsnprintf(wamr_vprint_buf, sizeof(wamr_vprint_buf) - 1, format, ap);
+ wamr_vprint_buf[len] = 0;
+ rt_kputs(wamr_vprint_buf);
+ return 0;
+}
+
+uint64
+os_time_get_boot_microsecond(void)
+{
+ uint64 ret = rt_tick_get() * 1000;
+ ret /= RT_TICK_PER_SECOND;
+ return ret;
+}
+
+korp_tid
+os_self_thread(void)
+{
+ return rt_thread_self();
+}
+
+uint8 *
+os_thread_get_stack_boundary(void)
+{
+ rt_thread_t tid = rt_thread_self();
+ return tid->stack_addr;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ return rt_mutex_init(mutex, "wamr0", RT_IPC_FLAG_FIFO);
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ return rt_mutex_detach(mutex);
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ return rt_mutex_take(mutex, RT_WAITING_FOREVER);
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ return rt_mutex_release(mutex);
+}
+
+/*
+ * The functions below are not implemented yet
+ */
+
+int
+os_cond_init(korp_cond *cond)
+{
+ return 0;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ return 0;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return 0;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ return rt_malloc(size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ rt_free(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush(void)
+{}
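
os_malloc/os_realloc/os_free above build 8-byte-aligned allocations on top of rt_malloc by over-allocating, rounding the user pointer up, and stashing the original block address in the machine word just below the pointer that is handed out. The same technique in portable C with hypothetical names (aligned8_malloc/aligned8_free are not WAMR APIs):

    #include <stdint.h>
    #include <stdlib.h>

    /* Over-allocate, align the user pointer to 8 bytes, and remember the
       original block address one word below the returned pointer. */
    static void *
    aligned8_malloc(size_t size)
    {
        unsigned char *raw = malloc(size + 8 + sizeof(uintptr_t));
        unsigned char *user;

        if (!raw)
            return NULL;
        user = raw + sizeof(uintptr_t);
        if ((uintptr_t)user & 0x7)
            user = (unsigned char *)(((uintptr_t)user + 8) & ~(uintptr_t)7);
        ((uintptr_t *)user)[-1] = (uintptr_t)raw;
        return user;
    }

    static void
    aligned8_free(void *ptr)
    {
        if (ptr)
            free((void *)((uintptr_t *)ptr)[-1]);
    }
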
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/shared_platform.cmake
new file mode 100644
index 000000000..fce9bff33
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/rt-thread/shared_platform.cmake
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021, RT-Thread Development Team
+#
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_RTT)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+# include (${CMAKE_CURRENT_LIST_DIR}/../common/math/platform_api_math.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_init.c
new file mode 100644
index 000000000..2aae13fa1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_init.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+int
+bh_platform_init()
+{
+ return 0;
+}
+
+void
+bh_platform_destroy()
+{}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_internal.h
new file mode 100644
index 000000000..f72f60322
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/platform_internal.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <pthread.h>
+#include <signal.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <poll.h>
+#include <sched.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/uio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_VXWORKS
+#define BH_PLATFORM_VXWORKS
+#endif
+
+/* Stack size of an applet thread's native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef pthread_t korp_tid;
+typedef pthread_mutex_t korp_mutex;
+typedef pthread_cond_t korp_cond;
+typedef pthread_t korp_thread;
+typedef sem_t korp_sem;
+
+#define OS_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+
+#define os_thread_local_attribute __thread
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
+ || defined(BUILD_TARGET_AARCH64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+#define os_alloca alloca
+
+#define os_getpagesize getpagesize
+
+typedef void (*os_signal_handler)(void *sig_addr);
+
+int
+os_thread_signal_init(os_signal_handler handler);
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+void
+os_signal_unmask();
+
+void
+os_sigreturn();
+#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/shared_platform.cmake
new file mode 100644
index 000000000..6979ce235
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/vxworks/shared_platform.cmake
@@ -0,0 +1,18 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_VXWORKS)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/posix/platform_api_posix.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_POSIX_SOURCE})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_init.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_init.c
new file mode 100644
index 000000000..db5885387
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_init.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+int
+os_thread_sys_init();
+
+void
+os_thread_sys_destroy();
+
+int
+init_winsock();
+
+void
+deinit_winsock();
+
+int
+bh_platform_init()
+{
+ if (init_winsock() != 0) {
+ return -1;
+ }
+
+ return os_thread_sys_init();
+}
+
+void
+bh_platform_destroy()
+{
+ deinit_winsock();
+
+ os_thread_sys_destroy();
+}
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
+
+unsigned
+os_getpagesize()
+{
+ SYSTEM_INFO sys_info;
+ GetNativeSystemInfo(&sys_info);
+ return (unsigned)sys_info.dwPageSize;
+}
+
+void
+os_dcache_flush(void)
+{}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_internal.h
new file mode 100644
index 000000000..500ab200c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/platform_internal.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <time.h>
+#include <sys/timeb.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <process.h>
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+#include <basetsd.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef BH_PLATFORM_WINDOWS
+#define BH_PLATFORM_WINDOWS
+#endif
+
+#ifdef _MSC_VER
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+#endif /* #ifdef _MSC_VER */
+
+/* Stack size of an applet thread's native part. */
+#define BH_APPLET_PRESERVED_STACK_SIZE (32 * 1024)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 0
+
+typedef SSIZE_T ssize_t;
+
+typedef void *korp_thread;
+typedef void *korp_tid;
+typedef void *korp_mutex;
+typedef void *korp_sem;
+
+/**
+ * The mutex is created lazily when os_mutex_lock is called, and there is
+ * no need to CloseHandle() it for the static lock's lifetime, since
+ * "The system closes the handle automatically when the process
+ * terminates. The mutex object is destroyed when its last
+ * handle has been closed."
+ * Refer to:
+ * https://learn.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-createmutexa
+ */
+#define OS_THREAD_MUTEX_INITIALIZER NULL
+
+struct os_thread_wait_node;
+typedef struct os_thread_wait_node *os_thread_wait_list;
+typedef struct korp_cond {
+ korp_mutex wait_list_lock;
+ os_thread_wait_list thread_wait_list;
+ struct os_thread_wait_node *thread_wait_list_end;
+} korp_cond;
+
+#define bh_socket_t SOCKET
+
+unsigned
+os_getpagesize();
+void *
+os_mem_commit(void *ptr, size_t size, int flags);
+void
+os_mem_decommit(void *ptr, size_t size);
+
+#define os_thread_local_attribute __declspec(thread)
+
+#define strncasecmp _strnicmp
+#define strcasecmp _stricmp
+
+#if WASM_DISABLE_HW_BOUND_CHECK == 0
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+
+#include <setjmp.h>
+
+#define OS_ENABLE_HW_BOUND_CHECK
+
+typedef jmp_buf korp_jmpbuf;
+
+#define os_setjmp setjmp
+#define os_longjmp longjmp
+
+int
+os_thread_signal_init();
+
+void
+os_thread_signal_destroy();
+
+bool
+os_thread_signal_inited();
+
+#define os_signal_unmask() (void)0
+#define os_sigreturn() (void)0
+
+#endif /* end of BUILD_TARGET_X86_64/AMD_64 */
+#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
+
+typedef enum os_memory_order {
+ os_memory_order_relaxed,
+ os_memory_order_consume,
+ os_memory_order_acquire,
+ os_memory_order_release,
+ os_memory_order_acq_rel,
+ os_memory_order_seq_cst,
+} os_memory_order;
+
+void
+bh_atomic_thread_fence(int mem_order);
+
+#define os_atomic_thread_fence bh_atomic_thread_fence
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _PLATFORM_INTERNAL_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/shared_platform.cmake
new file mode 100644
index 000000000..a68d63177
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/shared_platform.cmake
@@ -0,0 +1,19 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_WINDOWS)
+add_definitions(-DHAVE_STRUCT_TIMESPEC)
+add_definitions(-D_WINSOCK_DEPRECATED_NO_WARNINGS)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c
+ ${PLATFORM_SHARED_DIR}/*.cpp)
+
+set (PLATFORM_SHARED_SOURCE ${source_all})
+
+file (GLOB header ${PLATFORM_SHARED_DIR}/../include/*.h)
+LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_atomic.cpp b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_atomic.cpp
new file mode 100644
index 000000000..80e8ef518
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_atomic.cpp
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2023 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+
+#include <atomic>
+
+void
+bh_atomic_thread_fence(int mem_order)
+{
+ std::memory_order order =
+ (std::memory_order)(std::memory_order::memory_order_relaxed + mem_order
+ - os_memory_order_relaxed);
+ std::atomic_thread_fence(order);
+}
+
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_malloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_malloc.c
new file mode 100644
index 000000000..56aaf9c7b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_malloc.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+void *
+os_malloc(unsigned size)
+{
+ return malloc(size);
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return realloc(ptr, size);
+}
+
+void
+os_free(void *ptr)
+{
+ free(ptr);
+}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+} \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_memmap.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_memmap.c
new file mode 100644
index 000000000..c4a6b0756
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_memmap.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+#define TRACE_MEMMAP 0
+
+static DWORD
+access_to_win32_flags(int prot)
+{
+ DWORD protect = PAGE_NOACCESS;
+
+ if (prot & MMAP_PROT_EXEC) {
+ if (prot & MMAP_PROT_WRITE)
+ protect = PAGE_EXECUTE_READWRITE;
+ else
+ protect = PAGE_EXECUTE_READ;
+ }
+ else if (prot & MMAP_PROT_WRITE) {
+ protect = PAGE_READWRITE;
+ }
+ else if (prot & MMAP_PROT_READ) {
+ protect = PAGE_READONLY;
+ }
+
+ return protect;
+}
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ DWORD alloc_type = MEM_RESERVE;
+ DWORD protect;
+ size_t request_size, page_size;
+ void *addr;
+
+ page_size = os_getpagesize();
+ request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (request_size < size)
+ /* integer overflow */
+ return NULL;
+
+#if WASM_ENABLE_JIT != 0
+ /**
+ * Allocate memory at the highest possible address if the
+ * request size is large, or LLVM JIT might report error:
+ * IMAGE_REL_AMD64_ADDR32NB relocation requires an ordered
+ * section layout.
+ */
+ if (request_size > 10 * BH_MB)
+ alloc_type |= MEM_TOP_DOWN;
+#endif
+
+ protect = access_to_win32_flags(prot);
+ if (protect != PAGE_NOACCESS) {
+ alloc_type |= MEM_COMMIT;
+ }
+
+ addr = VirtualAlloc((LPVOID)hint, request_size, alloc_type, protect);
+
+#if TRACE_MEMMAP != 0
+ printf("Map memory, request_size: %zu, alloc_type: 0x%x, "
+ "protect: 0x%x, ret: %p\n",
+ request_size, alloc_type, protect, addr);
+#endif
+ return addr;
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ size_t page_size = os_getpagesize();
+ size_t request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (addr) {
+ if (!VirtualFree(addr, request_size, MEM_DECOMMIT)) {
+ printf("warning: os_munmap decommit pages failed, "
+ "addr: %p, request_size: %zu, errno: %d\n",
+ addr, request_size, errno);
+ return;
+ }
+
+ if (!VirtualFree(addr, 0, MEM_RELEASE)) {
+ printf("warning: os_munmap release pages failed, "
+ "addr: %p, size: %zu, errno:%d\n",
+ addr, request_size, errno);
+ }
+ }
+#if TRACE_MEMMAP != 0
+ printf("Unmap memory, addr: %p, request_size: %zu\n", addr, request_size);
+#endif
+}
+
+void *
+os_mem_commit(void *addr, size_t size, int flags)
+{
+ DWORD protect = access_to_win32_flags(flags);
+ size_t page_size = os_getpagesize();
+ size_t request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return NULL;
+
+#if TRACE_MEMMAP != 0
+ printf("Commit memory, addr: %p, request_size: %zu, protect: 0x%x\n", addr,
+ request_size, protect);
+#endif
+ return VirtualAlloc((LPVOID)addr, request_size, MEM_COMMIT, protect);
+}
+
+void
+os_mem_decommit(void *addr, size_t size)
+{
+ size_t page_size = os_getpagesize();
+ size_t request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return;
+
+#if TRACE_MEMMAP != 0
+ printf("Decommit memory, addr: %p, request_size: %zu\n", addr,
+ request_size);
+#endif
+ VirtualFree((LPVOID)addr, request_size, MEM_DECOMMIT);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ DWORD protect, old_protect;
+ size_t page_size = os_getpagesize();
+ size_t request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return 0;
+
+ protect = access_to_win32_flags(prot);
+#if TRACE_MEMMAP != 0
+ printf("Mprotect memory, addr: %p, request_size: %zu, protect: 0x%x\n",
+ addr, request_size, protect);
+#endif
+ return VirtualProtect((LPVOID)addr, request_size, protect, &old_protect);
+}
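
Every entry point above first rounds the requested size up to a whole number of pages with (size + page_size - 1) & ~(page_size - 1), and os_mmap additionally rejects requests where that rounding wraps around. The same helper in isolation (round_up_to_page is an illustrative name, not a WAMR function):

    #include <stddef.h>

    /* Round size up to a multiple of page_size (assumed to be a power of
       two). Returns 0 on overflow, mirroring the "request_size < size"
       wrap-around check in os_mmap above. */
    static size_t
    round_up_to_page(size_t size, size_t page_size)
    {
        size_t request_size = (size + page_size - 1) & ~(page_size - 1);

        return request_size < size ? 0 : request_size;
    }
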
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_socket.c
new file mode 100644
index 000000000..9a1c7a3c9
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_socket.c
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* link with Ws2_32.lib */
+#pragma comment(lib, "ws2_32.lib")
+
+static bool is_winsock_inited = false;
+
+int
+init_winsock()
+{
+ WSADATA wsaData;
+
+ if (!is_winsock_inited) {
+ if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) {
+ os_printf("winsock init failed");
+ return BHT_ERROR;
+ }
+
+ is_winsock_inited = true;
+ }
+
+ return BHT_OK;
+}
+
+void
+deinit_winsock()
+{
+ if (is_winsock_inited) {
+ WSACleanup();
+ }
+}
+
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp)
+{
+ int af;
+
+ if (!sock) {
+ return BHT_ERROR;
+ }
+
+ if (is_ipv4) {
+ af = AF_INET;
+ }
+ else {
+ errno = ENOSYS;
+ return BHT_ERROR;
+ }
+
+ if (is_tcp) {
+ *sock = socket(af, SOCK_STREAM, IPPROTO_TCP);
+ }
+ else {
+ *sock = socket(af, SOCK_DGRAM, 0);
+ }
+
+ return (*sock == -1) ? BHT_ERROR : BHT_OK;
+}
+
+int
+os_socket_bind(bh_socket_t socket, const char *host, int *port)
+{
+ struct sockaddr_in addr;
+ int socklen, ret;
+
+ assert(host);
+ assert(port);
+
+ addr.sin_addr.s_addr = inet_addr(host);
+ addr.sin_port = htons(*port);
+ addr.sin_family = AF_INET;
+
+ ret = bind(socket, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ goto fail;
+ }
+
+ socklen = sizeof(addr);
+ if (getsockname(socket, (void *)&addr, &socklen) == -1) {
+ os_printf("getsockname failed with error %d\n", WSAGetLastError());
+ goto fail;
+ }
+
+ *port = ntohs(addr.sin_port);
+
+ return BHT_OK;
+
+fail:
+ return BHT_ERROR;
+}
+
+int
+os_socket_settimeout(bh_socket_t socket, uint64 timeout_us)
+{
+ DWORD tv = (DWORD)(timeout_us / 1000UL);
+
+ if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv,
+ sizeof(tv))
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_listen(bh_socket_t socket, int max_client)
+{
+ if (listen(socket, max_client) != 0) {
+ os_printf("socket listen failed with error %d\n", WSAGetLastError());
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
+ unsigned int *addrlen)
+{
+ struct sockaddr addr_tmp;
+ unsigned int len = sizeof(struct sockaddr);
+
+ *sock = accept(server_sock, (struct sockaddr *)&addr_tmp, &len);
+
+ if (*sock == INVALID_SOCKET) {
+ os_printf("socket accept failed with error %d\n", WSAGetLastError());
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_recv(bh_socket_t socket, void *buf, unsigned int len)
+{
+ return recv(socket, buf, len, 0);
+}
+
+int
+os_socket_recv_from(bh_socket_t socket, void *buf, unsigned int len, int flags,
+ bh_sockaddr_t *src_addr)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_send(bh_socket_t socket, const void *buf, unsigned int len)
+{
+ return send(socket, buf, len, 0);
+}
+
+int
+os_socket_send_to(bh_socket_t socket, const void *buf, unsigned int len,
+ int flags, const bh_sockaddr_t *dest_addr)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_close(bh_socket_t socket)
+{
+ closesocket(socket);
+ return BHT_OK;
+}
+
+int
+os_socket_shutdown(bh_socket_t socket)
+{
+ shutdown(socket, SD_BOTH);
+ return BHT_OK;
+}
+
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out)
+{
+ if (!cp)
+ return BHT_ERROR;
+
+ if (is_ipv4) {
+ if (inet_pton(AF_INET, cp, &out->ipv4) != 1) {
+ return BHT_ERROR;
+ }
+ /* Note: ntohl(INADDR_NONE) == INADDR_NONE */
+ out->ipv4 = ntohl(out->ipv4);
+ }
+ else {
+ if (inet_pton(AF_INET6, cp, out->ipv6) != 1) {
+ return BHT_ERROR;
+ }
+ for (int i = 0; i < 8; i++) {
+ out->ipv6[i] = ntohs(out->ipv6[i]);
+ }
+ }
+
+ return BHT_OK;
+}
+
+int
+os_socket_addr_resolve(const char *host, const char *service,
+ uint8_t *hint_is_tcp, uint8_t *hint_is_ipv4,
+ bh_addr_info_t *addr_info, size_t addr_info_size,
+ size_t *max_info_size)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_send_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_send_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_recv_timeout(bh_socket_t socket, uint64 timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_recv_timeout(bh_socket_t socket, uint64 *timeout_us)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_send_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_send_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_recv_buf_size(bh_socket_t socket, size_t bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_recv_buf_size(bh_socket_t socket, size_t *bufsiz)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_keep_alive(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_keep_alive(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_reuse_addr(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_reuse_addr(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_reuse_port(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_reuse_port(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_linger(bh_socket_t socket, bool is_enabled, int linger_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_linger(bh_socket_t socket, bool *is_enabled, int *linger_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_no_delay(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_no_delay(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_quick_ack(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_quick_ack(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_keep_idle(bh_socket_t socket, uint32 time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_keep_idle(bh_socket_t socket, uint32 *time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_keep_intvl(bh_socket_t socket, uint32 time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_keep_intvl(bh_socket_t socket, uint32 *time_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_tcp_fastopen_connect(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_tcp_fastopen_connect(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool *is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_add_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_drop_membership(bh_socket_t socket,
+ bh_ip_addr_buffer_t *imr_multiaddr,
+ uint32_t imr_interface, bool is_ipv6)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ip_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ip_multicast_ttl(bh_socket_t socket, uint8_t ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ip_multicast_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_ipv6_only(bh_socket_t socket, bool option)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_ipv6_only(bh_socket_t socket, bool *option)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_set_broadcast(bh_socket_t socket, bool is_enabled)
+{
+ errno = ENOSYS;
+
+ return BHT_ERROR;
+}
+
+int
+os_socket_get_broadcast(bh_socket_t socket, bool *is_enabled)
+{
+ errno = ENOSYS;
+ return BHT_ERROR;
+}
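
For orientation, a minimal caller-side sketch of the part of this Windows socket layer that is implemented above (bind, listen, accept, recv, send, close). The os_socket_bind prototype (host string plus an in/out port pointer) is inferred from its body above, and serve_one_echo_client plus the pre-created listener socket are illustrative assumptions, not part of the patch.

/* Sketch only: echo one client over a pre-created listening socket. */
#include "platform_api_extension.h"

static int
serve_one_echo_client(bh_socket_t listener, const char *host, int *port)
{
    bh_socket_t client;
    char buf[128];
    int n;

    if (os_socket_bind(listener, host, port) != BHT_OK
        || os_socket_listen(listener, 5) != BHT_OK)
        return BHT_ERROR;

    /* addr/addrlen are ignored by the Windows implementation above */
    if (os_socket_accept(listener, &client, NULL, NULL) != BHT_OK)
        return BHT_ERROR;

    while ((n = os_socket_recv(client, buf, (unsigned int)sizeof(buf))) > 0) {
        if (os_socket_send(client, buf, (unsigned int)n) < 0)
            break;
    }

    os_socket_close(client);
    return BHT_OK;
}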
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_thread.c
new file mode 100644
index 000000000..09cf0c63f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_thread.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#define bh_assert(v) assert(v)
+
+#define BH_SEM_COUNT_MAX 0xFFFF
+
+struct os_thread_data;
+
+typedef struct os_thread_wait_node {
+ korp_sem sem;
+ void *retval;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+typedef struct os_thread_data {
+ /* Next thread data */
+ struct os_thread_data *next;
+ /* Thread data of parent thread */
+ struct os_thread_data *parent;
+ /* Thread Id */
+ DWORD thread_id;
+ /* Thread start routine */
+ thread_start_routine_t start_routine;
+ /* Thread start routine argument */
+ void *arg;
+ /* Wait node of current thread */
+ os_thread_wait_node wait_node;
+ /* Wait cond */
+ korp_cond wait_cond;
+ /* Wait lock */
+ korp_mutex wait_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+ /* End node of the waiting list */
+ os_thread_wait_node *thread_wait_list_end;
+ /* Whether the thread has exited */
+ bool thread_exited;
+ /* Thread return value */
+ void *thread_retval;
+} os_thread_data;
+
+static bool is_thread_sys_inited = false;
+
+/* Thread data of supervisor thread */
+static os_thread_data supervisor_thread_data;
+
+/* Thread data list lock */
+static korp_mutex thread_data_list_lock;
+
+/* Thread data key */
+static DWORD thread_data_key;
+
+/* The GetCurrentThreadStackLimits API from "kernel32" */
+static void(WINAPI *GetCurrentThreadStackLimits_Kernel32)(PULONG_PTR,
+ PULONG_PTR) = NULL;
+
+int
+os_sem_init(korp_sem *sem);
+int
+os_sem_destroy(korp_sem *sem);
+int
+os_sem_wait(korp_sem *sem);
+int
+os_sem_reltimed_wait(korp_sem *sem, uint64 useconds);
+int
+os_sem_signal(korp_sem *sem);
+
+int
+os_thread_sys_init()
+{
+ HMODULE module;
+
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+ if ((thread_data_key = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ return BHT_ERROR;
+
+ /* Initialize supervisor thread data */
+ memset(&supervisor_thread_data, 0, sizeof(os_thread_data));
+
+ supervisor_thread_data.thread_id = GetCurrentThreadId();
+
+ if (os_sem_init(&supervisor_thread_data.wait_node.sem) != BHT_OK)
+ goto fail1;
+
+ if (os_mutex_init(&supervisor_thread_data.wait_lock) != BHT_OK)
+ goto fail2;
+
+ if (os_cond_init(&supervisor_thread_data.wait_cond) != BHT_OK)
+ goto fail3;
+
+ if (!TlsSetValue(thread_data_key, &supervisor_thread_data))
+ goto fail4;
+
+ if (os_mutex_init(&thread_data_list_lock) != BHT_OK)
+ goto fail5;
+
+ if ((module = GetModuleHandle((LPCTSTR) "kernel32"))) {
+ *(void **)&GetCurrentThreadStackLimits_Kernel32 =
+ GetProcAddress(module, "GetCurrentThreadStackLimits");
+ }
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+
+fail5:
+ TlsSetValue(thread_data_key, NULL);
+fail4:
+ os_cond_destroy(&supervisor_thread_data.wait_cond);
+fail3:
+ os_mutex_destroy(&supervisor_thread_data.wait_lock);
+fail2:
+ os_sem_destroy(&supervisor_thread_data.wait_node.sem);
+fail1:
+ TlsFree(thread_data_key);
+ return BHT_ERROR;
+}
+
+void
+os_thread_sys_destroy()
+{
+ if (is_thread_sys_inited) {
+ os_thread_data *thread_data, *thread_data_next;
+
+ thread_data = supervisor_thread_data.next;
+ while (thread_data) {
+ thread_data_next = thread_data->next;
+
+ /* Destroy resources of thread data */
+ os_cond_destroy(&thread_data->wait_cond);
+ os_sem_destroy(&thread_data->wait_node.sem);
+ os_mutex_destroy(&thread_data->wait_lock);
+ BH_FREE(thread_data);
+
+ thread_data = thread_data_next;
+ }
+
+ os_mutex_destroy(&thread_data_list_lock);
+ os_cond_destroy(&supervisor_thread_data.wait_cond);
+ os_mutex_destroy(&supervisor_thread_data.wait_lock);
+ os_sem_destroy(&supervisor_thread_data.wait_node.sem);
+ memset(&supervisor_thread_data, 0, sizeof(os_thread_data));
+ TlsFree(thread_data_key);
+ thread_data_key = 0;
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ return (os_thread_data *)TlsGetValue(thread_data_key);
+}
+
+static void
+os_thread_cleanup(void *retval)
+{
+ os_thread_data *thread_data = thread_data_current();
+
+ bh_assert(thread_data != NULL);
+
+ os_mutex_lock(&thread_data->wait_lock);
+ if (thread_data->thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_data->thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ head->retval = retval;
+ os_sem_signal(&head->sem);
+ head = next;
+ }
+ thread_data->thread_wait_list = thread_data->thread_wait_list_end =
+ NULL;
+ }
+ /* Set thread status and thread return value */
+ thread_data->thread_exited = true;
+ thread_data->thread_retval = retval;
+ os_mutex_unlock(&thread_data->wait_lock);
+}
+
+static unsigned __stdcall os_thread_wrapper(void *arg)
+{
+ os_thread_data *thread_data = arg;
+ os_thread_data *parent = thread_data->parent;
+ void *retval;
+ bool result;
+
+#if 0
+ os_printf("THREAD CREATED %p\n", thread_data);
+#endif
+
+ os_mutex_lock(&parent->wait_lock);
+ thread_data->thread_id = GetCurrentThreadId();
+ result = TlsSetValue(thread_data_key, thread_data);
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+ if (result)
+ result = os_thread_signal_init() == 0 ? true : false;
+#endif
+ /* Notify parent thread */
+ os_cond_signal(&parent->wait_cond);
+ os_mutex_unlock(&parent->wait_lock);
+
+ if (!result)
+ return -1;
+
+ retval = thread_data->start_routine(thread_data->arg);
+
+ os_thread_cleanup(retval);
+ return 0;
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ os_thread_data *parent = thread_data_current();
+ os_thread_data *thread_data;
+
+ if (!p_tid || !start)
+ return BHT_ERROR;
+
+ if (stack_size < BH_APPLET_PRESERVED_STACK_SIZE)
+ stack_size = BH_APPLET_PRESERVED_STACK_SIZE;
+
+ if (!(thread_data = BH_MALLOC(sizeof(os_thread_data))))
+ return BHT_ERROR;
+
+ memset(thread_data, 0, sizeof(os_thread_data));
+ thread_data->parent = parent;
+ thread_data->start_routine = start;
+ thread_data->arg = arg;
+
+ if (os_sem_init(&thread_data->wait_node.sem) != BHT_OK)
+ goto fail1;
+
+ if (os_mutex_init(&thread_data->wait_lock) != BHT_OK)
+ goto fail2;
+
+ if (os_cond_init(&thread_data->wait_cond) != BHT_OK)
+ goto fail3;
+
+ os_mutex_lock(&parent->wait_lock);
+ if (!_beginthreadex(NULL, stack_size, os_thread_wrapper, thread_data, 0,
+ NULL)) {
+ os_mutex_unlock(&parent->wait_lock);
+ goto fail4;
+ }
+
+ /* Add thread data into thread data list */
+ os_mutex_lock(&thread_data_list_lock);
+ thread_data->next = supervisor_thread_data.next;
+ supervisor_thread_data.next = thread_data;
+ os_mutex_unlock(&thread_data_list_lock);
+
+    /* Wait for the child thread to set its thread id and
+       thread-local data before returning to the caller */
+ os_cond_wait(&parent->wait_cond, &parent->wait_lock);
+ os_mutex_unlock(&parent->wait_lock);
+
+ *p_tid = (korp_tid)thread_data;
+ return BHT_OK;
+
+fail4:
+ os_cond_destroy(&thread_data->wait_cond);
+fail3:
+ os_mutex_destroy(&thread_data->wait_lock);
+fail2:
+ os_sem_destroy(&thread_data->wait_node.sem);
+fail1:
+ BH_FREE(thread_data);
+ return BHT_ERROR;
+}
+
+int
+os_thread_create(korp_tid *tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)TlsGetValue(thread_data_key);
+}
+
+int
+os_thread_join(korp_tid thread, void **p_retval)
+{
+ os_thread_data *thread_data, *curr_thread_data;
+
+ /* Get thread data of current thread */
+ curr_thread_data = thread_data_current();
+ curr_thread_data->wait_node.next = NULL;
+
+ /* Get thread data of thread to join */
+ thread_data = (os_thread_data *)thread;
+ bh_assert(thread_data);
+
+ os_mutex_lock(&thread_data->wait_lock);
+
+ if (thread_data->thread_exited) {
+ /* Thread has exited */
+ if (p_retval)
+ *p_retval = thread_data->thread_retval;
+ os_mutex_unlock(&thread_data->wait_lock);
+ return BHT_OK;
+ }
+
+ /* Thread is running */
+ if (!thread_data->thread_wait_list) { /* Waiting list is empty */
+ thread_data->thread_wait_list = thread_data->thread_wait_list_end =
+ &curr_thread_data->wait_node;
+ }
+ else { /* Waiting list isn't empty */
+ /* Add to end of waiting list */
+ thread_data->thread_wait_list_end->next = &curr_thread_data->wait_node;
+ thread_data->thread_wait_list_end = &curr_thread_data->wait_node;
+ }
+
+ os_mutex_unlock(&thread_data->wait_lock);
+
+ /* Wait the sem */
+ os_sem_wait(&curr_thread_data->wait_node.sem);
+ if (p_retval)
+ *p_retval = curr_thread_data->wait_node.retval;
+ return BHT_OK;
+}
+
+int
+os_thread_detach(korp_tid thread)
+{
+    /* Do nothing */
+    (void)thread;
+    return BHT_OK;
+}
+
+void
+os_thread_exit(void *retval)
+{
+ os_thread_cleanup(retval);
+ _endthreadex(0);
+}
+
+int
+os_thread_env_init()
+{
+ os_thread_data *thread_data = TlsGetValue(thread_data_key);
+
+ if (thread_data)
+ /* Already created */
+ return BHT_OK;
+
+ if (!(thread_data = BH_MALLOC(sizeof(os_thread_data))))
+ return BHT_ERROR;
+
+ memset(thread_data, 0, sizeof(os_thread_data));
+ thread_data->thread_id = GetCurrentThreadId();
+
+ if (os_sem_init(&thread_data->wait_node.sem) != BHT_OK)
+ goto fail1;
+
+ if (os_mutex_init(&thread_data->wait_lock) != BHT_OK)
+ goto fail2;
+
+ if (os_cond_init(&thread_data->wait_cond) != BHT_OK)
+ goto fail3;
+
+ if (!TlsSetValue(thread_data_key, thread_data))
+ goto fail4;
+
+ return BHT_OK;
+
+fail4:
+ os_cond_destroy(&thread_data->wait_cond);
+fail3:
+ os_mutex_destroy(&thread_data->wait_lock);
+fail2:
+ os_sem_destroy(&thread_data->wait_node.sem);
+fail1:
+ BH_FREE(thread_data);
+ return BHT_ERROR;
+}
+
+void
+os_thread_env_destroy()
+{
+ os_thread_data *thread_data = TlsGetValue(thread_data_key);
+
+ /* Note that supervisor_thread_data's resources will be destroyed
+ by os_thread_sys_destroy() */
+ if (thread_data && thread_data != &supervisor_thread_data) {
+ TlsSetValue(thread_data_key, NULL);
+ os_cond_destroy(&thread_data->wait_cond);
+ os_mutex_destroy(&thread_data->wait_lock);
+ os_sem_destroy(&thread_data->wait_node.sem);
+ BH_FREE(thread_data);
+ }
+}
+
+bool
+os_thread_env_inited()
+{
+ os_thread_data *thread_data = TlsGetValue(thread_data_key);
+ return thread_data ? true : false;
+}
+
+int
+os_sem_init(korp_sem *sem)
+{
+ bh_assert(sem);
+ *sem = CreateSemaphore(NULL, 0, BH_SEM_COUNT_MAX, NULL);
+ return (*sem != NULL) ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_sem_destroy(korp_sem *sem)
+{
+ bh_assert(sem);
+ CloseHandle(*sem);
+ return BHT_OK;
+}
+
+int
+os_sem_wait(korp_sem *sem)
+{
+ DWORD ret;
+
+ bh_assert(sem);
+
+ ret = WaitForSingleObject(*sem, INFINITE);
+
+ if (ret == WAIT_OBJECT_0)
+ return BHT_OK;
+ else if (ret == WAIT_TIMEOUT)
+ return (int)WAIT_TIMEOUT;
+ else /* WAIT_FAILED or others */
+ return BHT_ERROR;
+}
+
+int
+os_sem_reltimed_wait(korp_sem *sem, uint64 useconds)
+{
+ uint64 mseconds_64;
+ DWORD ret, mseconds;
+
+ bh_assert(sem);
+
+ if (useconds == BHT_WAIT_FOREVER)
+ mseconds = INFINITE;
+ else {
+ mseconds_64 = useconds / 1000;
+
+ if (mseconds_64 < (uint64)(UINT32_MAX - 1)) {
+ mseconds = (uint32)mseconds_64;
+ }
+ else {
+ mseconds = UINT32_MAX - 1;
+ os_printf("Warning: os_sem_reltimed_wait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+ }
+
+ ret = WaitForSingleObject(*sem, mseconds);
+
+ if (ret == WAIT_OBJECT_0)
+ return BHT_OK;
+ else if (ret == WAIT_TIMEOUT)
+ return (int)WAIT_TIMEOUT;
+ else /* WAIT_FAILED or others */
+ return BHT_ERROR;
+}
+
+int
+os_sem_signal(korp_sem *sem)
+{
+ bh_assert(sem);
+ return ReleaseSemaphore(*sem, 1, NULL) != FALSE ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ bh_assert(mutex);
+ *mutex = CreateMutex(NULL, FALSE, NULL);
+ return (*mutex != NULL) ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ bh_assert(mutex);
+ *mutex = CreateMutex(NULL, FALSE, NULL);
+ return (*mutex != NULL) ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ assert(mutex);
+ return CloseHandle(*mutex) ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+
+ if (*mutex == NULL) { /* static initializer? */
+ HANDLE p = CreateMutex(NULL, FALSE, NULL);
+
+ if (!p) {
+ return BHT_ERROR;
+ }
+
+ if (InterlockedCompareExchangePointer((PVOID *)mutex, (PVOID)p, NULL)
+ != NULL) {
+ /* lock has been created by other threads */
+ CloseHandle(p);
+ }
+ }
+
+ ret = WaitForSingleObject(*mutex, INFINITE);
+ return ret != WAIT_FAILED ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ bh_assert(mutex);
+ return ReleaseMutex(*mutex) ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ bh_assert(cond);
+ if (os_mutex_init(&cond->wait_list_lock) != BHT_OK)
+ return BHT_ERROR;
+
+ cond->thread_wait_list = cond->thread_wait_list_end = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ bh_assert(cond);
+ os_mutex_destroy(&cond->wait_list_lock);
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed,
+ uint64 useconds)
+{
+ os_thread_wait_node *node = &thread_data_current()->wait_node;
+
+ node->next = NULL;
+
+ bh_assert(cond);
+ bh_assert(mutex);
+ os_mutex_lock(&cond->wait_list_lock);
+ if (!cond->thread_wait_list) { /* Waiting list is empty */
+ cond->thread_wait_list = cond->thread_wait_list_end = node;
+ }
+ else { /* Waiting list isn't empty */
+ /* Add to end of wait list */
+ cond->thread_wait_list_end->next = node;
+ cond->thread_wait_list_end = node;
+ }
+ os_mutex_unlock(&cond->wait_list_lock);
+
+ /* Unlock mutex, wait sem and lock mutex again */
+ os_mutex_unlock(mutex);
+ int wait_result;
+ if (timed)
+ wait_result = os_sem_reltimed_wait(&node->sem, useconds);
+ else
+ wait_result = os_sem_wait(&node->sem);
+ os_mutex_lock(mutex);
+
+ /* Remove wait node from wait list */
+ os_mutex_lock(&cond->wait_list_lock);
+ if (cond->thread_wait_list == node) {
+ cond->thread_wait_list = node->next;
+
+ if (cond->thread_wait_list_end == node) {
+ bh_assert(node->next == NULL);
+ cond->thread_wait_list_end = NULL;
+ }
+ }
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+
+ if (cond->thread_wait_list_end == node) {
+ cond->thread_wait_list_end = p;
+ }
+ }
+ os_mutex_unlock(&cond->wait_list_lock);
+
+ return wait_result;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ if (useconds == BHT_WAIT_FOREVER) {
+ return os_cond_wait_internal(cond, mutex, false, 0);
+ }
+ else {
+ return os_cond_wait_internal(cond, mutex, true, useconds);
+ }
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ os_mutex_lock(&cond->wait_list_lock);
+ if (cond->thread_wait_list)
+ os_sem_signal(&cond->thread_wait_list->sem);
+ os_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_broadcast(korp_cond *cond)
+{
+ /* Signal all of the wait node of wait list */
+ os_mutex_lock(&cond->wait_list_lock);
+ if (cond->thread_wait_list) {
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p) {
+ os_sem_signal(&p->sem);
+ p = p->next;
+ }
+ }
+
+ os_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+static os_thread_local_attribute uint8 *thread_stack_boundary = NULL;
+
+static ULONG
+GetCurrentThreadStackLimits_Win7(PULONG_PTR p_low_limit,
+ PULONG_PTR p_high_limit)
+{
+ MEMORY_BASIC_INFORMATION mbi;
+ NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
+
+ if (!tib) {
+ os_printf("warning: NtCurrentTeb() failed\n");
+ return -1;
+ }
+
+ *p_high_limit = (ULONG_PTR)tib->StackBase;
+
+ if (VirtualQuery(tib->StackLimit, &mbi, sizeof(mbi))) {
+ *p_low_limit = (ULONG_PTR)mbi.AllocationBase;
+ return 0;
+ }
+
+ os_printf("warning: VirtualQuery() failed\n");
+ return GetLastError();
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+ ULONG_PTR low_limit = 0, high_limit = 0;
+ uint32 page_size;
+
+ if (thread_stack_boundary)
+ return thread_stack_boundary;
+
+ page_size = os_getpagesize();
+ if (GetCurrentThreadStackLimits_Kernel32) {
+ GetCurrentThreadStackLimits_Kernel32(&low_limit, &high_limit);
+ }
+ else {
+ if (0 != GetCurrentThreadStackLimits_Win7(&low_limit, &high_limit))
+ return NULL;
+ }
+
+    /* 4 pages are made inaccessible by the system; we reserve
+       at least one more page for safety */
+ thread_stack_boundary = (uint8 *)(uintptr_t)low_limit + page_size * 5;
+ return thread_stack_boundary;
+}
+
+#ifdef OS_ENABLE_HW_BOUND_CHECK
+static os_thread_local_attribute bool thread_signal_inited = false;
+
+int
+os_thread_signal_init()
+{
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ ULONG StackSizeInBytes = 16 * 1024;
+#endif
+ bool ret;
+
+ if (thread_signal_inited)
+ return 0;
+
+#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
+ ret = SetThreadStackGuarantee(&StackSizeInBytes);
+#else
+ ret = true;
+#endif
+ if (ret)
+ thread_signal_inited = true;
+ return ret ? 0 : -1;
+}
+
+void
+os_thread_signal_destroy()
+{
+ /* Do nothing */
+}
+
+bool
+os_thread_signal_inited()
+{
+ return thread_signal_inited;
+}
+#endif
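
A minimal sketch of how the create/join pair above is meant to be used. It assumes os_thread_sys_init() has already run (normally via the platform init path) and that thread_start_routine_t is a void *(*)(void *), which matches how start_routine is invoked in os_thread_wrapper.

/* Sketch only: spawn a worker and collect its return value. */
#include "platform_api_extension.h"

static void *
worker(void *arg)
{
    os_printf("worker got %p\n", arg);
    return arg; /* delivered to the joiner as the retval */
}

static int
run_worker(void)
{
    korp_tid tid;
    void *retval = NULL;

    if (os_thread_create(&tid, worker, (void *)(uintptr_t)0x1234,
                         BH_APPLET_PRESERVED_STACK_SIZE)
        != BHT_OK)
        return BHT_ERROR;

    if (os_thread_join(tid, &retval) != BHT_OK)
        return BHT_ERROR;

    return retval == (void *)(uintptr_t)0x1234 ? BHT_OK : BHT_ERROR;
}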
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_time.c
new file mode 100644
index 000000000..20e90d5eb
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/windows/win_time.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ struct timespec ts;
+#if defined(__MINGW32__)
+ // https://www.mail-archive.com/mingw-w64-public@lists.sourceforge.net/msg18361.html
+ clock_gettime(CLOCK_REALTIME, &ts);
+#else
+ timespec_get(&ts, TIME_UTC);
+#endif
+
+ return ((uint64)ts.tv_sec) * 1000 * 1000 + ((uint64)ts.tv_nsec) / 1000;
+}
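
A small hedged sketch of timing an interval with the function above; note that on this port the value is derived from wall-clock time, so it can jump if the system clock is adjusted.

/* Sketch only: measure how long a callback takes, in microseconds. */
#include "platform_api_vmcore.h"

static uint64
measure_us(void (*fn)(void))
{
    uint64 start = os_time_get_boot_microsecond();
    fn();
    return os_time_get_boot_microsecond() - start;
}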
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h
new file mode 100644
index 000000000..d2a94e4ad
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <autoconf.h>
+#include <version.h>
+
+#if KERNEL_VERSION_NUMBER < 0x030200 /* version 3.2.0 */
+#include <zephyr.h>
+#include <kernel.h>
+#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
+#include <sys/printk.h>
+#else
+#include <misc/printk.h>
+#endif
+#else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
+#include <zephyr/kernel.h>
+#include <zephyr/sys/printk.h>
+#endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#ifndef CONFIG_NET_BUF_USER_DATA_SIZE
+#define CONFIG_NET_BUF_USER_DATA_SIZE 0
+#endif
+
+#if KERNEL_VERSION_NUMBER < 0x030200 /* version 3.2.0 */
+#include <net/net_pkt.h>
+#include <net/net_if.h>
+#include <net/net_ip.h>
+#include <net/net_core.h>
+#include <net/net_context.h>
+
+#ifdef CONFIG_ARM_MPU
+#include <arch/arm/aarch32/cortex_m/cmsis.h>
+#endif
+#else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
+#include <zephyr/net/net_pkt.h>
+#include <zephyr/net/net_if.h>
+#include <zephyr/net/net_ip.h>
+#include <zephyr/net/net_core.h>
+#include <zephyr/net/net_context.h>
+
+#ifdef CONFIG_ARM_MPU
+#include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
+#endif
+#endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
+
+#ifndef BH_PLATFORM_ZEPHYR
+#define BH_PLATFORM_ZEPHYR
+#endif
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 7
+
+typedef struct k_thread korp_thread;
+typedef korp_thread *korp_tid;
+typedef struct k_mutex korp_mutex;
+typedef unsigned int korp_sem;
+
+struct os_thread_wait_node;
+typedef struct os_thread_wait_node *os_thread_wait_list;
+typedef struct korp_cond {
+ struct k_mutex wait_list_lock;
+ os_thread_wait_list thread_wait_list;
+} korp_cond;
+
+#ifndef Z_TIMEOUT_MS
+#define Z_TIMEOUT_MS(ms) ms
+#endif
+
+/* clang-format off */
+void abort(void);
+size_t strspn(const char *s, const char *accept);
+size_t strcspn(const char *s, const char *reject);
+
+/* math functions which are not provided by os */
+double atan(double x);
+double atan2(double y, double x);
+double sqrt(double x);
+double floor(double x);
+double ceil(double x);
+double fmin(double x, double y);
+double fmax(double x, double y);
+double rint(double x);
+double fabs(double x);
+double trunc(double x);
+float sqrtf(float x);
+float floorf(float x);
+float ceilf(float x);
+float fminf(float x, float y);
+float fmaxf(float x, float y);
+float rintf(float x);
+float fabsf(float x);
+float truncf(float x);
+int signbit(double x);
+int isnan(double x);
+double pow(double x, double y);
+double scalbn(double x, int n);
+
+unsigned long long int strtoull(const char *nptr, char **endptr, int base);
+double strtod(const char *nptr, char **endptr);
+float strtof(const char *nptr, char **endptr);
+/* clang-format on */
+
+#if KERNEL_VERSION_NUMBER >= 0x030100 /* version 3.1.0 */
+#define BH_HAS_SQRT
+#define BH_HAS_SQRTF
+#endif
+
+/**
+ * @brief Allocate executable memory
+ *
+ * @param size size of the memory to be allocated
+ *
+ * @return the address of the allocated memory, or NULL if the allocation fails
+ */
+typedef void *(*exec_mem_alloc_func_t)(unsigned int size);
+
+/**
+ * @brief Release executable memory
+ *
+ * @param addr the address of the executable memory to be released
+ */
+typedef void (*exec_mem_free_func_t)(void *addr);
+
+/* The function below is called by an external project to set the function
+ * pointers that will be used to allocate/free executable memory. Otherwise
+ * the default mechanism will be used.
+ */
+void
+set_exec_mem_alloc_func(exec_mem_alloc_func_t alloc_func,
+ exec_mem_free_func_t free_func);
+
+#endif
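
A hedged sketch of wiring up the executable-memory hooks declared above. Using Zephyr's k_malloc/k_free here is purely illustrative (and requires a configured kernel heap); a real port would return memory from a region the MPU allows to execute. The small wrappers only adapt the unsigned int size parameter.

/* Sketch only: register custom executable-memory hooks. */
#include "platform_api_vmcore.h" /* pulls in platform_internal.h */

static void *
my_exec_alloc(unsigned int size)
{
    return k_malloc(size);
}

static void
my_exec_free(void *addr)
{
    k_free(addr);
}

void
register_exec_mem_hooks(void)
{
    set_exec_mem_alloc_func(my_exec_alloc, my_exec_free);
}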
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake
new file mode 100644
index 000000000..9b043b52f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake
@@ -0,0 +1,16 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_ZEPHYR)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/math/platform_api_math.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c
new file mode 100644
index 000000000..b4f2e5ec7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* function pointers for executable memory management */
+static exec_mem_alloc_func_t exec_mem_alloc_func = NULL;
+static exec_mem_free_func_t exec_mem_free_func = NULL;
+
+#if WASM_ENABLE_AOT != 0
+#ifdef CONFIG_ARM_MPU
+/**
+ * This function allows execution from the SRAM region.
+ * This is needed for AOT code because, by default, the SoC
+ * disables execution from SRAM.
+ */
+static void
+disable_mpu_rasr_xn(void)
+{
+ uint32 index;
+    /* Keep the max index at 8 (irrespective of the SoC) because the SRAM
+       region is most likely configured at index 2. */
+ for (index = 0U; index < 8; index++) {
+ MPU->RNR = index;
+        if (MPU->RASR & MPU_RASR_XN_Msk) {
+            /* Clear the XN (execute-never) bit for this region */
+            MPU->RASR &= ~MPU_RASR_XN_Msk;
+        }
+ }
+}
+#endif /* end of CONFIG_ARM_MPU */
+#endif
+
+static int
+_stdout_hook_iwasm(int c)
+{
+ printk("%c", (char)c);
+ return 1;
+}
+
+int
+os_thread_sys_init();
+
+void
+os_thread_sys_destroy();
+
+int
+bh_platform_init()
+{
+ extern void __stdout_hook_install(int (*hook)(int));
+ /* Enable printf() in Zephyr */
+ __stdout_hook_install(_stdout_hook_iwasm);
+
+#if WASM_ENABLE_AOT != 0
+#ifdef CONFIG_ARM_MPU
+ /* Enable executable memory support */
+ disable_mpu_rasr_xn();
+#endif
+#endif
+
+ return os_thread_sys_init();
+}
+
+void
+bh_platform_destroy()
+{
+ os_thread_sys_destroy();
+}
+
+void *
+os_malloc(unsigned size)
+{
+ return NULL;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return NULL;
+}
+
+void
+os_free(void *ptr)
+{}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+#if 0
+struct out_context {
+ int count;
+};
+
+typedef int (*out_func_t)(int c, void *ctx);
+
+static int
+char_out(int c, void *ctx)
+{
+ struct out_context *out_ctx = (struct out_context*)ctx;
+ out_ctx->count++;
+ return _stdout_hook_iwasm(c);
+}
+
+int
+os_vprintf(const char *fmt, va_list ap)
+{
+#if 0
+ struct out_context ctx = { 0 };
+ cbvprintf(char_out, &ctx, fmt, ap);
+ return ctx.count;
+#else
+ vprintk(fmt, ap);
+ return 0;
+#endif
+}
+#endif
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
+
+#if KERNEL_VERSION_NUMBER <= 0x020400 /* version 2.4.0 */
+void
+abort(void)
+{
+    int i = 0;
+    /* Intentionally divide by zero to raise a fatal fault */
+    os_printf("%d\n", 1 / i);
+}
+#endif
+
+#if KERNEL_VERSION_NUMBER <= 0x010E01 /* version 1.14.1 */
+size_t
+strspn(const char *s, const char *accept)
+{
+ os_printf("## unimplemented function %s called", __FUNCTION__);
+ return 0;
+}
+
+size_t
+strcspn(const char *s, const char *reject)
+{
+ os_printf("## unimplemented function %s called", __FUNCTION__);
+ return 0;
+}
+#endif
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ if ((uint64)size >= UINT32_MAX)
+ return NULL;
+ if (exec_mem_alloc_func)
+ return exec_mem_alloc_func((uint32)size);
+ else
+ return BH_MALLOC(size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ if (exec_mem_free_func)
+ exec_mem_free_func(addr);
+ else
+ BH_FREE(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush()
+{
+#if defined(CONFIG_CPU_CORTEX_M7) && defined(CONFIG_ARM_MPU)
+ uint32 key;
+ key = irq_lock();
+ SCB_CleanDCache();
+ irq_unlock(key);
+#elif defined(CONFIG_SOC_CVF_EM7D) && defined(CONFIG_ARC_MPU) \
+ && defined(CONFIG_CACHE_FLUSHING)
+ __asm__ __volatile__("sync");
+ z_arc_v2_aux_reg_write(_ARC_V2_DC_FLSH, BIT(0));
+ __asm__ __volatile__("sync");
+#endif
+}
+
+void
+set_exec_mem_alloc_func(exec_mem_alloc_func_t alloc_func,
+ exec_mem_free_func_t free_func)
+{
+ exec_mem_alloc_func = alloc_func;
+ exec_mem_free_func = free_func;
+}
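
As a complement, a hedged sketch of the consumer side: on this port os_mmap ignores prot/flags and simply forwards to the registered hook (or BH_MALLOC), and os_dcache_flush is what makes freshly written code visible to instruction fetch. publish_code is illustrative only, not part of WAMR.

/* Sketch only: copy generated code into executable memory. */
static void *
publish_code(const uint8 *code, uint32 size)
{
    void *p = os_mmap(NULL, size, 0, 0); /* prot/flags unused by this port */

    if (!p)
        return NULL;

    memcpy(p, code, size);
    os_dcache_flush(); /* clean the D-cache so the CPU fetches the new code */
    return p;
}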
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c
new file mode 100644
index 000000000..1ee2c5cef
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* clang-format off */
+#define bh_assert(v) do { \
+ if (!(v)) { \
+ printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
+ #v, __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+/* clang-format on */
+
+#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) \
+ || KERNEL_VERSION_NUMBER > 0x020300 /* version 2.3.0 */
+#define BH_ENABLE_ZEPHYR_MPU_STACK 1
+#elif !defined(BH_ENABLE_ZEPHYR_MPU_STACK)
+#define BH_ENABLE_ZEPHYR_MPU_STACK 0
+#endif
+#if !defined(BH_ZEPHYR_MPU_STACK_SIZE)
+#define BH_ZEPHYR_MPU_STACK_SIZE APP_THREAD_STACK_SIZE_MIN
+#endif
+#if !defined(BH_ZEPHYR_MPU_STACK_COUNT)
+#define BH_ZEPHYR_MPU_STACK_COUNT 4
+#endif
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+static K_THREAD_STACK_ARRAY_DEFINE(mpu_stacks, BH_ZEPHYR_MPU_STACK_COUNT,
+ BH_ZEPHYR_MPU_STACK_SIZE);
+static bool mpu_stack_allocated[BH_ZEPHYR_MPU_STACK_COUNT];
+static struct k_mutex mpu_stack_lock;
+
+static char *
+mpu_stack_alloc()
+{
+ int i;
+
+ k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+ for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
+ if (!mpu_stack_allocated[i]) {
+ mpu_stack_allocated[i] = true;
+ k_mutex_unlock(&mpu_stack_lock);
+ return (char *)mpu_stacks[i];
+ }
+ }
+ k_mutex_unlock(&mpu_stack_lock);
+ return NULL;
+}
+
+static void
+mpu_stack_free(char *stack)
+{
+ int i;
+
+ k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+ for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
+ if ((char *)mpu_stacks[i] == stack)
+ mpu_stack_allocated[i] = false;
+ }
+ k_mutex_unlock(&mpu_stack_lock);
+}
+#endif
+
+typedef struct os_thread_wait_node {
+ struct k_sem sem;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+typedef struct os_thread_data {
+ /* Next thread data */
+ struct os_thread_data *next;
+ /* Zephyr thread handle */
+ korp_tid tid;
+ /* Jeff thread local root */
+ void *tlr;
+ /* Lock for waiting list */
+ struct k_mutex wait_list_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+ /* Thread stack size */
+ unsigned stack_size;
+#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
+ /* Thread stack */
+ char stack[1];
+#else
+ char *stack;
+#endif
+} os_thread_data;
+
+typedef struct os_thread_obj {
+ struct k_thread thread;
+ /* Whether the thread is terminated and this thread object is to
+ be freed in the future. */
+ bool to_be_freed;
+ struct os_thread_obj *next;
+} os_thread_obj;
+
+static bool is_thread_sys_inited = false;
+
+/* Thread data of supervisor thread */
+static os_thread_data supervisor_thread_data;
+
+/* Lock for thread data list */
+static struct k_mutex thread_data_lock;
+
+/* Thread data list */
+static os_thread_data *thread_data_list = NULL;
+
+/* Lock for thread object list */
+static struct k_mutex thread_obj_lock;
+
+/* Thread object list */
+static os_thread_obj *thread_obj_list = NULL;
+
+static void
+thread_data_list_add(os_thread_data *thread_data)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (!thread_data_list)
+ thread_data_list = thread_data;
+ else {
+ /* If already in list, just return */
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p == thread_data) {
+ k_mutex_unlock(&thread_data_lock);
+ return;
+ }
+ p = p->next;
+ }
+
+ /* Set as head of list */
+ thread_data->next = thread_data_list;
+ thread_data_list = thread_data;
+ }
+ k_mutex_unlock(&thread_data_lock);
+}
+
+static void
+thread_data_list_remove(os_thread_data *thread_data)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (thread_data_list) {
+ if (thread_data_list == thread_data)
+ thread_data_list = thread_data_list->next;
+ else {
+ /* Search and remove it from list */
+ os_thread_data *p = thread_data_list;
+ while (p && p->next != thread_data)
+ p = p->next;
+ if (p && p->next == thread_data)
+ p->next = p->next->next;
+ }
+ }
+ k_mutex_unlock(&thread_data_lock);
+}
+
+static os_thread_data *
+thread_data_list_lookup(k_tid_t tid)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (thread_data_list) {
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p->tid == tid) {
+ /* Found */
+ k_mutex_unlock(&thread_data_lock);
+ return p;
+ }
+ p = p->next;
+ }
+ }
+ k_mutex_unlock(&thread_data_lock);
+ return NULL;
+}
+
+static void
+thread_obj_list_add(os_thread_obj *thread_obj)
+{
+ k_mutex_lock(&thread_obj_lock, K_FOREVER);
+ if (!thread_obj_list)
+ thread_obj_list = thread_obj;
+ else {
+ /* Set as head of list */
+ thread_obj->next = thread_obj_list;
+ thread_obj_list = thread_obj;
+ }
+ k_mutex_unlock(&thread_obj_lock);
+}
+
+static void
+thread_obj_list_reclaim()
+{
+ os_thread_obj *p, *p_prev;
+ k_mutex_lock(&thread_obj_lock, K_FOREVER);
+ p_prev = NULL;
+ p = thread_obj_list;
+ while (p) {
+ if (p->to_be_freed) {
+ if (p_prev == NULL) { /* p is the head of list */
+ thread_obj_list = p->next;
+ BH_FREE(p);
+ p = thread_obj_list;
+ }
+ else { /* p is not the head of list */
+ p_prev->next = p->next;
+ BH_FREE(p);
+ p = p_prev->next;
+ }
+ }
+ else {
+ p_prev = p;
+ p = p->next;
+ }
+ }
+ k_mutex_unlock(&thread_obj_lock);
+}
+
+int
+os_thread_sys_init()
+{
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ k_mutex_init(&mpu_stack_lock);
+#endif
+ k_mutex_init(&thread_data_lock);
+ k_mutex_init(&thread_obj_lock);
+
+ /* Initialize supervisor thread data */
+ memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
+ supervisor_thread_data.tid = k_current_get();
+ /* Set as head of thread data list */
+ thread_data_list = &supervisor_thread_data;
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+}
+
+void
+os_thread_sys_destroy(void)
+{
+ if (is_thread_sys_inited) {
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ k_tid_t tid = k_current_get();
+ return thread_data_list_lookup(tid);
+}
+
+static void
+os_thread_cleanup(void)
+{
+ os_thread_data *thread_data = thread_data_current();
+
+ bh_assert(thread_data != NULL);
+ k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+ if (thread_data->thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_data->thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ k_sem_give(&head->sem);
+ /* head will be freed by joining thread */
+ head = next;
+ }
+ thread_data->thread_wait_list = NULL;
+ }
+ k_mutex_unlock(&thread_data->wait_list_lock);
+
+ thread_data_list_remove(thread_data);
+    /* Set the flag so that a later os_thread_create() call
+       can free this thread object */
+ ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ mpu_stack_free(thread_data->stack);
+#endif
+ BH_FREE(thread_data);
+}
+
+static void
+os_thread_wrapper(void *start, void *arg, void *thread_data)
+{
+ /* Set thread custom data */
+ ((os_thread_data *)thread_data)->tid = k_current_get();
+ thread_data_list_add(thread_data);
+
+ ((thread_start_routine_t)start)(arg);
+ os_thread_cleanup();
+}
+
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(p_tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ korp_tid tid;
+ os_thread_data *thread_data;
+ unsigned thread_data_size;
+
+ if (!p_tid || !stack_size)
+ return BHT_ERROR;
+
+ /* Free the thread objects of terminated threads */
+ thread_obj_list_reclaim();
+
+ /* Create and initialize thread object */
+ if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
+ return BHT_ERROR;
+
+ memset(tid, 0, sizeof(os_thread_obj));
+
+ /* Create and initialize thread data */
+#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
+ if (stack_size < APP_THREAD_STACK_SIZE_MIN)
+ stack_size = APP_THREAD_STACK_SIZE_MIN;
+ thread_data_size = offsetof(os_thread_data, stack) + stack_size;
+#else
+ stack_size = BH_ZEPHYR_MPU_STACK_SIZE;
+ thread_data_size = sizeof(os_thread_data);
+#endif
+ if (!(thread_data = BH_MALLOC(thread_data_size))) {
+ goto fail1;
+ }
+
+ memset(thread_data, 0, thread_data_size);
+ k_mutex_init(&thread_data->wait_list_lock);
+ thread_data->stack_size = stack_size;
+ thread_data->tid = tid;
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ if (!(thread_data->stack = mpu_stack_alloc())) {
+ goto fail2;
+ }
+#endif
+
+ /* Create the thread */
+ if (!((tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
+ stack_size, os_thread_wrapper, start, arg,
+ thread_data, prio, 0, K_NO_WAIT)))) {
+ goto fail3;
+ }
+
+ bh_assert(tid == thread_data->tid);
+
+ /* Set thread custom data */
+ thread_data_list_add(thread_data);
+ thread_obj_list_add((os_thread_obj *)tid);
+ *p_tid = tid;
+ return BHT_OK;
+
+fail3:
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ mpu_stack_free(thread_data->stack);
+fail2:
+#endif
+ BH_FREE(thread_data);
+fail1:
+ BH_FREE(tid);
+ return BHT_ERROR;
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)k_current_get();
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ (void)value_ptr;
+ os_thread_data *thread_data;
+ os_thread_wait_node *node;
+
+ /* Create wait node and append it to wait list */
+ if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
+ return BHT_ERROR;
+
+ k_sem_init(&node->sem, 0, 1);
+ node->next = NULL;
+
+ /* Get thread data */
+ thread_data = thread_data_list_lookup(thread);
+ bh_assert(thread_data != NULL);
+
+ k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+ if (!thread_data->thread_wait_list)
+ thread_data->thread_wait_list = node;
+ else {
+ /* Add to end of waiting list */
+ os_thread_wait_node *p = thread_data->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ k_mutex_unlock(&thread_data->wait_list_lock);
+
+ /* Wait the sem */
+ k_sem_take(&node->sem, K_FOREVER);
+
+ /* Wait some time for the thread to be actually terminated */
+ k_sleep(Z_TIMEOUT_MS(100));
+
+ /* Destroy resource */
+ BH_FREE(node);
+ return BHT_OK;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ k_mutex_init(mutex);
+ return BHT_OK;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ k_mutex_init(mutex);
+ return BHT_OK;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ (void)mutex;
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ return k_mutex_lock(mutex, K_FOREVER);
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
+ return k_mutex_unlock(mutex);
+#else
+ k_mutex_unlock(mutex);
+ return 0;
+#endif
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ k_mutex_init(&cond->wait_list_lock);
+ cond->thread_wait_list = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ (void)cond;
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
+{
+ os_thread_wait_node *node;
+
+ /* Create wait node and append it to wait list */
+ if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
+ return BHT_ERROR;
+
+ k_sem_init(&node->sem, 0, 1);
+ node->next = NULL;
+
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (!cond->thread_wait_list)
+ cond->thread_wait_list = node;
+ else {
+ /* Add to end of wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ /* Unlock mutex, wait sem and lock mutex again */
+ k_mutex_unlock(mutex);
+ k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
+ k_mutex_lock(mutex, K_FOREVER);
+
+ /* Remove wait node from wait list */
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (cond->thread_wait_list == node)
+ cond->thread_wait_list = node->next;
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+ }
+ BH_FREE(node);
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ if (useconds == BHT_WAIT_FOREVER) {
+ return os_cond_wait_internal(cond, mutex, false, 0);
+ }
+ else {
+ uint64 mills_64 = useconds / 1000;
+ int32 mills;
+
+ if (mills_64 < (uint64)INT32_MAX) {
+ mills = (int32)mills_64;
+ }
+ else {
+ mills = INT32_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+ return os_cond_wait_internal(cond, mutex, true, mills);
+ }
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (cond->thread_wait_list)
+ k_sem_give(&cond->thread_wait_list->sem);
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+#if defined(CONFIG_THREAD_STACK_INFO)
+ korp_tid thread = k_current_get();
+ return (uint8 *)thread->stack_info.start;
+#else
+ return NULL;
+#endif
+}
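
A hedged sketch of the classic handshake these primitives support on Zephyr. The flag and helpers are illustrative; the while loop re-checks the predicate because os_cond_signal only wakes the head waiter and a wakeup can race with the flag update.

/* Sketch only: one producer signals, one consumer waits. */
#include "platform_api_vmcore.h"

static korp_mutex lock;
static korp_cond cond;
static bool ready = false;

void
handshake_init(void)
{
    os_mutex_init(&lock);
    os_cond_init(&cond);
}

void
producer_signal(void)
{
    os_mutex_lock(&lock);
    ready = true;
    os_cond_signal(&cond);
    os_mutex_unlock(&lock);
}

void
consumer_wait(void)
{
    os_mutex_lock(&lock);
    while (!ready) /* re-check the predicate after every wakeup */
        os_cond_wait(&cond, &lock);
    os_mutex_unlock(&lock);
}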
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c
new file mode 100644
index 000000000..99eb3b354
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ return k_uptime_get() * 1000;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/SConscript b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/SConscript
new file mode 100644
index 000000000..358f2ffca
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/SConscript
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2021, RT-Thread Development Team
+#
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+from building import *
+import os
+
+cwd = GetCurrentDir()
+
+src = Glob('*.c')
+CPPPATH = [cwd]
+
+group = DefineGroup('iwasm_shared_utils', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.c
new file mode 100644
index 000000000..246c55d1b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_assert.h"
+
+void
+bh_assert_internal(int64 v, const char *file_name, int line_number,
+ const char *expr_string)
+{
+ if (v)
+ return;
+
+ if (!file_name)
+ file_name = "NULL FILENAME";
+
+ if (!expr_string)
+ expr_string = "NULL EXPR_STRING";
+
+ os_printf("\nASSERTION FAILED: %s, at file %s, line %d\n", expr_string,
+ file_name, line_number);
+
+ abort();
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.h
new file mode 100644
index 000000000..b7c995af8
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_assert.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_ASSERT_H
+#define _BH_ASSERT_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if BH_DEBUG != 0
+void
+bh_assert_internal(int64 v, const char *file_name, int line_number,
+ const char *expr_string);
+#define bh_assert(expr) \
+ bh_assert_internal((int64)(uintptr_t)(expr), __FILE__, __LINE__, #expr)
+#else
+#define bh_assert(expr) (void)0
+#endif /* end of BH_DEBUG */
+
+#if !defined(__has_extension)
+#define __has_extension(a) 0
+#endif
+
+#if __STDC_VERSION__ >= 201112L \
+ || (defined(__GNUC__) && __GNUC__ * 0x100 + __GNUC_MINOR__ >= 0x406) \
+ || __has_extension(c_static_assert)
+
+#define bh_static_assert(expr) _Static_assert(expr, #expr)
+#else
+#define bh_static_assert(expr) /* nothing */
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _BH_ASSERT_H */
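
A short hedged example of the two macros above: bh_static_assert fails at compile time, while bh_assert is active only when BH_DEBUG is non-zero and otherwise compiles away.

/* Sketch only: typical assertion usage. */
#include "bh_assert.h"

bh_static_assert(sizeof(uint32) == 4);

static void
check_header(const uint8 *buf, uint32 len)
{
    bh_assert(buf != NULL);
    bh_assert(len >= 8);
    (void)buf;
    (void)len;
}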
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.c
new file mode 100644
index 000000000..aeeab26bd
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_common.h"
+
+static char *
+align_ptr(char *src, unsigned int b)
+{
+ uintptr_t v = (uintptr_t)src;
+ uintptr_t m = b - 1;
+ return (char *)((v + m) & ~m);
+}
+
+/* Memory copy, with word alignment */
+int
+b_memcpy_wa(void *s1, unsigned int s1max, const void *s2, unsigned int n)
+{
+ char *dest = (char *)s1;
+ char *src = (char *)s2;
+
+ char *pa = align_ptr(src, 4);
+ char *pb = align_ptr((src + n), 4);
+
+ unsigned int buff;
+ const char *p_byte_read;
+
+ unsigned int *p;
+ char *ps;
+
+ if (pa > src) {
+ pa -= 4;
+ }
+
+ for (p = (unsigned int *)pa; p < (unsigned int *)pb; p++) {
+ buff = *(p);
+ p_byte_read = ((char *)&buff);
+
+ /* read leading word */
+ if ((char *)p <= src) {
+ for (ps = src; ps < ((char *)p + 4); ps++) {
+ if (ps >= src + n) {
+ break;
+ }
+ p_byte_read = ((char *)&buff) + (ps - (char *)p);
+ *dest++ = *p_byte_read;
+ }
+ }
+ /* read trailing word */
+ else if ((char *)p >= pb - 4) {
+ for (ps = (char *)p; ps < src + n; ps++) {
+ *dest++ = *p_byte_read++;
+ }
+ }
+        /* read intermediate word(s) */
+ else {
+ if ((char *)p + 4 >= src + n) {
+ for (ps = (char *)p; ps < src + n; ps++) {
+ *dest++ = *p_byte_read++;
+ }
+ }
+ else {
+ *(unsigned int *)dest = buff;
+ dest += 4;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+b_memcpy_s(void *s1, unsigned int s1max, const void *s2, unsigned int n)
+{
+ char *dest = (char *)s1;
+ char *src = (char *)s2;
+ if (n == 0) {
+ return 0;
+ }
+
+ if (s1 == NULL) {
+ return -1;
+ }
+ if (s2 == NULL || n > s1max) {
+ memset(dest, 0, s1max);
+ return -1;
+ }
+ memcpy(dest, src, n);
+ return 0;
+}
+
+int
+b_memmove_s(void *s1, unsigned int s1max, const void *s2, unsigned int n)
+{
+ char *dest = (char *)s1;
+ char *src = (char *)s2;
+ if (n == 0) {
+ return 0;
+ }
+
+ if (s1 == NULL) {
+ return -1;
+ }
+ if (s2 == NULL || n > s1max) {
+ memset(dest, 0, s1max);
+ return -1;
+ }
+ memmove(dest, src, n);
+ return 0;
+}
+
+int
+b_strcat_s(char *s1, unsigned int s1max, const char *s2)
+{
+ if (NULL == s1 || NULL == s2 || s1max < (strlen(s1) + strlen(s2) + 1)) {
+ return -1;
+ }
+
+ memcpy(s1 + strlen(s1), s2, strlen(s2) + 1);
+ return 0;
+}
+
+int
+b_strcpy_s(char *s1, unsigned int s1max, const char *s2)
+{
+ if (NULL == s1 || NULL == s2 || s1max < (strlen(s2) + 1)) {
+ return -1;
+ }
+
+ memcpy(s1, s2, strlen(s2) + 1);
+ return 0;
+}
+
+char *
+bh_strdup(const char *s)
+{
+ uint32 size;
+ char *s1 = NULL;
+
+ if (s) {
+ size = (uint32)(strlen(s) + 1);
+ if ((s1 = BH_MALLOC(size)))
+ bh_memcpy_s(s1, size, s, size);
+ }
+ return s1;
+}
+
+char *
+wa_strdup(const char *s)
+{
+ uint32 size;
+ char *s1 = NULL;
+
+ if (s) {
+ size = (uint32)(strlen(s) + 1);
+ if ((s1 = WA_MALLOC(size)))
+ bh_memcpy_s(s1, size, s, size);
+ }
+ return s1;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.h
new file mode 100644
index 000000000..edb962eb1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_common.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_COMMON_H
+#define _BH_COMMON_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define bh_memcpy_s(dest, dlen, src, slen) \
+ do { \
+ int _ret = slen == 0 ? 0 : b_memcpy_s(dest, dlen, src, slen); \
+ (void)_ret; \
+ bh_assert(_ret == 0); \
+ } while (0)
+
+#define bh_memcpy_wa(dest, dlen, src, slen) \
+ do { \
+ int _ret = slen == 0 ? 0 : b_memcpy_wa(dest, dlen, src, slen); \
+ (void)_ret; \
+ bh_assert(_ret == 0); \
+ } while (0)
+
+#define bh_memmove_s(dest, dlen, src, slen) \
+ do { \
+ int _ret = slen == 0 ? 0 : b_memmove_s(dest, dlen, src, slen); \
+ (void)_ret; \
+ bh_assert(_ret == 0); \
+ } while (0)
+
+#define bh_strcat_s(dest, dlen, src) \
+ do { \
+ int _ret = b_strcat_s(dest, dlen, src); \
+ (void)_ret; \
+ bh_assert(_ret == 0); \
+ } while (0)
+
+#define bh_strcpy_s(dest, dlen, src) \
+ do { \
+ int _ret = b_strcpy_s(dest, dlen, src); \
+ (void)_ret; \
+ bh_assert(_ret == 0); \
+ } while (0)
+
+int
+b_memcpy_s(void *s1, unsigned int s1max, const void *s2, unsigned int n);
+int
+b_memcpy_wa(void *s1, unsigned int s1max, const void *s2, unsigned int n);
+int
+b_memmove_s(void *s1, unsigned int s1max, const void *s2, unsigned int n);
+int
+b_strcat_s(char *s1, unsigned int s1max, const char *s2);
+int
+b_strcpy_s(char *s1, unsigned int s1max, const char *s2);
+
+/* strdup with string allocated by BH_MALLOC */
+char *
+bh_strdup(const char *s);
+
+/* strdup with string allocated by WA_MALLOC */
+char *
+wa_strdup(const char *s);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
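
A hedged example of the bounded helpers above: the b_* functions return 0 on success and -1 on failure (truncation is never silent), while the bh_* macro wrappers assert success instead of returning it. build_path is illustrative only.

/* Sketch only: build "dir/file" into a fixed-size buffer, checking bounds. */
#include "bh_common.h"

static int
build_path(char *out, unsigned int out_size, const char *dir, const char *file)
{
    if (b_strcpy_s(out, out_size, dir) != 0)
        return -1;
    if (b_strcat_s(out, out_size, "/") != 0)
        return -1;
    return b_strcat_s(out, out_size, file);
}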
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.c
new file mode 100644
index 000000000..3502239ad
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_hashmap.h"
+
+typedef struct HashMapElem {
+ void *key;
+ void *value;
+ struct HashMapElem *next;
+} HashMapElem;
+
+struct HashMap {
+ /* size of element array */
+ uint32 size;
+ /* lock for elements */
+ korp_mutex *lock;
+ /* hash function of key */
+ HashFunc hash_func;
+ /* key equal function */
+ KeyEqualFunc key_equal_func;
+ KeyDestroyFunc key_destroy_func;
+ ValueDestroyFunc value_destroy_func;
+ HashMapElem *elements[1];
+};
+
+HashMap *
+bh_hash_map_create(uint32 size, bool use_lock, HashFunc hash_func,
+ KeyEqualFunc key_equal_func, KeyDestroyFunc key_destroy_func,
+ ValueDestroyFunc value_destroy_func)
+{
+ HashMap *map;
+ uint64 total_size;
+
+ if (size < HASH_MAP_MIN_SIZE)
+ size = HASH_MAP_MIN_SIZE;
+
+ if (size > HASH_MAP_MAX_SIZE) {
+ LOG_ERROR("HashMap create failed: size is too large.\n");
+ return NULL;
+ }
+
+ if (!hash_func || !key_equal_func) {
+ LOG_ERROR("HashMap create failed: hash function or key equal function "
+ " is NULL.\n");
+ return NULL;
+ }
+
+ total_size = offsetof(HashMap, elements)
+ + sizeof(HashMapElem *) * (uint64)size
+ + (use_lock ? sizeof(korp_mutex) : 0);
+
+ if (total_size >= UINT32_MAX || !(map = BH_MALLOC((uint32)total_size))) {
+ LOG_ERROR("HashMap create failed: alloc memory failed.\n");
+ return NULL;
+ }
+
+ memset(map, 0, (uint32)total_size);
+
+ if (use_lock) {
+ map->lock = (korp_mutex *)((uint8 *)map + offsetof(HashMap, elements)
+ + sizeof(HashMapElem *) * size);
+ if (os_mutex_init(map->lock)) {
+ LOG_ERROR("HashMap create failed: init map lock failed.\n");
+ BH_FREE(map);
+ return NULL;
+ }
+ }
+
+ map->size = size;
+ map->hash_func = hash_func;
+ map->key_equal_func = key_equal_func;
+ map->key_destroy_func = key_destroy_func;
+ map->value_destroy_func = value_destroy_func;
+ return map;
+}
+
+bool
+bh_hash_map_insert(HashMap *map, void *key, void *value)
+{
+ uint32 index;
+ HashMapElem *elem;
+
+ if (!map || !key) {
+ LOG_ERROR("HashMap insert elem failed: map or key is NULL.\n");
+ return false;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ index = map->hash_func(key) % map->size;
+ elem = map->elements[index];
+ while (elem) {
+ if (map->key_equal_func(elem->key, key)) {
+ LOG_ERROR("HashMap insert elem failed: duplicated key found.\n");
+ goto fail;
+ }
+ elem = elem->next;
+ }
+
+ if (!(elem = BH_MALLOC(sizeof(HashMapElem)))) {
+ LOG_ERROR("HashMap insert elem failed: alloc memory failed.\n");
+ goto fail;
+ }
+
+ elem->key = key;
+ elem->value = value;
+ elem->next = map->elements[index];
+ map->elements[index] = elem;
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return true;
+
+fail:
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return false;
+}
+
+void *
+bh_hash_map_find(HashMap *map, void *key)
+{
+ uint32 index;
+ HashMapElem *elem;
+ void *value;
+
+ if (!map || !key) {
+ LOG_ERROR("HashMap find elem failed: map or key is NULL.\n");
+ return NULL;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ index = map->hash_func(key) % map->size;
+ elem = map->elements[index];
+
+ while (elem) {
+ if (map->key_equal_func(elem->key, key)) {
+ value = elem->value;
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return value;
+ }
+ elem = elem->next;
+ }
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return NULL;
+}
+
+bool
+bh_hash_map_update(HashMap *map, void *key, void *value, void **p_old_value)
+{
+ uint32 index;
+ HashMapElem *elem;
+
+ if (!map || !key) {
+ LOG_ERROR("HashMap update elem failed: map or key is NULL.\n");
+ return false;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ index = map->hash_func(key) % map->size;
+ elem = map->elements[index];
+
+ while (elem) {
+ if (map->key_equal_func(elem->key, key)) {
+ if (p_old_value)
+ *p_old_value = elem->value;
+ elem->value = value;
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return true;
+ }
+ elem = elem->next;
+ }
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return false;
+}
+
+bool
+bh_hash_map_remove(HashMap *map, void *key, void **p_old_key,
+ void **p_old_value)
+{
+ uint32 index;
+ HashMapElem *elem, *prev;
+
+ if (!map || !key) {
+ LOG_ERROR("HashMap remove elem failed: map or key is NULL.\n");
+ return false;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ index = map->hash_func(key) % map->size;
+ prev = elem = map->elements[index];
+
+ while (elem) {
+ if (map->key_equal_func(elem->key, key)) {
+ if (p_old_key)
+ *p_old_key = elem->key;
+ if (p_old_value)
+ *p_old_value = elem->value;
+
+ if (elem == map->elements[index])
+ map->elements[index] = elem->next;
+ else
+ prev->next = elem->next;
+
+ BH_FREE(elem);
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return true;
+ }
+
+ prev = elem;
+ elem = elem->next;
+ }
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+ return false;
+}
+
+bool
+bh_hash_map_destroy(HashMap *map)
+{
+ uint32 index;
+ HashMapElem *elem, *next;
+
+ if (!map) {
+ LOG_ERROR("HashMap destroy failed: map is NULL.\n");
+ return false;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ for (index = 0; index < map->size; index++) {
+ elem = map->elements[index];
+ while (elem) {
+ next = elem->next;
+
+ if (map->key_destroy_func) {
+ map->key_destroy_func(elem->key);
+ }
+ if (map->value_destroy_func) {
+ map->value_destroy_func(elem->value);
+ }
+ BH_FREE(elem);
+
+ elem = next;
+ }
+ }
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ os_mutex_destroy(map->lock);
+ }
+ BH_FREE(map);
+ return true;
+}
+
+uint32
+bh_hash_map_get_struct_size(HashMap *hashmap)
+{
+ uint32 size = (uint32)(uintptr_t)offsetof(HashMap, elements)
+ + (uint32)sizeof(HashMapElem *) * hashmap->size;
+
+ if (hashmap->lock) {
+ size += (uint32)sizeof(korp_mutex);
+ }
+
+ return size;
+}
+
+uint32
+bh_hash_map_get_elem_struct_size()
+{
+ return (uint32)sizeof(HashMapElem);
+}
+
+bool
+bh_hash_map_traverse(HashMap *map, TraverseCallbackFunc callback,
+ void *user_data)
+{
+ uint32 index;
+ HashMapElem *elem, *next;
+
+ if (!map || !callback) {
+ LOG_ERROR("HashMap traverse failed: map or callback is NULL.\n");
+ return false;
+ }
+
+ if (map->lock) {
+ os_mutex_lock(map->lock);
+ }
+
+ for (index = 0; index < map->size; index++) {
+ elem = map->elements[index];
+ while (elem) {
+ next = elem->next;
+ callback(elem->key, elem->value, user_data);
+ elem = next;
+ }
+ }
+
+ if (map->lock) {
+ os_mutex_unlock(map->lock);
+ }
+
+ return true;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.h
new file mode 100644
index 000000000..38aa2c668
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_hashmap.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef WASM_HASHMAP_H
+#define WASM_HASHMAP_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Minimum initial size of hash map */
+#define HASH_MAP_MIN_SIZE 4
+
+/* Maximum initial size of hash map */
+#define HASH_MAP_MAX_SIZE 65536
+
+struct HashMap;
+typedef struct HashMap HashMap;
+
+/* Hash function: to get the hash value of key. */
+typedef uint32 (*HashFunc)(const void *key);
+
+/* Key equal function: to check whether two keys are equal. */
+typedef bool (*KeyEqualFunc)(void *key1, void *key2);
+
+/* Key destroy function: destroys the key; automatically called
+   for each key when the hash map is destroyed. */
+typedef void (*KeyDestroyFunc)(void *key);
+
+/* Value destroy function: destroys the value; automatically called
+   for each value when the hash map is destroyed. */
+typedef void (*ValueDestroyFunc)(void *value);
+
+/* Traverse callback function: automatically called for every
+   hash map element during traversal. */
+typedef void (*TraverseCallbackFunc)(void *key, void *value, void *user_data);
+
+/**
+ * Create a hash map.
+ *
+ * @param size the initial size of the hash map
+ * @param use_lock whether to lock the hash map when operating on it
+ * @param hash_func hash function of the key, must be specified
+ * @param key_equal_func key equal function, check whether two keys
+ * are equal, must be specified
+ * @param key_destroy_func key destroy function, called for each key if not NULL
+ * when the hash map is destroyed
+ * @param value_destroy_func value destroy function, called for each value if
+ * not NULL when the hash map is destroyed
+ *
+ * @return the hash map created, NULL if failed
+ */
+HashMap *
+bh_hash_map_create(uint32 size, bool use_lock, HashFunc hash_func,
+ KeyEqualFunc key_equal_func, KeyDestroyFunc key_destroy_func,
+ ValueDestroyFunc value_destroy_func);
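+
+/*
+ * A minimal usage sketch (str_hash and str_equal below are illustrative
+ * callbacks, not part of this header):
+ *
+ *     static uint32 str_hash(const void *key)
+ *     {
+ *         const char *s = (const char *)key;
+ *         uint32 h = 5381;
+ *         while (*s)
+ *             h = h * 33 + (uint32)*s++;
+ *         return h;
+ *     }
+ *
+ *     static bool str_equal(void *key1, void *key2)
+ *     {
+ *         return strcmp((const char *)key1, (const char *)key2) == 0;
+ *     }
+ *
+ *     HashMap *map = bh_hash_map_create(32, true, str_hash, str_equal,
+ *                                       NULL, NULL);
+ *     if (map) {
+ *         bh_hash_map_insert(map, "answer", (void *)(uintptr_t)42);
+ *         void *value = bh_hash_map_find(map, "answer");
+ *         (void)value;
+ *         bh_hash_map_destroy(map);
+ *     }
+ */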
+
+/**
+ * Insert an element into the hash map
+ *
+ * @param map the hash map to insert the element into
+ * @param key the key of the element
+ * @param value the value of the element
+ *
+ * @return true if success, false otherwise
+ * Note: fails if the key is NULL or a duplicated key already exists
+ *       in the hash map.
+ */
+bool
+bh_hash_map_insert(HashMap *map, void *key, void *value);
+
+/**
+ * Find an element in the hash map
+ *
+ * @param map the hash map to search
+ * @param key the key of the element
+ *
+ * @return the value of the found element if success, NULL otherwise
+ */
+void *
+bh_hash_map_find(HashMap *map, void *key);
+
+/**
+ * Update an element in the hash map with a new value
+ *
+ * @param map the hash map to update
+ * @param key the key of the element
+ * @param value the new value of the element
+ * @param p_old_value if not NULL, copies the old value to it
+ *
+ * @return true if success, false otherwise
+ * Note: the old value won't be destroyed by value destroy function,
+ * it will be copied to p_old_value for user to process.
+ */
+bool
+bh_hash_map_update(HashMap *map, void *key, void *value, void **p_old_value);
+
+/**
+ * Remove an element from the hash map
+ *
+ * @param map the hash map to remove the element from
+ * @param key the key of the element
+ * @param p_old_key if not NULL, copies the old key to it
+ * @param p_old_value if not NULL, copies the old value to it
+ *
+ * @return true if success, false otherwise
+ * Note: the old key and old value won't be destroyed by key destroy
+ * function and value destroy function, they will be copied to
+ * p_old_key and p_old_value for user to process.
+ */
+bool
+bh_hash_map_remove(HashMap *map, void *key, void **p_old_key,
+ void **p_old_value);
+
+/**
+ * Destroy the hashmap
+ *
+ * @param map the hash map to destroy
+ *
+ * @return true if success, false otherwise
+ * Note: the key destroy function and value destroy function will be
+ * called to destroy each element's key and value if they are
+ * not NULL.
+ */
+bool
+bh_hash_map_destroy(HashMap *map);
+
+/**
+ * Get the structure size of HashMap
+ *
+ * @param hashmap the hash map to calculate the structure size of
+ *
+ * @return the memory space occupied by HashMap structure
+ */
+uint32
+bh_hash_map_get_struct_size(HashMap *hashmap);
+
+/**
+ * Get the structure size of HashMap Element
+ *
+ * @return the memory space occupied by HashMapElem structure
+ */
+uint32
+bh_hash_map_get_elem_struct_size(void);
+
+/**
+ * Traverse the hash map and call the callback function
+ *
+ * @param map the hash map to traverse
+ * @param callback the function to be called for every element
+ * @param user_data the argument to be passed to the callback function
+ *
+ * @return true if success, false otherwise
+ * Note: if the hash map has a lock, the map is locked during the whole
+ *       traversal, so keep the callback function as simple as possible.
+ */
+bool
+bh_hash_map_traverse(HashMap *map, TraverseCallbackFunc callback,
+ void *user_data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* endof WASM_HASHMAP_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.c
new file mode 100644
index 000000000..7102d42a1
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_list.h"
+
+#if BH_DEBUG != 0
+/**
+ * Test whether a pointer value already exists in the given list.
+ *
+ * @param list pointer to list.
+ * @param elem pointer to elem that will be inserted into list.
+ * @return <code>true</code> if the pointer has been in the list;
+ * <code>false</code> otherwise.
+ */
+static bool
+bh_list_is_elem_exist(bh_list *list, void *elem);
+#endif
+
+bh_list_status
+bh_list_init(bh_list *list)
+{
+ if (!list)
+ return BH_LIST_ERROR;
+
+ (list->head).next = NULL;
+ list->len = 0;
+ return BH_LIST_SUCCESS;
+}
+
+bh_list_status
+bh_list_insert(bh_list *list, void *elem)
+{
+ bh_list_link *p = NULL;
+
+ if (!list || !elem)
+ return BH_LIST_ERROR;
+#if BH_DEBUG != 0
+ bh_assert(!bh_list_is_elem_exist(list, elem));
+#endif
+ p = (bh_list_link *)elem;
+ p->next = (list->head).next;
+ (list->head).next = p;
+ list->len++;
+ return BH_LIST_SUCCESS;
+}
+
+bh_list_status
+bh_list_remove(bh_list *list, void *elem)
+{
+ bh_list_link *cur = NULL;
+ bh_list_link *prev = NULL;
+
+ if (!list || !elem)
+ return BH_LIST_ERROR;
+
+ cur = (list->head).next;
+
+ while (cur) {
+ if (cur == elem) {
+ if (prev)
+ prev->next = cur->next;
+ else
+ (list->head).next = cur->next;
+
+ list->len--;
+ return BH_LIST_SUCCESS;
+ }
+
+ prev = cur;
+ cur = cur->next;
+ }
+
+ return BH_LIST_ERROR;
+}
+
+uint32
+bh_list_length(bh_list *list)
+{
+ return (list ? list->len : 0);
+}
+
+void *
+bh_list_first_elem(bh_list *list)
+{
+ return (list ? (list->head).next : NULL);
+}
+
+void *
+bh_list_elem_next(void *node)
+{
+ return (node ? ((bh_list_link *)node)->next : NULL);
+}
+
+#if BH_DEBUG != 0
+static bool
+bh_list_is_elem_exist(bh_list *list, void *elem)
+{
+ bh_list_link *p = NULL;
+
+ if (!list || !elem)
+ return false;
+
+ p = (list->head).next;
+ while (p && p != elem)
+ p = p->next;
+
+ return (p != NULL);
+}
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.h
new file mode 100644
index 000000000..f10215324
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_list.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_LIST_H
+#define _BH_LIST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "bh_platform.h"
+
+/* The list user should embed bh_list_link in the list element data
+ * structure definition, and the bh_list_link field must be the first
+ * field. For example, to use bh_list for our own data type A,
+ * A must be defined as a structure like below:
+ * struct A {
+ *     bh_list_link l;
+ *     ...
+ * };
+ *
+ * bh_list_link is defined as a structure (not a typedef of void*).
+ * This makes it easy to extend the list to be bi-directional later.
+ */
+typedef struct bh_list_link {
+ struct bh_list_link *next;
+} bh_list_link;
+
+typedef struct bh_list {
+ bh_list_link head;
+ uint32 len;
+} bh_list;
+
+/* list operation return value */
+typedef enum bh_list_status {
+ BH_LIST_SUCCESS = 0,
+ BH_LIST_ERROR = -1
+} bh_list_status;
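+
+/*
+ * A minimal usage sketch (struct task_node is illustrative); note that
+ * the bh_list_link member must be the first field:
+ *
+ *     typedef struct task_node {
+ *         bh_list_link link;
+ *         int id;
+ *     } task_node;
+ *
+ *     bh_list list;
+ *     task_node node = { { NULL }, 1 };
+ *     task_node *p;
+ *
+ *     bh_list_init(&list);
+ *     bh_list_insert(&list, &node);
+ *     for (p = bh_list_first_elem(&list); p; p = bh_list_elem_next(p)) {
+ *         // use p->id
+ *     }
+ */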
+
+/**
+ * Initialize a list.
+ *
+ * @param list pointer to list.
+ * @return <code>BH_LIST_SUCCESS</code> if OK;
+ *         <code>BH_LIST_ERROR</code> if list pointer is NULL.
+ */
+bh_list_status
+bh_list_init(bh_list *list);
+
+/**
+ * Insert an elem pointer into the list. The list stores the elem pointer
+ * directly; the elem memory is the responsibility of the list user.
+ *
+ * @param list pointer to list.
+ * @param elem pointer to elem that will be inserted into list.
+ * @return <code>BH_LIST_SUCCESS</code> if OK;
+ *         <code>BH_LIST_ERROR</code> if the input is invalid.
+ */
+bh_list_status
+bh_list_insert(bh_list *list, void *elem);
+
+/**
+ * Remove an elem pointer from the list. The list only unlinks the elem;
+ * the elem memory is the responsibility of the list user.
+ *
+ * @param list pointer to list.
+ * @param elem pointer to elem that will be removed from list.
+ * @return <code>BH_LIST_SUCCESS</code> if OK;
+ *         <code>BH_LIST_ERROR</code> if the element does not exist in the
+ *         given list.
+ */
+bh_list_status
+bh_list_remove(bh_list *list, void *elem);
+
+/**
+ * Get the list length.
+ *
+ * @param list pointer to list.
+ * @return the length of the list.
+ */
+uint32
+bh_list_length(bh_list *list);
+
+/**
+ * Get the first elem in the list.
+ *
+ * @param list pointer to list.
+ * @return pointer to the first node.
+ */
+void *
+bh_list_first_elem(bh_list *list);
+
+/**
+ * Get the next elem of given list input elem.
+ *
+ * @param node pointer to list node.
+ * @return pointer to next list node.
+ */
+void *
+bh_list_elem_next(void *node);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef _BH_LIST_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.c
new file mode 100644
index 000000000..78c058065
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_log.h"
+
+/**
+ * The verbose level of the log system. Only those logs whose
+ * levels are less than or equal to this value are output.
+ */
+static uint32 log_verbose_level = BH_LOG_LEVEL_WARNING;
+
+void
+bh_log_set_verbose_level(uint32 level)
+{
+ log_verbose_level = level;
+}
+
+void
+bh_log(LogLevel log_level, const char *file, int line, const char *fmt, ...)
+{
+ va_list ap;
+ korp_tid self;
+ char buf[32] = { 0 };
+ uint64 usec;
+ uint32 t, h, m, s, mills;
+
+ if ((uint32)log_level > log_verbose_level)
+ return;
+
+ self = os_self_thread();
+
+ usec = os_time_get_boot_microsecond();
+ t = (uint32)(usec / 1000000) % (24 * 60 * 60);
+ h = t / (60 * 60);
+ t = t % (60 * 60);
+ m = t / 60;
+ s = t % 60;
+ mills = (uint32)(usec % 1000);
+
+ snprintf(buf, sizeof(buf),
+ "%02" PRIu32 ":%02" PRIu32 ":%02" PRIu32 ":%03" PRIu32, h, m, s,
+ mills);
+
+ os_printf("[%s - %" PRIXPTR "]: ", buf, (uintptr_t)self);
+
+ if (file)
+ os_printf("%s, line %d, ", file, line);
+
+ va_start(ap, fmt);
+ os_vprintf(fmt, ap);
+ va_end(ap);
+
+ os_printf("\n");
+}
+
+static uint32 last_time_ms = 0;
+static uint32 total_time_ms = 0;
+
+void
+bh_print_time(const char *prompt)
+{
+ uint32 curr_time_ms;
+
+ if (log_verbose_level < 3)
+ return;
+
+ curr_time_ms = (uint32)bh_get_tick_ms();
+
+ if (last_time_ms == 0)
+ last_time_ms = curr_time_ms;
+
+ total_time_ms += curr_time_ms - last_time_ms;
+
+ os_printf("%-48s time of last stage: %" PRIu32 " ms, total time: %" PRIu32
+ " ms\n",
+ prompt, curr_time_ms - last_time_ms, total_time_ms);
+
+ last_time_ms = curr_time_ms;
+}
+
+void
+bh_print_proc_mem(const char *prompt)
+{
+ char buf[1024] = { 0 };
+
+ if (log_verbose_level < BH_LOG_LEVEL_DEBUG)
+ return;
+
+ if (os_dumps_proc_mem_info(buf, sizeof(buf)) != 0)
+ return;
+
+ os_printf("%s\n", prompt);
+ os_printf("===== memory usage =====\n");
+ os_printf("%s", buf);
+ os_printf("==========\n");
+ return;
+}
+
+void
+bh_log_proc_mem(const char *function, uint32 line)
+{
+ char prompt[128] = { 0 };
+ snprintf(prompt, sizeof(prompt), "[MEM] %s(...) L%" PRIu32, function, line);
+ bh_print_proc_mem(prompt);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.h
new file mode 100644
index 000000000..e0bc61da2
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_log.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+/**
+ * @file bh_log.h
+ * @date Tue Nov 8 18:19:10 2011
+ *
+ * @brief This log system supports wrapping multiple outputs into one
+ * log message. This is useful for emitting variable-length logs
+ * without additional memory overhead (a buffer for concatenating
+ * the message), e.g. an exception stack trace, which cannot be printed
+ * by a single log call without the help of an additional buffer.
+ * Avoiding an additional memory buffer is useful for resource-constrained
+ * systems: it minimizes the impact of the log system on applications,
+ * and logs can be printed even when not enough memory is available.
+ * Functions prefixed with "_" are private. Only macros that do not
+ * start with "_" are exposed and can be used.
+ */
+
+#ifndef _BH_LOG_H
+#define _BH_LOG_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ BH_LOG_LEVEL_FATAL = 0,
+ BH_LOG_LEVEL_ERROR = 1,
+ BH_LOG_LEVEL_WARNING = 2,
+ BH_LOG_LEVEL_DEBUG = 3,
+ BH_LOG_LEVEL_VERBOSE = 4
+} LogLevel;
+
+void
+bh_log_set_verbose_level(uint32 level);
+
+void
+bh_log(LogLevel log_level, const char *file, int line, const char *fmt, ...);
+
+#ifdef BH_PLATFORM_NUTTX
+
+#undef LOG_FATAL
+#undef LOG_ERROR
+#undef LOG_WARNING
+#undef LOG_VERBOSE
+#undef LOG_DEBUG
+
+#endif
+
+#if BH_DEBUG != 0
+#define LOG_FATAL(...) \
+ bh_log(BH_LOG_LEVEL_FATAL, __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define LOG_FATAL(...) \
+ bh_log(BH_LOG_LEVEL_FATAL, __FUNCTION__, __LINE__, __VA_ARGS__)
+#endif
+
+#define LOG_ERROR(...) bh_log(BH_LOG_LEVEL_ERROR, NULL, 0, __VA_ARGS__)
+#define LOG_WARNING(...) bh_log(BH_LOG_LEVEL_WARNING, NULL, 0, __VA_ARGS__)
+#define LOG_VERBOSE(...) bh_log(BH_LOG_LEVEL_VERBOSE, NULL, 0, __VA_ARGS__)
+
+#if BH_DEBUG != 0
+#define LOG_DEBUG(...) \
+ bh_log(BH_LOG_LEVEL_DEBUG, __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define LOG_DEBUG(...) (void)0
+#endif
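+
+/*
+ * A minimal usage sketch: only messages whose level is less than or
+ * equal to the configured verbose level are printed (the default is
+ * BH_LOG_LEVEL_WARNING).
+ *
+ *     bh_log_set_verbose_level(BH_LOG_LEVEL_VERBOSE);
+ *     LOG_WARNING("using default heap size: %d KB", 128);
+ *     LOG_VERBOSE("about to call the wasm entry function");
+ */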
+
+void
+bh_print_time(const char *prompt);
+
+void
+bh_print_proc_mem(const char *prompt);
+
+void
+bh_log_proc_mem(const char *function, uint32 line);
+
+#define LOG_PROC_MEM(...) bh_log_proc_mem(__FUNCTION__, __LINE__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BH_LOG_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_platform.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_platform.h
new file mode 100644
index 000000000..86aef839d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_platform.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_PLATFORM_H
+#define _BH_PLATFORM_H
+
+#include "../platform/include/platform_common.h"
+#include "../platform/include/platform_api_vmcore.h"
+#include "../platform/include/platform_api_extension.h"
+#include "bh_assert.h"
+#include "bh_common.h"
+#include "bh_hashmap.h"
+#include "bh_list.h"
+#include "bh_log.h"
+#include "bh_queue.h"
+#include "bh_vector.h"
+#include "runtime_timer.h"
+
+/**
+ * WA_MALLOC/WA_FREE need to be redefined for the runtime native
+ * world and the WASM app world respectively.
+ *
+ * Some source files are shared between the native and WASM builds,
+ * and this is the memory allocator API for those files.
+ *
+ * Here we define it for the native world.
+ */
+#ifndef WA_MALLOC
+#define WA_MALLOC wasm_runtime_malloc
+#endif
+
+#ifndef WA_FREE
+#define WA_FREE wasm_runtime_free
+#endif
+
+#endif /* #ifndef _BH_PLATFORM_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.c
new file mode 100644
index 000000000..7c860d11a
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_queue.h"
+
+typedef struct bh_queue_node {
+ struct bh_queue_node *next;
+ struct bh_queue_node *prev;
+ unsigned short tag;
+ unsigned int len;
+ void *body;
+ bh_msg_cleaner msg_cleaner;
+} bh_queue_node;
+
+struct bh_queue {
+ bh_queue_mutex queue_lock;
+ bh_queue_cond queue_wait_cond;
+ unsigned int cnt;
+ unsigned int max;
+ unsigned int drops;
+ bh_queue_node *head;
+ bh_queue_node *tail;
+
+ bool exit_loop_run;
+};
+
+char *
+bh_message_payload(bh_message_t message)
+{
+ return message->body;
+}
+
+uint32
+bh_message_payload_len(bh_message_t message)
+{
+ return message->len;
+}
+
+int
+bh_message_type(bh_message_t message)
+{
+ return message->tag;
+}
+
+bh_queue *
+bh_queue_create()
+{
+ int ret;
+ bh_queue *queue = bh_queue_malloc(sizeof(bh_queue));
+
+ if (queue) {
+ memset(queue, 0, sizeof(bh_queue));
+ queue->max = DEFAULT_QUEUE_LENGTH;
+
+ ret = bh_queue_mutex_init(&queue->queue_lock);
+ if (ret != 0) {
+ bh_queue_free(queue);
+ return NULL;
+ }
+
+ ret = bh_queue_cond_init(&queue->queue_wait_cond);
+ if (ret != 0) {
+ bh_queue_mutex_destroy(&queue->queue_lock);
+ bh_queue_free(queue);
+ return NULL;
+ }
+ }
+
+ return queue;
+}
+
+void
+bh_queue_destroy(bh_queue *queue)
+{
+ bh_queue_node *node;
+
+ if (!queue)
+ return;
+
+ bh_queue_mutex_lock(&queue->queue_lock);
+ while (queue->head) {
+ node = queue->head;
+ queue->head = node->next;
+
+ bh_free_msg(node);
+ }
+ bh_queue_mutex_unlock(&queue->queue_lock);
+
+ bh_queue_cond_destroy(&queue->queue_wait_cond);
+ bh_queue_mutex_destroy(&queue->queue_lock);
+ bh_queue_free(queue);
+}
+
+bool
+bh_post_msg2(bh_queue *queue, bh_queue_node *msg)
+{
+ if (queue->cnt >= queue->max) {
+ queue->drops++;
+ bh_free_msg(msg);
+ return false;
+ }
+
+ bh_queue_mutex_lock(&queue->queue_lock);
+
+ if (queue->cnt == 0) {
+ bh_assert(queue->head == NULL);
+ bh_assert(queue->tail == NULL);
+ queue->head = queue->tail = msg;
+ msg->next = msg->prev = NULL;
+ queue->cnt = 1;
+
+ bh_queue_cond_signal(&queue->queue_wait_cond);
+ }
+ else {
+ msg->next = NULL;
+ msg->prev = queue->tail;
+ queue->tail->next = msg;
+ queue->tail = msg;
+ queue->cnt++;
+ }
+
+ bh_queue_mutex_unlock(&queue->queue_lock);
+
+ return true;
+}
+
+bool
+bh_post_msg(bh_queue *queue, unsigned short tag, void *body, unsigned int len)
+{
+ bh_queue_node *msg = bh_new_msg(tag, body, len, NULL);
+ if (msg == NULL) {
+ queue->drops++;
+ if (len != 0 && body)
+ BH_FREE(body);
+ return false;
+ }
+
+ if (!bh_post_msg2(queue, msg)) {
+ // bh_post_msg2 already freed the msg for failure
+ return false;
+ }
+
+ return true;
+}
+
+bh_queue_node *
+bh_new_msg(unsigned short tag, void *body, unsigned int len, void *handler)
+{
+ bh_queue_node *msg =
+ (bh_queue_node *)bh_queue_malloc(sizeof(bh_queue_node));
+ if (msg == NULL)
+ return NULL;
+ memset(msg, 0, sizeof(bh_queue_node));
+ msg->len = len;
+ msg->body = body;
+ msg->tag = tag;
+ msg->msg_cleaner = (bh_msg_cleaner)handler;
+
+ return msg;
+}
+
+void
+bh_free_msg(bh_queue_node *msg)
+{
+ if (msg->msg_cleaner) {
+ msg->msg_cleaner(msg->body);
+ bh_queue_free(msg);
+ return;
+ }
+
+    // note: sometimes the payload pointer just carries an integer value;
+    // len != 0 is the only indicator that the body is an allocated buffer.
+ if (msg->body && msg->len)
+ bh_queue_free(msg->body);
+
+ bh_queue_free(msg);
+}
+
+bh_message_t
+bh_get_msg(bh_queue *queue, uint64 timeout_us)
+{
+ bh_queue_node *msg = NULL;
+ bh_queue_mutex_lock(&queue->queue_lock);
+
+ if (queue->cnt == 0) {
+ bh_assert(queue->head == NULL);
+ bh_assert(queue->tail == NULL);
+
+ if (timeout_us == 0) {
+ bh_queue_mutex_unlock(&queue->queue_lock);
+ return NULL;
+ }
+
+ bh_queue_cond_timedwait(&queue->queue_wait_cond, &queue->queue_lock,
+ timeout_us);
+ }
+
+ if (queue->cnt == 0) {
+ bh_assert(queue->head == NULL);
+ bh_assert(queue->tail == NULL);
+ }
+ else if (queue->cnt == 1) {
+ bh_assert(queue->head == queue->tail);
+
+ msg = queue->head;
+ queue->head = queue->tail = NULL;
+ queue->cnt = 0;
+ }
+ else {
+ msg = queue->head;
+ queue->head = queue->head->next;
+ queue->head->prev = NULL;
+ queue->cnt--;
+ }
+
+ bh_queue_mutex_unlock(&queue->queue_lock);
+
+ return msg;
+}
+
+unsigned
+bh_queue_get_message_count(bh_queue *queue)
+{
+ if (!queue)
+ return 0;
+
+ return queue->cnt;
+}
+
+void
+bh_queue_enter_loop_run(bh_queue *queue, bh_queue_handle_msg_callback handle_cb,
+ void *arg)
+{
+ if (!queue)
+ return;
+
+ while (!queue->exit_loop_run) {
+ bh_queue_node *message = bh_get_msg(queue, BHT_WAIT_FOREVER);
+
+ if (message) {
+ handle_cb(message, arg);
+ bh_free_msg(message);
+ }
+ }
+}
+
+void
+bh_queue_exit_loop_run(bh_queue *queue)
+{
+ if (queue) {
+ queue->exit_loop_run = true;
+ bh_queue_cond_signal(&queue->queue_wait_cond);
+ }
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.h
new file mode 100644
index 000000000..c15f43526
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_queue.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_QUEUE_H
+#define _BH_QUEUE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "bh_platform.h"
+
+struct bh_queue_node;
+typedef struct bh_queue_node *bh_message_t;
+struct bh_queue;
+typedef struct bh_queue bh_queue;
+
+typedef void (*bh_queue_handle_msg_callback)(void *message, void *arg);
+
+#define bh_queue_malloc BH_MALLOC
+#define bh_queue_free BH_FREE
+
+#define bh_queue_mutex korp_mutex
+#define bh_queue_cond korp_cond
+
+#define bh_queue_mutex_init os_mutex_init
+#define bh_queue_mutex_destroy os_mutex_destroy
+#define bh_queue_mutex_lock os_mutex_lock
+#define bh_queue_mutex_unlock os_mutex_unlock
+
+#define bh_queue_cond_init os_cond_init
+#define bh_queue_cond_destroy os_cond_destroy
+#define bh_queue_cond_wait os_cond_wait
+#define bh_queue_cond_timedwait os_cond_reltimedwait
+#define bh_queue_cond_signal os_cond_signal
+#define bh_queue_cond_broadcast os_cond_broadcast
+
+typedef void (*bh_msg_cleaner)(void *msg);
+
+bh_queue *
+bh_queue_create(void);
+
+void
+bh_queue_destroy(bh_queue *queue);
+
+char *
+bh_message_payload(bh_message_t message);
+uint32
+bh_message_payload_len(bh_message_t message);
+int
+bh_message_type(bh_message_t message);
+
+bh_message_t
+bh_new_msg(unsigned short tag, void *body, unsigned int len, void *handler);
+void
+bh_free_msg(bh_message_t msg);
+bool
+bh_post_msg(bh_queue *queue, unsigned short tag, void *body, unsigned int len);
+bool
+bh_post_msg2(bh_queue *queue, bh_message_t msg);
+
+bh_message_t
+bh_get_msg(bh_queue *queue, uint64 timeout_us);
+
+unsigned
+bh_queue_get_message_count(bh_queue *queue);
+
+void
+bh_queue_enter_loop_run(bh_queue *queue, bh_queue_handle_msg_callback handle_cb,
+ void *arg);
+void
+bh_queue_exit_loop_run(bh_queue *queue);
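+
+/*
+ * A minimal usage sketch (the tag value 1 and the 16-byte body are
+ * illustrative): a body posted with len != 0 is owned by the queue
+ * machinery and released via bh_free_msg().
+ *
+ *     bh_queue *q = bh_queue_create();
+ *     char *body = q ? BH_MALLOC(16) : NULL;
+ *
+ *     if (body) {
+ *         snprintf(body, 16, "hello");
+ *         // ownership of body passes to the queue (len != 0)
+ *         if (bh_post_msg(q, 1, body, 16)) {
+ *             bh_message_t msg = bh_get_msg(q, BHT_WAIT_FOREVER);
+ *             if (msg) {
+ *                 // bh_message_payload(msg) points to the posted body
+ *                 bh_free_msg(msg);
+ *             }
+ *         }
+ *     }
+ *     bh_queue_destroy(q);
+ */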
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef _BH_QUEUE_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.c
new file mode 100644
index 000000000..352ce7192
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "bh_vector.h"
+
+static uint8 *
+alloc_vector_data(size_t length, size_t size_elem)
+{
+ uint64 total_size = ((uint64)size_elem) * length;
+ uint8 *data;
+
+ if (length > UINT32_MAX || size_elem > UINT32_MAX
+ || total_size > UINT32_MAX) {
+ return NULL;
+ }
+
+ if ((data = BH_MALLOC((uint32)total_size))) {
+ memset(data, 0, (uint32)total_size);
+ }
+
+ return data;
+}
+
+/**
+ * every caller of `extend_vector` must provide
+ * a thread-safe environment.
+ */
+static bool
+extend_vector(Vector *vector, size_t length)
+{
+ uint8 *data;
+
+ if (length <= vector->max_elems)
+ return true;
+
+ if (length < vector->size_elem * 3 / 2)
+ length = vector->size_elem * 3 / 2;
+
+ if (!(data = alloc_vector_data(length, vector->size_elem))) {
+ return false;
+ }
+
+ bh_memcpy_s(data, (uint32)(vector->size_elem * length), vector->data,
+ (uint32)(vector->size_elem * vector->max_elems));
+ BH_FREE(vector->data);
+
+ vector->data = data;
+ vector->max_elems = length;
+ return true;
+}
+
+bool
+bh_vector_init(Vector *vector, size_t init_length, size_t size_elem,
+ bool use_lock)
+{
+ if (!vector) {
+ LOG_ERROR("Init vector failed: vector is NULL.\n");
+ return false;
+ }
+
+ if (init_length == 0) {
+ init_length = 4;
+ }
+
+ if (!(vector->data = alloc_vector_data(init_length, size_elem))) {
+ LOG_ERROR("Init vector failed: alloc memory failed.\n");
+ return false;
+ }
+
+ vector->size_elem = size_elem;
+ vector->max_elems = init_length;
+ vector->num_elems = 0;
+ vector->lock = NULL;
+
+ if (use_lock) {
+ if (!(vector->lock = BH_MALLOC(sizeof(korp_mutex)))) {
+ LOG_ERROR("Init vector failed: alloc locker failed.\n");
+ bh_vector_destroy(vector);
+ return false;
+ }
+
+ if (BHT_OK != os_mutex_init(vector->lock)) {
+ LOG_ERROR("Init vector failed: init locker failed.\n");
+
+ BH_FREE(vector->lock);
+ vector->lock = NULL;
+
+ bh_vector_destroy(vector);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+bh_vector_set(Vector *vector, uint32 index, const void *elem_buf)
+{
+ if (!vector || !elem_buf) {
+ LOG_ERROR("Set vector elem failed: vector or elem buf is NULL.\n");
+ return false;
+ }
+
+ if (index >= vector->num_elems) {
+ LOG_ERROR("Set vector elem failed: invalid elem index.\n");
+ return false;
+ }
+
+ if (vector->lock)
+ os_mutex_lock(vector->lock);
+ bh_memcpy_s(vector->data + vector->size_elem * index,
+ (uint32)vector->size_elem, elem_buf, (uint32)vector->size_elem);
+ if (vector->lock)
+ os_mutex_unlock(vector->lock);
+ return true;
+}
+
+bool
+bh_vector_get(Vector *vector, uint32 index, void *elem_buf)
+{
+ if (!vector || !elem_buf) {
+ LOG_ERROR("Get vector elem failed: vector or elem buf is NULL.\n");
+ return false;
+ }
+
+ if (index >= vector->num_elems) {
+ LOG_ERROR("Get vector elem failed: invalid elem index.\n");
+ return false;
+ }
+
+ if (vector->lock)
+ os_mutex_lock(vector->lock);
+ bh_memcpy_s(elem_buf, (uint32)vector->size_elem,
+ vector->data + vector->size_elem * index,
+ (uint32)vector->size_elem);
+ if (vector->lock)
+ os_mutex_unlock(vector->lock);
+ return true;
+}
+
+bool
+bh_vector_insert(Vector *vector, uint32 index, const void *elem_buf)
+{
+ size_t i;
+ uint8 *p;
+ bool ret = false;
+
+ if (!vector || !elem_buf) {
+ LOG_ERROR("Insert vector elem failed: vector or elem buf is NULL.\n");
+ goto just_return;
+ }
+
+ if (index >= vector->num_elems) {
+ LOG_ERROR("Insert vector elem failed: invalid elem index.\n");
+ goto just_return;
+ }
+
+ if (vector->lock)
+ os_mutex_lock(vector->lock);
+
+ if (!extend_vector(vector, vector->num_elems + 1)) {
+ LOG_ERROR("Insert vector elem failed: extend vector failed.\n");
+ goto unlock_return;
+ }
+
+ p = vector->data + vector->size_elem * vector->num_elems;
+ for (i = vector->num_elems - 1; i > index; i--) {
+ bh_memcpy_s(p, (uint32)vector->size_elem, p - vector->size_elem,
+ (uint32)vector->size_elem);
+ p -= vector->size_elem;
+ }
+
+ bh_memcpy_s(p, (uint32)vector->size_elem, elem_buf,
+ (uint32)vector->size_elem);
+ vector->num_elems++;
+ ret = true;
+
+unlock_return:
+ if (vector->lock)
+ os_mutex_unlock(vector->lock);
+just_return:
+ return ret;
+}
+
+bool
+bh_vector_append(Vector *vector, const void *elem_buf)
+{
+ bool ret = false;
+
+ if (!vector || !elem_buf) {
+ LOG_ERROR("Append vector elem failed: vector or elem buf is NULL.\n");
+ goto just_return;
+ }
+
+    /* make sure one more slot is used by the thread that allocates it */
+ if (vector->lock)
+ os_mutex_lock(vector->lock);
+
+ if (!extend_vector(vector, vector->num_elems + 1)) {
+ LOG_ERROR("Append ector elem failed: extend vector failed.\n");
+ goto unlock_return;
+ }
+
+ bh_memcpy_s(vector->data + vector->size_elem * vector->num_elems,
+ (uint32)vector->size_elem, elem_buf, (uint32)vector->size_elem);
+ vector->num_elems++;
+ ret = true;
+
+unlock_return:
+ if (vector->lock)
+ os_mutex_unlock(vector->lock);
+just_return:
+ return ret;
+}
+
+bool
+bh_vector_remove(Vector *vector, uint32 index, void *old_elem_buf)
+{
+ uint32 i;
+ uint8 *p;
+
+ if (!vector) {
+ LOG_ERROR("Remove vector elem failed: vector is NULL.\n");
+ return false;
+ }
+
+ if (index >= vector->num_elems) {
+ LOG_ERROR("Remove vector elem failed: invalid elem index.\n");
+ return false;
+ }
+
+ if (vector->lock)
+ os_mutex_lock(vector->lock);
+ p = vector->data + vector->size_elem * index;
+
+ if (old_elem_buf) {
+ bh_memcpy_s(old_elem_buf, (uint32)vector->size_elem, p,
+ (uint32)vector->size_elem);
+ }
+
+ for (i = index; i < vector->num_elems - 1; i++) {
+ bh_memcpy_s(p, (uint32)vector->size_elem, p + vector->size_elem,
+ (uint32)vector->size_elem);
+ p += vector->size_elem;
+ }
+
+ vector->num_elems--;
+ if (vector->lock)
+ os_mutex_unlock(vector->lock);
+ return true;
+}
+
+size_t
+bh_vector_size(const Vector *vector)
+{
+ return vector ? vector->num_elems : 0;
+}
+
+bool
+bh_vector_destroy(Vector *vector)
+{
+ if (!vector) {
+ LOG_ERROR("Destroy vector elem failed: vector is NULL.\n");
+ return false;
+ }
+
+ if (vector->data)
+ BH_FREE(vector->data);
+
+ if (vector->lock) {
+ os_mutex_destroy(vector->lock);
+ BH_FREE(vector->lock);
+ }
+
+ memset(vector, 0, sizeof(Vector));
+ return true;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.h
new file mode 100644
index 000000000..d0aaaf19b
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/bh_vector.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _WASM_VECTOR_H
+#define _WASM_VECTOR_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DEFAULT_VECTOR_INIT_SIZE 8
+
+typedef struct Vector {
+ /* max element number */
+ size_t max_elems;
+ /* vector data allocated */
+ uint8 *data;
+ /* current element num */
+ size_t num_elems;
+ /* size of each element */
+ size_t size_elem;
+ void *lock;
+} Vector;
+
+/**
+ * Initialize vector
+ *
+ * @param vector the vector to init
+ * @param init_length the initial length of the vector
+ * @param size_elem size of each element
+ * @param use_lock whether to lock the vector when operating on it
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_init(Vector *vector, size_t init_length, size_t size_elem,
+ bool use_lock);
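+
+/*
+ * A minimal usage sketch: a growable array of uint32 elements protected
+ * by the vector's internal lock.
+ *
+ *     Vector v;
+ *     uint32 elem = 7, out = 0;
+ *
+ *     if (bh_vector_init(&v, DEFAULT_VECTOR_INIT_SIZE, sizeof(uint32),
+ *                        true)) {
+ *         bh_vector_append(&v, &elem);
+ *         bh_vector_get(&v, 0, &out);
+ *         // out == 7 here
+ *         bh_vector_destroy(&v);
+ *     }
+ */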
+
+/**
+ * Set element of vector
+ *
+ * @param vector the vector to set
+ * @param index the index of the element to set
+ * @param elem_buf the element buffer which stores the element data
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_set(Vector *vector, uint32 index, const void *elem_buf);
+
+/**
+ * Get element of vector
+ *
+ * @param vector the vector to get
+ * @param index the index of the element to get
+ * @param elem_buf the element buffer to store the element data,
+ * whose length must be no less than element size
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_get(Vector *vector, uint32 index, void *elem_buf);
+
+/**
+ * Insert element of vector
+ *
+ * @param vector the vector to insert
+ * @param index the index of the element to insert
+ * @param elem_buf the element buffer which stores the element data
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_insert(Vector *vector, uint32 index, const void *elem_buf);
+
+/**
+ * Append element to the end of vector
+ *
+ * @param vector the vector to append
+ * @param elem_buf the element buffer which stores the element data
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_append(Vector *vector, const void *elem_buf);
+
+/**
+ * Remove element from vector
+ *
+ * @param vector the vector to remove element
+ * @param index the index of the element to remove
+ * @param old_elem_buf if not NULL, copies the element data to the buffer
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_remove(Vector *vector, uint32 index, void *old_elem_buf);
+
+/**
+ * Return the size of the vector
+ *
+ * @param vector the vector to get size
+ *
+ * @return the size (number of elements) of the vector
+ */
+size_t
+bh_vector_size(const Vector *vector);
+
+/**
+ * Destroy the vector
+ *
+ * @param vector the vector to destroy
+ *
+ * @return true if success, false otherwise
+ */
+bool
+bh_vector_destroy(Vector *vector);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* endof _WASM_VECTOR_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.c
new file mode 100644
index 000000000..8fccf4c2f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "runtime_timer.h"
+
+#if 1
+#define PRINT(...) (void)0
+#else
+#define PRINT printf
+#endif
+
+typedef struct _app_timer {
+ struct _app_timer *next;
+ uint32 id;
+ uint32 interval;
+ uint64 expiry;
+ bool is_periodic;
+} app_timer_t;
+
+struct _timer_ctx {
+ app_timer_t *app_timers;
+ app_timer_t *idle_timers;
+ app_timer_t *free_timers;
+ uint32 max_timer_id;
+ int pre_allocated;
+ uint32 owner;
+
+ /* mutex and condition */
+ korp_cond cond;
+ korp_mutex mutex;
+
+ timer_callback_f timer_callback;
+ check_timer_expiry_f refresh_checker;
+};
+
+uint64
+bh_get_tick_ms()
+{
+ return os_time_get_boot_microsecond() / 1000;
+}
+
+uint32
+bh_get_elpased_ms(uint32 *last_system_clock)
+{
+ uint32 elpased_ms;
+    /* attention: bh_get_tick_ms() returns a 64-bit integer, but
+       bh_get_elpased_ms() is designed to use a 32-bit clock count */
+ uint32 now = (uint32)bh_get_tick_ms();
+
+ /* system clock overrun */
+ if (now < *last_system_clock) {
+ PRINT("system clock overrun!\n");
+ elpased_ms = now + (UINT32_MAX - *last_system_clock) + 1;
+ }
+ else {
+ elpased_ms = now - *last_system_clock;
+ }
+
+ *last_system_clock = now;
+ return elpased_ms;
+}
+
+static app_timer_t *
+remove_timer_from(timer_ctx_t ctx, uint32 timer_id, bool active_list)
+{
+ app_timer_t **head, *prev, *t;
+
+ os_mutex_lock(&ctx->mutex);
+
+ if (active_list)
+ head = &ctx->app_timers;
+ else
+ head = &ctx->idle_timers;
+
+ t = *head;
+ prev = NULL;
+
+ while (t) {
+ if (t->id == timer_id) {
+ if (prev == NULL) {
+ *head = t->next;
+ PRINT("removed timer [%d] at head from list %d\n", t->id,
+ active_list);
+ }
+ else {
+ prev->next = t->next;
+ PRINT("removed timer [%d] after [%d] from list %d\n", t->id,
+ prev->id, active_list);
+ }
+ os_mutex_unlock(&ctx->mutex);
+
+ if (active_list && prev == NULL && ctx->refresh_checker)
+ ctx->refresh_checker(ctx);
+ return t;
+ }
+ else {
+ prev = t;
+ t = t->next;
+ }
+ }
+
+ os_mutex_unlock(&ctx->mutex);
+ return NULL;
+}
+
+static app_timer_t *
+remove_timer(timer_ctx_t ctx, uint32 timer_id, bool *active)
+{
+ app_timer_t *t = remove_timer_from(ctx, timer_id, true);
+
+ if (t) {
+ if (active)
+ *active = true;
+ return t;
+ }
+
+ if (active)
+ *active = false;
+ return remove_timer_from(ctx, timer_id, false);
+}
+
+static void
+reschedule_timer(timer_ctx_t ctx, app_timer_t *timer)
+{
+ app_timer_t *t;
+ app_timer_t *prev = NULL;
+
+ os_mutex_lock(&ctx->mutex);
+
+ t = ctx->app_timers;
+ timer->next = NULL;
+ timer->expiry = bh_get_tick_ms() + timer->interval;
+
+ while (t) {
+ if (timer->expiry < t->expiry) {
+ if (prev == NULL) {
+ timer->next = ctx->app_timers;
+ ctx->app_timers = timer;
+ PRINT("rescheduled timer [%d] at head\n", timer->id);
+ }
+ else {
+ timer->next = t;
+ prev->next = timer;
+ PRINT("rescheduled timer [%d] after [%d]\n", timer->id,
+ prev->id);
+ }
+
+ goto out;
+ }
+ else {
+ prev = t;
+ t = t->next;
+ }
+ }
+
+ if (prev) {
+ /* insert to the list end */
+ prev->next = timer;
+ PRINT("rescheduled timer [%d] at end, after [%d]\n", timer->id,
+ prev->id);
+ }
+ else {
+ /* insert at the begin */
+ bh_assert(ctx->app_timers == NULL);
+ ctx->app_timers = timer;
+ PRINT("rescheduled timer [%d] as first\n", timer->id);
+ }
+
+out:
+ os_mutex_unlock(&ctx->mutex);
+
+ /* ensure the refresh_checker() is called out of the lock */
+ if (prev == NULL && ctx->refresh_checker)
+ ctx->refresh_checker(ctx);
+}
+
+static void
+release_timer(timer_ctx_t ctx, app_timer_t *t)
+{
+ if (ctx->pre_allocated) {
+ os_mutex_lock(&ctx->mutex);
+ t->next = ctx->free_timers;
+ ctx->free_timers = t;
+ PRINT("recycle timer :%d\n", t->id);
+ os_mutex_unlock(&ctx->mutex);
+ }
+ else {
+ PRINT("destroy timer :%d\n", t->id);
+ BH_FREE(t);
+ }
+}
+
+void
+release_timer_list(app_timer_t **p_list)
+{
+ app_timer_t *t = *p_list;
+
+ while (t) {
+ app_timer_t *next = t->next;
+ PRINT("destroy timer list:%d\n", t->id);
+ BH_FREE(t);
+ t = next;
+ }
+
+ *p_list = NULL;
+}
+
+/*
+ * API exposed
+ */
+
+timer_ctx_t
+create_timer_ctx(timer_callback_f timer_handler,
+ check_timer_expiry_f expiery_checker, int prealloc_num,
+ unsigned int owner)
+{
+ timer_ctx_t ctx = (timer_ctx_t)BH_MALLOC(sizeof(struct _timer_ctx));
+
+ if (ctx == NULL)
+ return NULL;
+
+ memset(ctx, 0, sizeof(struct _timer_ctx));
+
+ ctx->timer_callback = timer_handler;
+ ctx->pre_allocated = prealloc_num;
+ ctx->refresh_checker = expiery_checker;
+ ctx->owner = owner;
+
+ while (prealloc_num > 0) {
+ app_timer_t *timer = (app_timer_t *)BH_MALLOC(sizeof(app_timer_t));
+
+ if (timer == NULL)
+ goto cleanup;
+
+ memset(timer, 0, sizeof(*timer));
+ timer->next = ctx->free_timers;
+ ctx->free_timers = timer;
+ prealloc_num--;
+ }
+
+ if (os_cond_init(&ctx->cond) != 0)
+ goto cleanup;
+
+ if (os_mutex_init(&ctx->mutex) != 0) {
+ os_cond_destroy(&ctx->cond);
+ goto cleanup;
+ }
+
+ PRINT("timer ctx created. pre-alloc: %d\n", ctx->pre_allocated);
+ return ctx;
+
+cleanup:
+ if (ctx) {
+ release_timer_list(&ctx->free_timers);
+ BH_FREE(ctx);
+ }
+ PRINT("timer ctx create failed\n");
+ return NULL;
+}
+
+void
+destroy_timer_ctx(timer_ctx_t ctx)
+{
+ while (ctx->free_timers) {
+ void *tmp = ctx->free_timers;
+ ctx->free_timers = ctx->free_timers->next;
+ BH_FREE(tmp);
+ }
+
+ cleanup_app_timers(ctx);
+
+ os_cond_destroy(&ctx->cond);
+ os_mutex_destroy(&ctx->mutex);
+ BH_FREE(ctx);
+}
+
+unsigned int
+timer_ctx_get_owner(timer_ctx_t ctx)
+{
+ return ctx->owner;
+}
+
+void
+add_idle_timer(timer_ctx_t ctx, app_timer_t *timer)
+{
+ os_mutex_lock(&ctx->mutex);
+ timer->next = ctx->idle_timers;
+ ctx->idle_timers = timer;
+ os_mutex_unlock(&ctx->mutex);
+}
+
+uint32
+sys_create_timer(timer_ctx_t ctx, int interval, bool is_period, bool auto_start)
+{
+ app_timer_t *timer;
+
+ if (ctx->pre_allocated) {
+ if (ctx->free_timers == NULL) {
+ return (uint32)-1;
+ }
+ else {
+ timer = ctx->free_timers;
+ ctx->free_timers = timer->next;
+ }
+ }
+ else {
+ timer = (app_timer_t *)BH_MALLOC(sizeof(app_timer_t));
+ if (timer == NULL)
+ return (uint32)-1;
+ }
+
+ memset(timer, 0, sizeof(*timer));
+
+ ctx->max_timer_id++;
+ if (ctx->max_timer_id == (uint32)-1)
+ ctx->max_timer_id++;
+ timer->id = ctx->max_timer_id;
+ timer->interval = (uint32)interval;
+ timer->is_periodic = is_period;
+
+ if (auto_start)
+ reschedule_timer(ctx, timer);
+ else
+ add_idle_timer(ctx, timer);
+
+ return timer->id;
+}
+
+bool
+sys_timer_cancel(timer_ctx_t ctx, uint32 timer_id)
+{
+ bool from_active;
+ app_timer_t *t = remove_timer(ctx, timer_id, &from_active);
+
+ if (t == NULL)
+ return false;
+
+ add_idle_timer(ctx, t);
+
+ PRINT("sys_timer_stop called\n");
+ return from_active;
+}
+
+bool
+sys_timer_destroy(timer_ctx_t ctx, uint32 timer_id)
+{
+ bool from_active;
+ app_timer_t *t = remove_timer(ctx, timer_id, &from_active);
+
+ if (t == NULL)
+ return false;
+
+ release_timer(ctx, t);
+
+ PRINT("sys_timer_destroy called\n");
+ return true;
+}
+
+bool
+sys_timer_restart(timer_ctx_t ctx, uint32 timer_id, int interval)
+{
+ app_timer_t *t = remove_timer(ctx, timer_id, NULL);
+
+ if (t == NULL)
+ return false;
+
+ t->interval = (uint32)interval;
+
+ reschedule_timer(ctx, t);
+
+ PRINT("sys_timer_restart called\n");
+ return true;
+}
+
+/*
+ * API called by the timer manager from another thread or the kernel timer
+ * handler
+ */
+
+/**
+ * Invoke the registered timer callback for every expired timer; the
+ * callback typically posts a timeout message to the owner app's queue.
+ */
+static void
+handle_expired_timers(timer_ctx_t ctx, app_timer_t *expired)
+{
+ while (expired) {
+ app_timer_t *t = expired;
+ ctx->timer_callback(t->id, ctx->owner);
+
+ /* get next expired timer first, since the following
+ operation may change expired->next */
+ expired = expired->next;
+ if (t->is_periodic) {
+ /* if it is repeating, then reschedule it; */
+ reschedule_timer(ctx, t);
+ }
+ else {
+ /* else move it to idle list */
+ add_idle_timer(ctx, t);
+ }
+ }
+}
+
+uint32
+get_expiry_ms(timer_ctx_t ctx)
+{
+ uint32 ms_to_next_expiry;
+ uint64 now = bh_get_tick_ms();
+
+ os_mutex_lock(&ctx->mutex);
+ if (ctx->app_timers == NULL)
+ ms_to_next_expiry = (uint32)-1;
+ else if (ctx->app_timers->expiry >= now)
+ ms_to_next_expiry = (uint32)(ctx->app_timers->expiry - now);
+ else
+ ms_to_next_expiry = 0;
+ os_mutex_unlock(&ctx->mutex);
+
+ return ms_to_next_expiry;
+}
+
+uint32
+check_app_timers(timer_ctx_t ctx)
+{
+ app_timer_t *t, *expired = NULL, *expired_end = NULL;
+ uint64 now = bh_get_tick_ms();
+
+ os_mutex_lock(&ctx->mutex);
+
+ t = ctx->app_timers;
+ while (t) {
+ if (now >= t->expiry) {
+ ctx->app_timers = t->next;
+
+ /* append t to the end of expired list */
+ t->next = NULL;
+ if (!expired_end) {
+ expired = expired_end = t;
+ }
+ else {
+ expired_end->next = t;
+ expired_end = t;
+ }
+
+ t = ctx->app_timers;
+ }
+ else {
+ break;
+ }
+ }
+ os_mutex_unlock(&ctx->mutex);
+
+ handle_expired_timers(ctx, expired);
+ return get_expiry_ms(ctx);
+}
+
+void
+cleanup_app_timers(timer_ctx_t ctx)
+{
+ os_mutex_lock(&ctx->mutex);
+
+ release_timer_list(&ctx->app_timers);
+ release_timer_list(&ctx->idle_timers);
+
+ os_mutex_unlock(&ctx->mutex);
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.h
new file mode 100644
index 000000000..b8d90c5ff
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/runtime_timer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef LIB_BASE_RUNTIME_TIMER_H_
+#define LIB_BASE_RUNTIME_TIMER_H_
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint64
+bh_get_tick_ms(void);
+uint32
+bh_get_elpased_ms(uint32 *last_system_clock);
+
+struct _timer_ctx;
+typedef struct _timer_ctx *timer_ctx_t;
+typedef void (*timer_callback_f)(unsigned int id, unsigned int owner);
+typedef void (*check_timer_expiry_f)(timer_ctx_t ctx);
+
+timer_ctx_t
+create_timer_ctx(timer_callback_f timer_handler, check_timer_expiry_f,
+ int prealloc_num, unsigned int owner);
+void
+destroy_timer_ctx(timer_ctx_t);
+unsigned int
+timer_ctx_get_owner(timer_ctx_t ctx);
+
+uint32
+sys_create_timer(timer_ctx_t ctx, int interval, bool is_period,
+ bool auto_start);
+bool
+sys_timer_destroy(timer_ctx_t ctx, uint32 timer_id);
+bool
+sys_timer_cancel(timer_ctx_t ctx, uint32 timer_id);
+bool
+sys_timer_restart(timer_ctx_t ctx, uint32 timer_id, int interval);
+void
+cleanup_app_timers(timer_ctx_t ctx);
+uint32
+check_app_timers(timer_ctx_t ctx);
+uint32
+get_expiry_ms(timer_ctx_t ctx);
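+
+/*
+ * A minimal usage sketch (the callback, owner id 0 and the 1000 ms
+ * interval are illustrative):
+ *
+ *     static void on_timeout(unsigned int id, unsigned int owner)
+ *     {
+ *         // e.g. post a timer message to the owner app's queue
+ *     }
+ *
+ *     timer_ctx_t ctx = create_timer_ctx(on_timeout, NULL, 10, 0);
+ *     if (ctx) {
+ *         uint32 id = sys_create_timer(ctx, 1000, true, true);
+ *
+ *         // periodically, e.g. from a dedicated thread: fire expired
+ *         // timers and learn how long to sleep until the next expiry
+ *         uint32 ms_to_next = check_app_timers(ctx);
+ *         (void)ms_to_next;
+ *
+ *         sys_timer_destroy(ctx, id);
+ *         destroy_timer_ctx(ctx);
+ *     }
+ */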
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* LIB_BASE_RUNTIME_TIMER_H_ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/shared_utils.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/shared_utils.cmake
new file mode 100644
index 000000000..5b7d02dde
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/shared_utils.cmake
@@ -0,0 +1,12 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (UTILS_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories(${UTILS_SHARED_DIR})
+
+file (GLOB source_all ${UTILS_SHARED_DIR}/*.c)
+
+set (UTILS_SHARED_SOURCE ${source_all})
+
+LIST (APPEND RUNTIME_LIB_HEADER_LIST "${UTILS_SHARED_DIR}/runtime_timer.h")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/SConscript b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/SConscript
new file mode 100644
index 000000000..f608645fe
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/SConscript
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 2021, RT-Thread Development Team
+#
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+from building import *
+import os
+
+cwd = GetCurrentDir()
+
+# src = Split('''
+# ''')
+
+
+def addSrcFiles(arr, path):
+ for f in os.listdir(path):
+        fpath = os.path.join(path, f)
+ if os.path.isfile(fpath):
+ ext = os.path.splitext(fpath)[-1]
+ if ext == '.c' or ext == '.cpp':
+ arr += [fpath]
+ #elif os.path.isdir(fpath):
+ # addSrcFiles(arr, fpath)
+
+src = Glob('*.c')
+src += Glob('*.cpp')
+CPPPATH = [cwd]
+
+group = DefineGroup('iwasm_shared_utils_uncommon', src, depend = [''], CPPPATH = CPPPATH)
+
+Return('group')
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.c
new file mode 100644
index 000000000..19e23a7b5
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2020 Ant Financial Services Group. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef __GNUC__
+
+#include "bh_getopt.h"
+#include <stdio.h>
+#include <string.h>
+
+char *optarg = NULL;
+int optind = 1;
+
+int
+getopt(int argc, char *const argv[], const char *optstring)
+{
+ static int sp = 1;
+ int opt;
+ char *p;
+
+ if (sp == 1) {
+ if ((optind >= argc) || (argv[optind][0] != '-')
+ || (argv[optind][1] == 0)) {
+ return -1;
+ }
+ else if (!strcmp(argv[optind], "--")) {
+ optind++;
+ return -1;
+ }
+ }
+
+ opt = argv[optind][sp];
+ p = strchr(optstring, opt);
+ if (opt == ':' || p == NULL) {
+ printf("illegal option : '-%c'\n", opt);
+ if (argv[optind][++sp] == '\0') {
+ optind++;
+ sp = 1;
+ }
+ return ('?');
+ }
+ if (p[1] == ':') {
+ if (argv[optind][sp + 1] != '\0')
+ optarg = &argv[optind++][sp + 1];
+ else if (++optind >= argc) {
+ printf("option '-%c' requires an argument :\n", opt);
+ sp = 1;
+ return ('?');
+ }
+ else {
+ optarg = argv[optind++];
+ }
+ sp = 1;
+ }
+ else {
+ if (argv[optind][++sp] == '\0') {
+ sp = 1;
+ optind++;
+ }
+ optarg = NULL;
+ }
+ return (opt);
+}
+#endif
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.h
new file mode 100644
index 000000000..efd3ab403
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_getopt.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2020 Ant Financial Services Group. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifdef __GNUC__
+#include <getopt.h>
+#endif
+#ifndef __GNUC__
+#ifndef GETOPT_H__
+#define GETOPT_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char *optarg;
+extern int optind;
+
+int
+getopt(int argc, char *const argv[], const char *optstring);
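+
+/*
+ * A minimal usage sketch (the option string "f:v" is illustrative:
+ * -f takes an argument, -v does not):
+ *
+ *     int opt;
+ *     while ((opt = getopt(argc, argv, "f:v")) != -1) {
+ *         switch (opt) {
+ *             case 'f':
+ *                 // optarg points to the file name argument
+ *                 break;
+ *             case 'v':
+ *                 break;
+ *             default:
+ *                 return 1;
+ *         }
+ *     }
+ */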
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of GETOPT_H__ */
+#endif /* end of __GNUC__ */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.c
new file mode 100644
index 000000000..5ddf1b601
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.c
@@ -0,0 +1,117 @@
+#include "bh_read_file.h"
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#if defined(_WIN32) || defined(_WIN32_)
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+#if defined(_WIN32) || defined(_WIN32_)
+
+#if defined(__MINGW32__) && !defined(_SH_DENYNO)
+#define _SH_DENYNO 0x40
+#endif
+
+char *
+bh_read_file_to_buffer(const char *filename, uint32 *ret_size)
+{
+ char *buffer;
+ int file;
+ uint32 file_size, buf_size, read_size;
+ struct stat stat_buf;
+
+ if (!filename || !ret_size) {
+ printf("Read file to buffer failed: invalid filename or ret size.\n");
+ return NULL;
+ }
+
+ if (_sopen_s(&file, filename, _O_RDONLY | _O_BINARY, _SH_DENYNO, 0)) {
+ printf("Read file to buffer failed: open file %s failed.\n", filename);
+ return NULL;
+ }
+
+ if (fstat(file, &stat_buf) != 0) {
+ printf("Read file to buffer failed: fstat file %s failed.\n", filename);
+ _close(file);
+ return NULL;
+ }
+ file_size = (uint32)stat_buf.st_size;
+
+    /* Alloc at least 1 byte to avoid malloc failure */
+ buf_size = file_size > 0 ? file_size : 1;
+
+ if (!(buffer = (char *)BH_MALLOC(buf_size))) {
+ printf("Read file to buffer failed: alloc memory failed.\n");
+ _close(file);
+ return NULL;
+ }
+#if WASM_ENABLE_MEMORY_TRACING != 0
+ printf("Read file, total size: %u\n", file_size);
+#endif
+
+ read_size = _read(file, buffer, file_size);
+ _close(file);
+
+ if (read_size < file_size) {
+ printf("Read file to buffer failed: read file content failed.\n");
+ BH_FREE(buffer);
+ return NULL;
+ }
+
+ *ret_size = file_size;
+ return buffer;
+}
+#else /* else of defined(_WIN32) || defined(_WIN32_) */
+char *
+bh_read_file_to_buffer(const char *filename, uint32 *ret_size)
+{
+ char *buffer;
+ int file;
+ uint32 file_size, buf_size, read_size;
+ struct stat stat_buf;
+
+ if (!filename || !ret_size) {
+ printf("Read file to buffer failed: invalid filename or ret size.\n");
+ return NULL;
+ }
+
+ if ((file = open(filename, O_RDONLY, 0)) == -1) {
+ printf("Read file to buffer failed: open file %s failed.\n", filename);
+ return NULL;
+ }
+
+ if (fstat(file, &stat_buf) != 0) {
+ printf("Read file to buffer failed: fstat file %s failed.\n", filename);
+ close(file);
+ return NULL;
+ }
+
+ file_size = (uint32)stat_buf.st_size;
+
+    /* Alloc at least 1 byte to avoid malloc failure */
+ buf_size = file_size > 0 ? file_size : 1;
+
+ if (!(buffer = BH_MALLOC(buf_size))) {
+ printf("Read file to buffer failed: alloc memory failed.\n");
+ close(file);
+ return NULL;
+ }
+#if WASM_ENABLE_MEMORY_TRACING != 0
+ printf("Read file, total size: %u\n", file_size);
+#endif
+
+ read_size = (uint32)read(file, buffer, file_size);
+ close(file);
+
+ if (read_size < file_size) {
+ printf("Read file to buffer failed: read file content failed.\n");
+ BH_FREE(buffer);
+ return NULL;
+ }
+
+ *ret_size = file_size;
+ return buffer;
+}
+#endif /* end of defined(_WIN32) || defined(_WIN32_) */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.h
new file mode 100644
index 000000000..bbebf847f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/bh_read_file.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _BH_FILE_H
+#define _BH_FILE_H
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+char *
+bh_read_file_to_buffer(const char *filename, uint32 *ret_size);
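+
+/*
+ * A minimal usage sketch (the file name is illustrative); the returned
+ * buffer is allocated with BH_MALLOC and must be released with BH_FREE.
+ *
+ *     uint32 size = 0;
+ *     char *buf = bh_read_file_to_buffer("test.wasm", &size);
+ *
+ *     if (buf) {
+ *         // buf holds exactly size bytes of the file content
+ *         BH_FREE(buf);
+ *     }
+ */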
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _BH_FILE_H */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/shared_uncommon.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/shared_uncommon.cmake
new file mode 100644
index 000000000..0a15b87b8
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/utils/uncommon/shared_uncommon.cmake
@@ -0,0 +1,11 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (UNCOMMON_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+include_directories(${UNCOMMON_SHARED_DIR})
+
+file (GLOB_RECURSE source_all ${UNCOMMON_SHARED_DIR}/*.c)
+
+set (UNCOMMON_SHARED_SOURCE ${source_all})
+