Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr')
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h   | 149
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake |  16
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c     | 223
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c       | 576
-rw-r--r--  fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c         |  12
5 files changed, 976 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h
new file mode 100644
index 000000000..d2a94e4ad
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/platform_internal.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _PLATFORM_INTERNAL_H
+#define _PLATFORM_INTERNAL_H
+
+#include <autoconf.h>
+#include <version.h>
+
+#if KERNEL_VERSION_NUMBER < 0x030200 /* version 3.2.0 */
+#include <zephyr.h>
+#include <kernel.h>
+#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
+#include <sys/printk.h>
+#else
+#include <misc/printk.h>
+#endif
+#else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
+#include <zephyr/kernel.h>
+#include <zephyr/sys/printk.h>
+#endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <limits.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#ifndef CONFIG_NET_BUF_USER_DATA_SIZE
+#define CONFIG_NET_BUF_USER_DATA_SIZE 0
+#endif
+
+#if KERNEL_VERSION_NUMBER < 0x030200 /* version 3.2.0 */
+#include <net/net_pkt.h>
+#include <net/net_if.h>
+#include <net/net_ip.h>
+#include <net/net_core.h>
+#include <net/net_context.h>
+
+#ifdef CONFIG_ARM_MPU
+#include <arch/arm/aarch32/cortex_m/cmsis.h>
+#endif
+#else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
+#include <zephyr/net/net_pkt.h>
+#include <zephyr/net/net_if.h>
+#include <zephyr/net/net_ip.h>
+#include <zephyr/net/net_core.h>
+#include <zephyr/net/net_context.h>
+
+#ifdef CONFIG_ARM_MPU
+#include <zephyr/arch/arm/aarch32/cortex_m/cmsis.h>
+#endif
+#endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
+
+#ifndef BH_PLATFORM_ZEPHYR
+#define BH_PLATFORM_ZEPHYR
+#endif
+
+#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
+
+/* Default thread priority */
+#define BH_THREAD_DEFAULT_PRIORITY 7
+
+typedef struct k_thread korp_thread;
+typedef korp_thread *korp_tid;
+typedef struct k_mutex korp_mutex;
+typedef unsigned int korp_sem;
+
+struct os_thread_wait_node;
+typedef struct os_thread_wait_node *os_thread_wait_list;
+typedef struct korp_cond {
+ struct k_mutex wait_list_lock;
+ os_thread_wait_list thread_wait_list;
+} korp_cond;
+
+#ifndef Z_TIMEOUT_MS
+#define Z_TIMEOUT_MS(ms) ms
+#endif
+
+/* clang-format off */
+void abort(void);
+size_t strspn(const char *s, const char *accept);
+size_t strcspn(const char *s, const char *reject);
+
+/* math functions which are not provided by os */
+double atan(double x);
+double atan2(double y, double x);
+double sqrt(double x);
+double floor(double x);
+double ceil(double x);
+double fmin(double x, double y);
+double fmax(double x, double y);
+double rint(double x);
+double fabs(double x);
+double trunc(double x);
+float sqrtf(float x);
+float floorf(float x);
+float ceilf(float x);
+float fminf(float x, float y);
+float fmaxf(float x, float y);
+float rintf(float x);
+float fabsf(float x);
+float truncf(float x);
+int signbit(double x);
+int isnan(double x);
+double pow(double x, double y);
+double scalbn(double x, int n);
+
+unsigned long long int strtoull(const char *nptr, char **endptr, int base);
+double strtod(const char *nptr, char **endptr);
+float strtof(const char *nptr, char **endptr);
+/* clang-format on */
+
+#if KERNEL_VERSION_NUMBER >= 0x030100 /* version 3.1.0 */
+#define BH_HAS_SQRT
+#define BH_HAS_SQRTF
+#endif
+
+/**
+ * @brief Allocate executable memory
+ *
+ * @param size size of the memory to be allocated
+ *
+ * @return the address of the allocated memory, or NULL if the allocation fails
+ */
+typedef void *(*exec_mem_alloc_func_t)(unsigned int size);
+
+/**
+ * @brief Release executable memory
+ *
+ * @param addr the address of the executable memory to be released
+ */
+typedef void (*exec_mem_free_func_t)(void *addr);
+
+/* The function below is called by an external project to set the function
+ * pointers that will be used to allocate/free executable memory. Otherwise
+ * the default mechanism is used.
+ */
+void
+set_exec_mem_alloc_func(exec_mem_alloc_func_t alloc_func,
+ exec_mem_free_func_t free_func);
+
+#endif
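
The exec_mem_alloc_func_t/exec_mem_free_func_t hooks declared above let an embedder supply its own executable-memory pool, which matters for AOT code that must live in a region the MPU permits to execute. A minimal sketch of registering such hooks, assuming a hypothetical statically reserved pool with a bump allocator (exec_pool, pool_used, my_exec_alloc, my_exec_free and register_exec_mem_hooks are all invented for illustration; only set_exec_mem_alloc_func() comes from this header):

/* Sketch: route executable-memory requests to a static buffer.
 * The pool and bump-allocation policy are hypothetical; only
 * set_exec_mem_alloc_func() is taken from platform_internal.h. */
#include "platform_internal.h"

static char exec_pool[16 * 1024]; /* assumed to sit in an executable region */
static unsigned int pool_used = 0;

static void *
my_exec_alloc(unsigned int size)
{
    size = (size + 7) & ~7u; /* keep allocations 8-byte aligned */
    if (pool_used + size > sizeof(exec_pool))
        return NULL; /* naive bump allocator: no reuse, NULL when exhausted */
    void *p = exec_pool + pool_used;
    pool_used += size;
    return p;
}

static void
my_exec_free(void *addr)
{
    (void)addr; /* a bump allocator cannot release individual blocks */
}

static void
register_exec_mem_hooks(void)
{
    /* after this, os_mmap()/os_munmap() in zephyr_platform.c (below)
       route through these hooks instead of BH_MALLOC/BH_FREE */
    set_exec_mem_alloc_func(my_exec_alloc, my_exec_free);
}
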
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake
new file mode 100644
index 000000000..9b043b52f
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/shared_platform.cmake
@@ -0,0 +1,16 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_SHARED_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+add_definitions(-DBH_PLATFORM_ZEPHYR)
+
+include_directories(${PLATFORM_SHARED_DIR})
+include_directories(${PLATFORM_SHARED_DIR}/../include)
+
+include (${CMAKE_CURRENT_LIST_DIR}/../common/math/platform_api_math.cmake)
+
+file (GLOB_RECURSE source_all ${PLATFORM_SHARED_DIR}/*.c)
+
+set (PLATFORM_SHARED_SOURCE ${source_all} ${PLATFORM_COMMON_MATH_SOURCE})
+
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c
new file mode 100644
index 000000000..b4f2e5ec7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_platform.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* function pointers for executable memory management */
+static exec_mem_alloc_func_t exec_mem_alloc_func = NULL;
+static exec_mem_free_func_t exec_mem_free_func = NULL;
+
+#if WASM_ENABLE_AOT != 0
+#ifdef CONFIG_ARM_MPU
+/**
+ * Allow execution from the SRAM region.
+ * This is needed for AOT code because SoCs typically
+ * disable execution from SRAM by default.
+ */
+static void
+disable_mpu_rasr_xn(void)
+{
+ uint32 index;
+ /* Keep the max index as 8 (irrespective of the SoC) because the SRAM
+ region is most likely set at index 2. */
+ for (index = 0U; index < 8; index++) {
+ MPU->RNR = index;
+ if (MPU->RASR & MPU_RASR_XN_Msk) {
+ MPU->RASR &= ~MPU_RASR_XN_Msk; /* clear the XN bit to allow execution */
+ }
+ }
+}
+#endif /* end of CONFIG_ARM_MPU */
+#endif
+
+static int
+_stdout_hook_iwasm(int c)
+{
+ printk("%c", (char)c);
+ return 1;
+}
+
+int
+os_thread_sys_init();
+
+void
+os_thread_sys_destroy();
+
+int
+bh_platform_init()
+{
+ extern void __stdout_hook_install(int (*hook)(int));
+ /* Enable printf() in Zephyr */
+ __stdout_hook_install(_stdout_hook_iwasm);
+
+#if WASM_ENABLE_AOT != 0
+#ifdef CONFIG_ARM_MPU
+ /* Enable executable memory support */
+ disable_mpu_rasr_xn();
+#endif
+#endif
+
+ return os_thread_sys_init();
+}
+
+void
+bh_platform_destroy()
+{
+ os_thread_sys_destroy();
+}
+
+void *
+os_malloc(unsigned size)
+{
+ return NULL;
+}
+
+void *
+os_realloc(void *ptr, unsigned size)
+{
+ return NULL;
+}
+
+void
+os_free(void *ptr)
+{}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ return -1;
+}
+
+#if 0
+struct out_context {
+ int count;
+};
+
+typedef int (*out_func_t)(int c, void *ctx);
+
+static int
+char_out(int c, void *ctx)
+{
+ struct out_context *out_ctx = (struct out_context*)ctx;
+ out_ctx->count++;
+ return _stdout_hook_iwasm(c);
+}
+
+int
+os_vprintf(const char *fmt, va_list ap)
+{
+#if 0
+ struct out_context ctx = { 0 };
+ cbvprintf(char_out, &ctx, fmt, ap);
+ return ctx.count;
+#else
+ vprintk(fmt, ap);
+ return 0;
+#endif
+}
+#endif
+
+int
+os_printf(const char *format, ...)
+{
+ int ret = 0;
+ va_list ap;
+
+ va_start(ap, format);
+#ifndef BH_VPRINTF
+ ret += vprintf(format, ap);
+#else
+ ret += BH_VPRINTF(format, ap);
+#endif
+ va_end(ap);
+
+ return ret;
+}
+
+int
+os_vprintf(const char *format, va_list ap)
+{
+#ifndef BH_VPRINTF
+ return vprintf(format, ap);
+#else
+ return BH_VPRINTF(format, ap);
+#endif
+}
+
+#if KERNEL_VERSION_NUMBER <= 0x020400 /* version 2.4.0 */
+void
+abort(void)
+{
+ int i = 0;
+ os_printf("%d\n", 1 / i);
+}
+#endif
+
+#if KERNEL_VERSION_NUMBER <= 0x010E01 /* version 1.14.1 */
+size_t
+strspn(const char *s, const char *accept)
+{
+ os_printf("## unimplemented function %s called", __FUNCTION__);
+ return 0;
+}
+
+size_t
+strcspn(const char *s, const char *reject)
+{
+ os_printf("## unimplemented function %s called", __FUNCTION__);
+ return 0;
+}
+#endif
+
+void *
+os_mmap(void *hint, size_t size, int prot, int flags)
+{
+ if ((uint64)size >= UINT32_MAX)
+ return NULL;
+ if (exec_mem_alloc_func)
+ return exec_mem_alloc_func((uint32)size);
+ else
+ return BH_MALLOC(size);
+}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ if (exec_mem_free_func)
+ exec_mem_free_func(addr);
+ else
+ BH_FREE(addr);
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ return 0;
+}
+
+void
+os_dcache_flush()
+{
+#if defined(CONFIG_CPU_CORTEX_M7) && defined(CONFIG_ARM_MPU)
+ uint32 key;
+ key = irq_lock();
+ SCB_CleanDCache();
+ irq_unlock(key);
+#elif defined(CONFIG_SOC_CVF_EM7D) && defined(CONFIG_ARC_MPU) \
+ && defined(CONFIG_CACHE_FLUSHING)
+ __asm__ __volatile__("sync");
+ z_arc_v2_aux_reg_write(_ARC_V2_DC_FLSH, BIT(0));
+ __asm__ __volatile__("sync");
+#endif
+}
+
+void
+set_exec_mem_alloc_func(exec_mem_alloc_func_t alloc_func,
+ exec_mem_free_func_t free_func)
+{
+ exec_mem_alloc_func = alloc_func;
+ exec_mem_free_func = free_func;
+}
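
Since os_malloc()/os_realloc() here return NULL, a Zephyr embedder has to hand WAMR its heap as a pre-allocated pool. A sketch under that assumption, using the public wasm_export.h embedder API (the buffer size and the init_wamr_runtime name are illustrative, not the canonical product-mini main):

/* Sketch: initializing WAMR on Zephyr with a static heap pool,
 * since the os_malloc()/os_realloc() stubs above return NULL. */
#include <string.h>
#include <stdbool.h>
#include "wasm_export.h"

static char global_heap_buf[128 * 1024]; /* illustrative size */

static bool
init_wamr_runtime(void)
{
    RuntimeInitArgs init_args;

    memset(&init_args, 0, sizeof(init_args));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = global_heap_buf;
    init_args.mem_alloc_option.pool.heap_size = sizeof(global_heap_buf);

    /* wasm_runtime_full_init() also performs the platform init above
       (bh_platform_init): it installs the stdout hook and, under
       CONFIG_ARM_MPU with AOT enabled, clears the MPU XN bits */
    return wasm_runtime_full_init(&init_args);
}
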
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c
new file mode 100644
index 000000000..1ee2c5cef
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_thread.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+/* clang-format off */
+#define bh_assert(v) do { \
+ if (!(v)) { \
+ printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
+ #v, __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+/* clang-format on */
+
+#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) \
+ || KERNEL_VERSION_NUMBER > 0x020300 /* version 2.3.0 */
+#define BH_ENABLE_ZEPHYR_MPU_STACK 1
+#elif !defined(BH_ENABLE_ZEPHYR_MPU_STACK)
+#define BH_ENABLE_ZEPHYR_MPU_STACK 0
+#endif
+#if !defined(BH_ZEPHYR_MPU_STACK_SIZE)
+#define BH_ZEPHYR_MPU_STACK_SIZE APP_THREAD_STACK_SIZE_MIN
+#endif
+#if !defined(BH_ZEPHYR_MPU_STACK_COUNT)
+#define BH_ZEPHYR_MPU_STACK_COUNT 4
+#endif
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+static K_THREAD_STACK_ARRAY_DEFINE(mpu_stacks, BH_ZEPHYR_MPU_STACK_COUNT,
+ BH_ZEPHYR_MPU_STACK_SIZE);
+static bool mpu_stack_allocated[BH_ZEPHYR_MPU_STACK_COUNT];
+static struct k_mutex mpu_stack_lock;
+
+static char *
+mpu_stack_alloc()
+{
+ int i;
+
+ k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+ for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
+ if (!mpu_stack_allocated[i]) {
+ mpu_stack_allocated[i] = true;
+ k_mutex_unlock(&mpu_stack_lock);
+ return (char *)mpu_stacks[i];
+ }
+ }
+ k_mutex_unlock(&mpu_stack_lock);
+ return NULL;
+}
+
+static void
+mpu_stack_free(char *stack)
+{
+ int i;
+
+ k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+ for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
+ if ((char *)mpu_stacks[i] == stack)
+ mpu_stack_allocated[i] = false;
+ }
+ k_mutex_unlock(&mpu_stack_lock);
+}
+#endif
+
+typedef struct os_thread_wait_node {
+ struct k_sem sem;
+ os_thread_wait_list next;
+} os_thread_wait_node;
+
+typedef struct os_thread_data {
+ /* Next thread data */
+ struct os_thread_data *next;
+ /* Zephyr thread handle */
+ korp_tid tid;
+ /* Jeff thread local root */
+ void *tlr;
+ /* Lock for waiting list */
+ struct k_mutex wait_list_lock;
+ /* Waiting list of other threads who are joining this thread */
+ os_thread_wait_list thread_wait_list;
+ /* Thread stack size */
+ unsigned stack_size;
+#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
+ /* Thread stack */
+ char stack[1];
+#else
+ char *stack;
+#endif
+} os_thread_data;
+
+typedef struct os_thread_obj {
+ struct k_thread thread;
+ /* Whether the thread is terminated and this thread object is to
+ be freed in the future. */
+ bool to_be_freed;
+ struct os_thread_obj *next;
+} os_thread_obj;
+
+static bool is_thread_sys_inited = false;
+
+/* Thread data of supervisor thread */
+static os_thread_data supervisor_thread_data;
+
+/* Lock for thread data list */
+static struct k_mutex thread_data_lock;
+
+/* Thread data list */
+static os_thread_data *thread_data_list = NULL;
+
+/* Lock for thread object list */
+static struct k_mutex thread_obj_lock;
+
+/* Thread object list */
+static os_thread_obj *thread_obj_list = NULL;
+
+static void
+thread_data_list_add(os_thread_data *thread_data)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (!thread_data_list)
+ thread_data_list = thread_data;
+ else {
+ /* If already in list, just return */
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p == thread_data) {
+ k_mutex_unlock(&thread_data_lock);
+ return;
+ }
+ p = p->next;
+ }
+
+ /* Set as head of list */
+ thread_data->next = thread_data_list;
+ thread_data_list = thread_data;
+ }
+ k_mutex_unlock(&thread_data_lock);
+}
+
+static void
+thread_data_list_remove(os_thread_data *thread_data)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (thread_data_list) {
+ if (thread_data_list == thread_data)
+ thread_data_list = thread_data_list->next;
+ else {
+ /* Search and remove it from list */
+ os_thread_data *p = thread_data_list;
+ while (p && p->next != thread_data)
+ p = p->next;
+ if (p && p->next == thread_data)
+ p->next = p->next->next;
+ }
+ }
+ k_mutex_unlock(&thread_data_lock);
+}
+
+static os_thread_data *
+thread_data_list_lookup(k_tid_t tid)
+{
+ k_mutex_lock(&thread_data_lock, K_FOREVER);
+ if (thread_data_list) {
+ os_thread_data *p = thread_data_list;
+ while (p) {
+ if (p->tid == tid) {
+ /* Found */
+ k_mutex_unlock(&thread_data_lock);
+ return p;
+ }
+ p = p->next;
+ }
+ }
+ k_mutex_unlock(&thread_data_lock);
+ return NULL;
+}
+
+static void
+thread_obj_list_add(os_thread_obj *thread_obj)
+{
+ k_mutex_lock(&thread_obj_lock, K_FOREVER);
+ if (!thread_obj_list)
+ thread_obj_list = thread_obj;
+ else {
+ /* Set as head of list */
+ thread_obj->next = thread_obj_list;
+ thread_obj_list = thread_obj;
+ }
+ k_mutex_unlock(&thread_obj_lock);
+}
+
+static void
+thread_obj_list_reclaim()
+{
+ os_thread_obj *p, *p_prev;
+ k_mutex_lock(&thread_obj_lock, K_FOREVER);
+ p_prev = NULL;
+ p = thread_obj_list;
+ while (p) {
+ if (p->to_be_freed) {
+ if (p_prev == NULL) { /* p is the head of list */
+ thread_obj_list = p->next;
+ BH_FREE(p);
+ p = thread_obj_list;
+ }
+ else { /* p is not the head of list */
+ p_prev->next = p->next;
+ BH_FREE(p);
+ p = p_prev->next;
+ }
+ }
+ else {
+ p_prev = p;
+ p = p->next;
+ }
+ }
+ k_mutex_unlock(&thread_obj_lock);
+}
+
+int
+os_thread_sys_init()
+{
+ if (is_thread_sys_inited)
+ return BHT_OK;
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ k_mutex_init(&mpu_stack_lock);
+#endif
+ k_mutex_init(&thread_data_lock);
+ k_mutex_init(&thread_obj_lock);
+
+ /* Initialize supervisor thread data */
+ memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
+ supervisor_thread_data.tid = k_current_get();
+ /* Set as head of thread data list */
+ thread_data_list = &supervisor_thread_data;
+
+ is_thread_sys_inited = true;
+ return BHT_OK;
+}
+
+void
+os_thread_sys_destroy(void)
+{
+ if (is_thread_sys_inited) {
+ is_thread_sys_inited = false;
+ }
+}
+
+static os_thread_data *
+thread_data_current()
+{
+ k_tid_t tid = k_current_get();
+ return thread_data_list_lookup(tid);
+}
+
+static void
+os_thread_cleanup(void)
+{
+ os_thread_data *thread_data = thread_data_current();
+
+ bh_assert(thread_data != NULL);
+ k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+ if (thread_data->thread_wait_list) {
+ /* Signal each joining thread */
+ os_thread_wait_list head = thread_data->thread_wait_list;
+ while (head) {
+ os_thread_wait_list next = head->next;
+ k_sem_give(&head->sem);
+ /* head will be freed by joining thread */
+ head = next;
+ }
+ thread_data->thread_wait_list = NULL;
+ }
+ k_mutex_unlock(&thread_data->wait_list_lock);
+
+ thread_data_list_remove(thread_data);
+ /* Set the flag so that the next thread creation will
+ free this thread object */
+ ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ mpu_stack_free(thread_data->stack);
+#endif
+ BH_FREE(thread_data);
+}
+
+static void
+os_thread_wrapper(void *start, void *arg, void *thread_data)
+{
+ /* Set thread custom data */
+ ((os_thread_data *)thread_data)->tid = k_current_get();
+ thread_data_list_add(thread_data);
+
+ ((thread_start_routine_t)start)(arg);
+ os_thread_cleanup();
+}
+
+int
+os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
+ unsigned int stack_size)
+{
+ return os_thread_create_with_prio(p_tid, start, arg, stack_size,
+ BH_THREAD_DEFAULT_PRIORITY);
+}
+
+int
+os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
+ void *arg, unsigned int stack_size, int prio)
+{
+ korp_tid tid;
+ os_thread_data *thread_data;
+ unsigned thread_data_size;
+
+ if (!p_tid || !stack_size)
+ return BHT_ERROR;
+
+ /* Free the thread objects of terminated threads */
+ thread_obj_list_reclaim();
+
+ /* Create and initialize thread object */
+ if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
+ return BHT_ERROR;
+
+ memset(tid, 0, sizeof(os_thread_obj));
+
+ /* Create and initialize thread data */
+#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
+ if (stack_size < APP_THREAD_STACK_SIZE_MIN)
+ stack_size = APP_THREAD_STACK_SIZE_MIN;
+ thread_data_size = offsetof(os_thread_data, stack) + stack_size;
+#else
+ stack_size = BH_ZEPHYR_MPU_STACK_SIZE;
+ thread_data_size = sizeof(os_thread_data);
+#endif
+ if (!(thread_data = BH_MALLOC(thread_data_size))) {
+ goto fail1;
+ }
+
+ memset(thread_data, 0, thread_data_size);
+ k_mutex_init(&thread_data->wait_list_lock);
+ thread_data->stack_size = stack_size;
+ thread_data->tid = tid;
+
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ if (!(thread_data->stack = mpu_stack_alloc())) {
+ goto fail2;
+ }
+#endif
+
+ /* Create the thread */
+ if (!((tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
+ stack_size, os_thread_wrapper, start, arg,
+ thread_data, prio, 0, K_NO_WAIT)))) {
+ goto fail3;
+ }
+
+ bh_assert(tid == thread_data->tid);
+
+ /* Set thread custom data */
+ thread_data_list_add(thread_data);
+ thread_obj_list_add((os_thread_obj *)tid);
+ *p_tid = tid;
+ return BHT_OK;
+
+fail3:
+#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
+ mpu_stack_free(thread_data->stack);
+fail2:
+#endif
+ BH_FREE(thread_data);
+fail1:
+ BH_FREE(tid);
+ return BHT_ERROR;
+}
+
+korp_tid
+os_self_thread()
+{
+ return (korp_tid)k_current_get();
+}
+
+int
+os_thread_join(korp_tid thread, void **value_ptr)
+{
+ (void)value_ptr;
+ os_thread_data *thread_data;
+ os_thread_wait_node *node;
+
+ /* Create wait node and append it to wait list */
+ if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
+ return BHT_ERROR;
+
+ k_sem_init(&node->sem, 0, 1);
+ node->next = NULL;
+
+ /* Get thread data */
+ thread_data = thread_data_list_lookup(thread);
+ bh_assert(thread_data != NULL);
+
+ k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+ if (!thread_data->thread_wait_list)
+ thread_data->thread_wait_list = node;
+ else {
+ /* Add to end of waiting list */
+ os_thread_wait_node *p = thread_data->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ k_mutex_unlock(&thread_data->wait_list_lock);
+
+ /* Wait on the semaphore */
+ k_sem_take(&node->sem, K_FOREVER);
+
+ /* Wait some time for the thread to be actually terminated */
+ k_sleep(Z_TIMEOUT_MS(100));
+
+ /* Destroy resource */
+ BH_FREE(node);
+ return BHT_OK;
+}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ k_mutex_init(mutex);
+ return BHT_OK;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ k_mutex_init(mutex);
+ return BHT_OK;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ (void)mutex;
+ return BHT_OK;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ return k_mutex_lock(mutex, K_FOREVER);
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
+ return k_mutex_unlock(mutex);
+#else
+ k_mutex_unlock(mutex);
+ return 0;
+#endif
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ k_mutex_init(&cond->wait_list_lock);
+ cond->thread_wait_list = NULL;
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ (void)cond;
+ return BHT_OK;
+}
+
+static int
+os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
+{
+ os_thread_wait_node *node;
+
+ /* Create wait node and append it to wait list */
+ if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
+ return BHT_ERROR;
+
+ k_sem_init(&node->sem, 0, 1);
+ node->next = NULL;
+
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (!cond->thread_wait_list)
+ cond->thread_wait_list = node;
+ else {
+ /* Add to end of wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next)
+ p = p->next;
+ p->next = node;
+ }
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ /* Unlock the mutex, wait on the semaphore, then lock the mutex again */
+ k_mutex_unlock(mutex);
+ k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
+ k_mutex_lock(mutex, K_FOREVER);
+
+ /* Remove wait node from wait list */
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (cond->thread_wait_list == node)
+ cond->thread_wait_list = node->next;
+ else {
+ /* Remove from the wait list */
+ os_thread_wait_node *p = cond->thread_wait_list;
+ while (p->next != node)
+ p = p->next;
+ p->next = node->next;
+ }
+ BH_FREE(node);
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ return os_cond_wait_internal(cond, mutex, false, 0);
+}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+
+ if (useconds == BHT_WAIT_FOREVER) {
+ return os_cond_wait_internal(cond, mutex, false, 0);
+ }
+ else {
+ uint64 mills_64 = useconds / 1000;
+ int32 mills;
+
+ if (mills_64 < (uint64)INT32_MAX) {
+ mills = (int32)mills_64;
+ }
+ else {
+ mills = INT32_MAX;
+ os_printf("Warning: os_cond_reltimedwait exceeds limit, "
+ "set to max timeout instead\n");
+ }
+ return os_cond_wait_internal(cond, mutex, true, mills);
+ }
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ /* Signal the head wait node of wait list */
+ k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+ if (cond->thread_wait_list)
+ k_sem_give(&cond->thread_wait_list->sem);
+ k_mutex_unlock(&cond->wait_list_lock);
+
+ return BHT_OK;
+}
+
+uint8 *
+os_thread_get_stack_boundary()
+{
+#if defined(CONFIG_THREAD_STACK_INFO)
+ korp_tid thread = k_current_get();
+ return (uint8 *)thread->stack_info.start;
+#else
+ return NULL;
+#endif
+}
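
The condition variable above is built from a k_mutex-protected wait list of k_sem nodes: os_cond_wait() appends a node and blocks on its semaphore, and os_cond_signal() gives the semaphore of the head node. A small sketch exercising these primitives together with os_thread_create() and os_thread_join(); the producer/consumer logic is invented, and the consumer signature assumes WAMR's thread_start_routine_t returning void *:

/* Sketch: one thread signalling another through the wait-list
 * condition variable implemented above. The flag and the worker
 * logic are invented; the os_* calls follow this file. */
#include <stdbool.h>
#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

static korp_mutex lock;
static korp_cond cond;
static bool ready = false;

static void *
consumer(void *arg)
{
    (void)arg;
    os_mutex_lock(&lock);
    while (!ready) /* re-check the predicate after every wakeup */
        os_cond_wait(&cond, &lock);
    os_mutex_unlock(&lock);
    return NULL;
}

static void
run_example(void)
{
    korp_tid tid;

    os_mutex_init(&lock);
    os_cond_init(&cond);
    /* 4 KB is illustrative; os_thread_create() raises it to
       APP_THREAD_STACK_SIZE_MIN (or the MPU stack size) if needed */
    if (os_thread_create(&tid, consumer, NULL, 4096) != BHT_OK)
        return;

    os_mutex_lock(&lock);
    ready = true;
    os_mutex_unlock(&lock);
    os_cond_signal(&cond); /* wakes the head node of the wait list */

    os_thread_join(tid, NULL); /* blocks on a k_sem until the thread exits */
}
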
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c
new file mode 100644
index 000000000..99eb3b354
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/zephyr/zephyr_time.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ return k_uptime_get() * 1000; /* k_uptime_get() is in ms, so 1 ms resolution */
+}
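
k_uptime_get() counts milliseconds since boot, so the value returned above is denominated in microseconds but advances in 1 ms steps. A tiny usage sketch, with do_work() standing in for a hypothetical workload:

/* Sketch: coarse elapsed-time measurement with the boot clock.
 * do_work() is hypothetical; effective resolution is 1 ms. */
#include "platform_api_vmcore.h"

extern void do_work(void);

static void
time_work(void)
{
    uint64 start = os_time_get_boot_microsecond();
    do_work();
    uint64 elapsed = os_time_get_boot_microsecond() - start;
    os_printf("elapsed: %u us\n", (uint32)elapsed);
}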