summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-03-09 13:19:22 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-03-09 13:19:22 +0000
commitc21c3b0befeb46a51b6bf3758ffa30813bea0ff0 (patch)
tree9754ff1ca740f6346cf8483ec915d4054bc5da2d /fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix
parentAdding upstream version 1.43.2. (diff)
downloadnetdata-c21c3b0befeb46a51b6bf3758ffa30813bea0ff0.tar.xz
netdata-c21c3b0befeb46a51b6bf3758ffa30813bea0ff0.zip
Adding upstream version 1.44.3.upstream/1.44.3
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix')
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake8
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c72
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c253
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c1028
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c680
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c17
6 files changed, 2058 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake
new file mode 100644
index 000000000..4abefff1e
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/platform_api_posix.cmake
@@ -0,0 +1,8 @@
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+set (PLATFORM_COMMON_POSIX_DIR ${CMAKE_CURRENT_LIST_DIR})
+
+file (GLOB_RECURSE source_all ${PLATFORM_COMMON_POSIX_DIR}/*.c)
+
+set (PLATFORM_COMMON_POSIX_SOURCE ${source_all} )
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c
new file mode 100644
index 000000000..912998ee0
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_malloc.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
/* Thin pass-throughs to the C library allocator.  WAMR funnels every host
 * allocation through these os_* wrappers so a platform port can substitute
 * its own allocator without touching core code. */
void *
os_malloc(unsigned size)
{
    return malloc(size);
}

void *
os_realloc(void *ptr, unsigned size)
{
    return realloc(ptr, size);
}

void
os_free(void *ptr)
{
    free(ptr);
}
+
+int
+os_dumps_proc_mem_info(char *out, unsigned int size)
+{
+ int ret = -1;
+ FILE *f;
+ char line[128] = { 0 };
+ unsigned int out_idx = 0;
+
+ if (!out || !size)
+ goto quit;
+
+ f = fopen("/proc/self/status", "r");
+ if (!f) {
+ perror("fopen failed: ");
+ goto quit;
+ }
+
+ memset(out, 0, size);
+
+ while (fgets(line, sizeof(line), f)) {
+#if WASM_ENABLE_MEMORY_PROFILING != 0
+ if (strncmp(line, "Vm", 2) == 0 || strncmp(line, "Rss", 3) == 0) {
+#else
+ if (strncmp(line, "VmRSS", 5) == 0
+ || strncmp(line, "RssAnon", 7) == 0) {
+#endif
+ size_t line_len = strlen(line);
+ if (line_len >= size - 1 - out_idx)
+ goto close_file;
+
+ /* copying without null-terminated byte */
+ memcpy(out + out_idx, line, line_len);
+ out_idx += line_len;
+ }
+ }
+
+ if (ferror(f)) {
+ perror("fgets failed: ");
+ goto close_file;
+ }
+
+ ret = 0;
+close_file:
+ fclose(f);
+quit:
+ return ret;
+} \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c
new file mode 100644
index 000000000..2dfbee453
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_memmap.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+#ifndef BH_ENABLE_TRACE_MMAP
+#define BH_ENABLE_TRACE_MMAP 0
+#endif
+
+#if BH_ENABLE_TRACE_MMAP != 0
+static size_t total_size_mmapped = 0;
+static size_t total_size_munmapped = 0;
+#endif
+
+#define HUGE_PAGE_SIZE (2 * 1024 * 1024)
+
+#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
/* Round v up to the next multiple of b; b must be a power of two. */
static inline uintptr_t
round_up(uintptr_t v, uintptr_t b)
{
    return (v + b - 1) & ~(b - 1);
}
+
/* Round v down to the previous multiple of b; b must be a power of two. */
static inline uintptr_t
round_down(uintptr_t v, uintptr_t b)
{
    return v - (v & (b - 1));
}
+#endif
+
/* Anonymous page-aligned mapping with WAMR's MMAP_PROT_*/MMAP_MAP_* flags.
 * Over-allocates one extra huge page when huge pages are available so the
 * usable region can be aligned to a 2MB boundary and madvise(MADV_HUGEPAGE)d;
 * the unaligned prefix/suffix pages are unmapped again.
 * Returns NULL on overflow, oversize request (>16GB) or mmap failure. */
void *
os_mmap(void *hint, size_t size, int prot, int flags)
{
    int map_prot = PROT_NONE;
    int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
    uint64 request_size, page_size;
    uint8 *addr = MAP_FAILED;
    uint32 i;

    /* round the request up to a whole number of pages */
    page_size = (uint64)getpagesize();
    request_size = (size + page_size - 1) & ~(page_size - 1);

#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
    /* huge page isn't supported on MacOS and NuttX */
    if (request_size >= HUGE_PAGE_SIZE)
        /* apply one extra huge page */
        request_size += HUGE_PAGE_SIZE;
#endif

    if ((size_t)request_size < size)
        /* integer overflow */
        return NULL;

    if (request_size > 16 * (uint64)UINT32_MAX)
        /* at most 16 G is allowed */
        return NULL;

    /* translate WAMR protection bits into POSIX PROT_* bits */
    if (prot & MMAP_PROT_READ)
        map_prot |= PROT_READ;

    if (prot & MMAP_PROT_WRITE)
        map_prot |= PROT_WRITE;

    if (prot & MMAP_PROT_EXEC)
        map_prot |= PROT_EXEC;

#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
#ifndef __APPLE__
    /* MAP_32BIT is a Linux extension; macOS has no equivalent */
    if (flags & MMAP_MAP_32BIT)
        map_flags |= MAP_32BIT;
#endif
#endif

    if (flags & MMAP_MAP_FIXED)
        map_flags |= MAP_FIXED;

#if defined(BUILD_TARGET_RISCV64_LP64D) || defined(BUILD_TARGET_RISCV64_LP64)
    /* As AOT relocation in RISCV64 may require that the code/data mapped
     * is in range 0 to 2GB, we try to map the memory with hint address
     * (mmap's first argument) to meet the requirement.
     */
    if (!hint && !(flags & MMAP_MAP_FIXED) && (flags & MMAP_MAP_32BIT)) {
        uint8 *stack_addr = (uint8 *)&map_prot;
        uint8 *text_addr = (uint8 *)os_mmap;
        /* hint address begins with 1MB */
        static uint8 *hint_addr = (uint8 *)(uintptr_t)BH_MB;

        if ((hint_addr - text_addr >= 0 && hint_addr - text_addr < 100 * BH_MB)
            || (text_addr - hint_addr >= 0
                && text_addr - hint_addr < 100 * BH_MB)) {
            /* hint address is possibly in text section, skip it */
            hint_addr += 100 * BH_MB;
        }

        if ((hint_addr - stack_addr >= 0 && hint_addr - stack_addr < 8 * BH_MB)
            || (stack_addr - hint_addr >= 0
                && stack_addr - hint_addr < 8 * BH_MB)) {
            /* hint address is possibly in native stack area, skip it */
            hint_addr += 8 * BH_MB;
        }

        /* try 10 times, step with 1MB each time */
        for (i = 0; i < 10 && hint_addr < (uint8 *)(uintptr_t)(2ULL * BH_GB);
             i++) {
            addr = mmap(hint_addr, request_size, map_prot, map_flags, -1, 0);
            if (addr != MAP_FAILED) {
                if (addr > (uint8 *)(uintptr_t)(2ULL * BH_GB)) {
                    /* unmap and try again if the mapped address doesn't
                     * meet the requirement */
                    os_munmap(addr, request_size);
                }
                else {
                    /* success, reset next hint address */
                    hint_addr += request_size;
                    break;
                }
            }
            hint_addr += BH_MB;
        }
    }
#endif /* end of BUILD_TARGET_RISCV64_LP64D || BUILD_TARGET_RISCV64_LP64 */

    /* memory has't been mapped or was mapped failed previously */
    if (addr == MAP_FAILED) {
        /* try 5 times */
        for (i = 0; i < 5; i++) {
            addr = mmap(hint, request_size, map_prot, map_flags, -1, 0);
            if (addr != MAP_FAILED)
                break;
        }
    }

    if (addr == MAP_FAILED) {
#if BH_ENABLE_TRACE_MMAP != 0
        os_printf("mmap failed\n");
#endif
        return NULL;
    }

#if BH_ENABLE_TRACE_MMAP != 0
    total_size_mmapped += request_size;
    os_printf("mmap return: %p with size: %zu, total_size_mmapped: %zu, "
              "total_size_munmapped: %zu\n",
              addr, request_size, total_size_mmapped, total_size_munmapped);
#endif

#if !defined(__APPLE__) && !defined(__NuttX__) && defined(MADV_HUGEPAGE)
    /* huge page isn't supported on MacOS and NuttX */
    if (request_size > HUGE_PAGE_SIZE) {
        uintptr_t huge_start, huge_end;
        size_t prefix_size = 0, suffix_size = HUGE_PAGE_SIZE;

        /* carve a 2MB-aligned region out of the over-allocated mapping */
        huge_start = round_up((uintptr_t)addr, HUGE_PAGE_SIZE);

        if (huge_start > (uintptr_t)addr) {
            prefix_size += huge_start - (uintptr_t)addr;
            suffix_size -= huge_start - (uintptr_t)addr;
        }

        /* unmap one extra huge page */

        if (prefix_size > 0) {
            munmap(addr, prefix_size);
#if BH_ENABLE_TRACE_MMAP != 0
            total_size_munmapped += prefix_size;
            os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
                      "total_size_munmapped: %zu\n",
                      addr, prefix_size, total_size_mmapped,
                      total_size_munmapped);
#endif
        }
        if (suffix_size > 0) {
            munmap(addr + request_size - suffix_size, suffix_size);
#if BH_ENABLE_TRACE_MMAP != 0
            total_size_munmapped += suffix_size;
            os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
                      "total_size_munmapped: %zu\n",
                      addr + request_size - suffix_size, suffix_size,
                      total_size_mmapped, total_size_munmapped);
#endif
        }

        addr = (uint8 *)huge_start;
        request_size -= HUGE_PAGE_SIZE;

        /* ask the kernel to back the aligned interior with huge pages;
         * failure is only a performance issue, not a correctness one */
        huge_end = round_down(huge_start + request_size, HUGE_PAGE_SIZE);
        if (huge_end > huge_start) {
            int ret = madvise((void *)huge_start, huge_end - huge_start,
                              MADV_HUGEPAGE);
            if (ret) {
#if BH_ENABLE_TRACE_MMAP != 0
                os_printf(
                    "warning: madvise(%p, %lu) huge page failed, return %d\n",
                    (void *)huge_start, huge_end - huge_start, ret);
#endif
            }
        }
    }
#endif /* end of __APPLE__ || __NuttX__ || !MADV_HUGEPAGE */

    return addr;
}
+
+void
+os_munmap(void *addr, size_t size)
+{
+ uint64 page_size = (uint64)getpagesize();
+ uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (addr) {
+ if (munmap(addr, request_size)) {
+ os_printf("os_munmap error addr:%p, size:0x%" PRIx64 ", errno:%d\n",
+ addr, request_size, errno);
+ return;
+ }
+#if BH_ENABLE_TRACE_MMAP != 0
+ total_size_munmapped += request_size;
+ os_printf("munmap %p with size: %zu, total_size_mmapped: %zu, "
+ "total_size_munmapped: %zu\n",
+ addr, request_size, total_size_mmapped, total_size_munmapped);
+#endif
+ }
+}
+
+int
+os_mprotect(void *addr, size_t size, int prot)
+{
+ int map_prot = PROT_NONE;
+ uint64 page_size = (uint64)getpagesize();
+ uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
+
+ if (!addr)
+ return 0;
+
+ if (prot & MMAP_PROT_READ)
+ map_prot |= PROT_READ;
+
+ if (prot & MMAP_PROT_WRITE)
+ map_prot |= PROT_WRITE;
+
+ if (prot & MMAP_PROT_EXEC)
+ map_prot |= PROT_EXEC;
+
+ return mprotect(addr, request_size, map_prot);
+}
+
/* No-op on POSIX hosts: data/instruction caches are coherent, so no
 * explicit flush is needed after emitting code. */
void
os_dcache_flush(void)
{}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c
new file mode 100644
index 000000000..e33781d7d
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_socket.c
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
/* Parse a numeric IPv4 (or, when IPv6 support is compiled in, IPv6)
 * address literal and fill *out / *out_len with the matching sockaddr.
 * Returns false when the string is neither a valid IPv4 nor IPv6
 * literal; hostnames are deliberately not resolved here. */
static bool
textual_addr_to_sockaddr(const char *textual, int port, struct sockaddr *out,
                         socklen_t *out_len)
{
    assert(textual);

    struct sockaddr_in *addr4 = (struct sockaddr_in *)out;
    if (inet_pton(AF_INET, textual, &addr4->sin_addr.s_addr) == 1) {
        addr4->sin_family = AF_INET;
        addr4->sin_port = htons(port);
        *out_len = sizeof(struct sockaddr_in);
        return true;
    }

#ifdef IPPROTO_IPV6
    struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)out;
    if (inet_pton(AF_INET6, textual, &addr6->sin6_addr.s6_addr) == 1) {
        addr6->sin6_family = AF_INET6;
        addr6->sin6_port = htons(port);
        *out_len = sizeof(struct sockaddr_in6);
        return true;
    }
#endif

    return false;
}
+
/* Convert a POSIX sockaddr (AF_INET or AF_INET6) into WAMR's
 * bh_sockaddr_t, normalizing port and address to host byte order.
 * Returns BHT_OK, or BHT_ERROR with errno = EAFNOSUPPORT for other
 * address families. */
static int
sockaddr_to_bh_sockaddr(const struct sockaddr *sockaddr,
                        bh_sockaddr_t *bh_sockaddr)
{
    switch (sockaddr->sa_family) {
        case AF_INET:
        {
            struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;

            bh_sockaddr->port = ntohs(addr->sin_port);
            bh_sockaddr->addr_bufer.ipv4 = ntohl(addr->sin_addr.s_addr);
            bh_sockaddr->is_ipv4 = true;
            return BHT_OK;
        }
#ifdef IPPROTO_IPV6
        case AF_INET6:
        {
            struct sockaddr_in6 *addr = (struct sockaddr_in6 *)sockaddr;
            size_t i;

            bh_sockaddr->port = ntohs(addr->sin6_port);

            /* s6_addr is a byte array; rebuild each 16-bit group
             * (low byte first, then ntohs) so the stored value ends up
             * in host byte order */
            for (i = 0; i < sizeof(bh_sockaddr->addr_bufer.ipv6)
                            / sizeof(bh_sockaddr->addr_bufer.ipv6[0]);
                 i++) {
                uint16 part_addr = addr->sin6_addr.s6_addr[i * 2]
                                   | (addr->sin6_addr.s6_addr[i * 2 + 1] << 8);
                bh_sockaddr->addr_bufer.ipv6[i] = ntohs(part_addr);
            }

            bh_sockaddr->is_ipv4 = false;
            return BHT_OK;
        }
#endif
        default:
            errno = EAFNOSUPPORT;
            return BHT_ERROR;
    }
}
+
/* Convert WAMR's bh_sockaddr_t back into a POSIX sockaddr, converting
 * port and address to network byte order.
 * NOTE(review): when IPPROTO_IPV6 is not defined and is_ipv4 is false,
 * neither *sockaddr nor *socklen is written — callers should ensure this
 * combination cannot occur; confirm. */
static void
bh_sockaddr_to_sockaddr(const bh_sockaddr_t *bh_sockaddr,
                        struct sockaddr_storage *sockaddr, socklen_t *socklen)
{
    if (bh_sockaddr->is_ipv4) {
        struct sockaddr_in *addr = (struct sockaddr_in *)sockaddr;
        addr->sin_port = htons(bh_sockaddr->port);
        addr->sin_family = AF_INET;
        addr->sin_addr.s_addr = htonl(bh_sockaddr->addr_bufer.ipv4);
        *socklen = sizeof(*addr);
    }
#ifdef IPPROTO_IPV6
    else {
        struct sockaddr_in6 *addr = (struct sockaddr_in6 *)sockaddr;
        size_t i;
        addr->sin6_port = htons(bh_sockaddr->port);
        addr->sin6_family = AF_INET6;

        /* split each host-order 16-bit group back into two bytes of
         * s6_addr (low byte first, mirroring sockaddr_to_bh_sockaddr) */
        for (i = 0; i < sizeof(bh_sockaddr->addr_bufer.ipv6)
                        / sizeof(bh_sockaddr->addr_bufer.ipv6[0]);
             i++) {
            uint16 part_addr = htons(bh_sockaddr->addr_bufer.ipv6[i]);
            addr->sin6_addr.s6_addr[i * 2] = 0xff & part_addr;
            addr->sin6_addr.s6_addr[i * 2 + 1] = (0xff00 & part_addr) >> 8;
        }

        *socklen = sizeof(*addr);
    }
#endif
}
+
+int
+os_socket_create(bh_socket_t *sock, bool is_ipv4, bool is_tcp)
+{
+ int af = is_ipv4 ? AF_INET : AF_INET6;
+
+ if (!sock) {
+ return BHT_ERROR;
+ }
+
+ if (is_tcp) {
+ *sock = socket(af, SOCK_STREAM, IPPROTO_TCP);
+ }
+ else {
+ *sock = socket(af, SOCK_DGRAM, 0);
+ }
+
+ return (*sock == -1) ? BHT_ERROR : BHT_OK;
+}
+
/* Bind `socket` to host:*port.  On success *port is updated with the
 * actually-bound port (useful when *port was 0 = "any").  Also marks the
 * descriptor close-on-exec and sets SO_LINGER {on, 0s}, which makes
 * close() reset the connection immediately instead of lingering. */
int
os_socket_bind(bh_socket_t socket, const char *host, int *port)
{
    struct sockaddr_storage addr = { 0 };
    struct linger ling;
    socklen_t socklen;
    int ret;

    assert(host);
    assert(port);

    /* l_onoff=1, l_linger=0: abortive close (RST) with no lingering */
    ling.l_onoff = 1;
    ling.l_linger = 0;

    if (!textual_addr_to_sockaddr(host, *port, (struct sockaddr *)&addr,
                                  &socklen)) {
        goto fail;
    }

    ret = fcntl(socket, F_SETFD, FD_CLOEXEC);
    if (ret < 0) {
        goto fail;
    }

    ret = setsockopt(socket, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
    if (ret < 0) {
        goto fail;
    }

    ret = bind(socket, (struct sockaddr *)&addr, socklen);
    if (ret < 0) {
        goto fail;
    }

    /* read back the bound address to learn the kernel-chosen port */
    socklen = sizeof(addr);
    if (getsockname(socket, (void *)&addr, &socklen) == -1) {
        goto fail;
    }

    if (addr.ss_family == AF_INET) {
        *port = ntohs(((struct sockaddr_in *)&addr)->sin_port);
    }
    else {
#ifdef IPPROTO_IPV6
        *port = ntohs(((struct sockaddr_in6 *)&addr)->sin6_port);
#else
        goto fail;
#endif
    }

    return BHT_OK;

fail:
    return BHT_ERROR;
}
+
/* Set the receive timeout (SO_RCVTIMEO) in microseconds. */
int
os_socket_settimeout(bh_socket_t socket, uint64 timeout_us)
{
    struct timeval tv;
    tv.tv_sec = timeout_us / 1000000UL;
    tv.tv_usec = timeout_us % 1000000UL;

    if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv,
                   sizeof(tv))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Put the socket in listening mode with the given backlog. */
int
os_socket_listen(bh_socket_t socket, int max_client)
{
    if (listen(socket, max_client) != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Accept one pending connection; the peer address (if requested via
 * addr/addrlen) follows accept(2) semantics. */
int
os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
                 unsigned int *addrlen)
{
    *sock = accept(server_sock, addr, addrlen);

    if (*sock < 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Connect to a numeric address literal (IPv4/IPv6) and port. */
int
os_socket_connect(bh_socket_t socket, const char *addr, int port)
{
    struct sockaddr_storage addr_in = { 0 };
    socklen_t addr_len;
    int ret = 0;

    if (!textual_addr_to_sockaddr(addr, port, (struct sockaddr *)&addr_in,
                                  &addr_len)) {
        return BHT_ERROR;
    }

    ret = connect(socket, (struct sockaddr *)&addr_in, addr_len);
    if (ret == -1) {
        return BHT_ERROR;
    }

    return BHT_OK;
}
+
/* recv(2) pass-through: returns bytes received, 0 on orderly shutdown,
 * or -1 on error. */
int
os_socket_recv(bh_socket_t socket, void *buf, unsigned int len)
{
    return recv(socket, buf, len, 0);
}

/* recvfrom(2) wrapper that additionally converts the source address into
 * WAMR's bh_sockaddr_t.  Returns bytes received, or -1 on error
 * (including an unsupported source address family). */
int
os_socket_recv_from(bh_socket_t socket, void *buf, unsigned int len, int flags,
                    bh_sockaddr_t *src_addr)
{
    struct sockaddr_storage sock_addr = { 0 };
    socklen_t socklen = sizeof(sock_addr);
    int ret;

    ret = recvfrom(socket, buf, len, flags, (struct sockaddr *)&sock_addr,
                   &socklen);

    if (ret < 0) {
        return ret;
    }

    if (src_addr && socklen > 0) {
        if (sockaddr_to_bh_sockaddr((struct sockaddr *)&sock_addr, src_addr)
            == BHT_ERROR) {
            return -1;
        }
    }

    return ret;
}

/* send(2) pass-through: returns bytes sent, or -1 on error. */
int
os_socket_send(bh_socket_t socket, const void *buf, unsigned int len)
{
    return send(socket, buf, len, 0);
}

/* sendto(2) wrapper taking the destination as WAMR's bh_sockaddr_t. */
int
os_socket_send_to(bh_socket_t socket, const void *buf, unsigned int len,
                  int flags, const bh_sockaddr_t *dest_addr)
{
    struct sockaddr_storage sock_addr = { 0 };
    socklen_t socklen = 0;

    bh_sockaddr_to_sockaddr(dest_addr, &sock_addr, &socklen);

    return sendto(socket, buf, len, flags, (const struct sockaddr *)&sock_addr,
                  socklen);
}

/* close(2) pass-through; the close result is deliberately ignored. */
int
os_socket_close(bh_socket_t socket)
{
    close(socket);
    return BHT_OK;
}
+
+int
+os_socket_shutdown(bh_socket_t socket)
+{
+ shutdown(socket, O_RDWR);
+ return BHT_OK;
+}
+
+int
+os_socket_inet_network(bool is_ipv4, const char *cp, bh_ip_addr_buffer_t *out)
+{
+ if (!cp)
+ return BHT_ERROR;
+
+ if (is_ipv4) {
+ if (inet_pton(AF_INET, cp, &out->ipv4) != 1) {
+ return BHT_ERROR;
+ }
+ /* Note: ntohl(INADDR_NONE) == INADDR_NONE */
+ out->ipv4 = ntohl(out->ipv4);
+ }
+ else {
+#ifdef IPPROTO_IPV6
+ if (inet_pton(AF_INET6, cp, out->ipv6) != 1) {
+ return BHT_ERROR;
+ }
+ for (int i = 0; i < 8; i++) {
+ out->ipv6[i] = ntohs(out->ipv6[i]);
+ }
+#else
+ errno = EAFNOSUPPORT;
+ return BHT_ERROR;
+#endif
+ }
+
+ return BHT_OK;
+}
+
/* Map a getaddrinfo() EAI_* failure code onto a best-fit errno value;
 * EAI_SYSTEM means the real error is already in errno. */
static int
getaddrinfo_error_to_errno(int error)
{
    if (error == EAI_AGAIN)
        return EAGAIN;
    if (error == EAI_FAIL)
        return EFAULT;
    if (error == EAI_MEMORY)
        return ENOMEM;
    if (error == EAI_SYSTEM)
        return errno;
    return EINVAL;
}
+
+static int
+is_addrinfo_supported(struct addrinfo *info)
+{
+ return
+ // Allow only IPv4 and IPv6
+ (info->ai_family == AF_INET || info->ai_family == AF_INET6)
+ // Allow only UDP and TCP
+ && (info->ai_socktype == SOCK_DGRAM || info->ai_socktype == SOCK_STREAM)
+ && (info->ai_protocol == IPPROTO_TCP
+ || info->ai_protocol == IPPROTO_UDP);
+}
+
+int
+os_socket_addr_resolve(const char *host, const char *service,
+ uint8_t *hint_is_tcp, uint8_t *hint_is_ipv4,
+ bh_addr_info_t *addr_info, size_t addr_info_size,
+ size_t *max_info_size)
+{
+ struct addrinfo hints = { 0 }, *res, *result;
+ int hints_enabled = hint_is_tcp || hint_is_ipv4;
+ int ret;
+ size_t pos = 0;
+
+ if (hints_enabled) {
+ if (hint_is_ipv4) {
+ hints.ai_family = *hint_is_ipv4 ? AF_INET : AF_INET6;
+ }
+ if (hint_is_tcp) {
+ hints.ai_socktype = *hint_is_tcp ? SOCK_STREAM : SOCK_DGRAM;
+ }
+ }
+
+ ret = getaddrinfo(host, strlen(service) == 0 ? NULL : service,
+ hints_enabled ? &hints : NULL, &result);
+ if (ret != BHT_OK) {
+ errno = getaddrinfo_error_to_errno(ret);
+ return BHT_ERROR;
+ }
+
+ res = result;
+ while (res) {
+ if (addr_info_size > pos) {
+ if (!is_addrinfo_supported(res)) {
+ res = res->ai_next;
+ continue;
+ }
+
+ ret =
+ sockaddr_to_bh_sockaddr(res->ai_addr, &addr_info[pos].sockaddr);
+
+ if (ret == BHT_ERROR) {
+ freeaddrinfo(result);
+ return BHT_ERROR;
+ }
+
+ addr_info[pos].is_tcp = res->ai_socktype == SOCK_STREAM;
+ }
+
+ pos++;
+ res = res->ai_next;
+ }
+
+ *max_info_size = pos;
+ freeaddrinfo(result);
+
+ return BHT_OK;
+}
+
/* Set an int-valued boolean socket option (0/1) at the given level. */
static int
os_socket_setbooloption(bh_socket_t socket, int level, int optname,
                        bool is_enabled)
{
    int option = (int)is_enabled;
    if (setsockopt(socket, level, optname, &option, sizeof(option)) != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Read an int-valued boolean socket option into *is_enabled. */
static int
os_socket_getbooloption(bh_socket_t socket, int level, int optname,
                        bool *is_enabled)
{
    assert(is_enabled);

    int optval;
    socklen_t optval_size = sizeof(optval);
    if (getsockopt(socket, level, optname, &optval, &optval_size) != 0) {
        return BHT_ERROR;
    }
    *is_enabled = (bool)optval;
    return BHT_OK;
}
+
/* Set the kernel send buffer size (SO_SNDBUF). */
int
os_socket_set_send_buf_size(bh_socket_t socket, size_t bufsiz)
{
    int buf_size_int = (int)bufsiz;
    if (setsockopt(socket, SOL_SOCKET, SO_SNDBUF, &buf_size_int,
                   sizeof(buf_size_int))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Read the kernel send buffer size (SO_SNDBUF) into *bufsiz. */
int
os_socket_get_send_buf_size(bh_socket_t socket, size_t *bufsiz)
{
    assert(bufsiz);

    int buf_size_int;
    socklen_t bufsiz_len = sizeof(buf_size_int);
    if (getsockopt(socket, SOL_SOCKET, SO_SNDBUF, &buf_size_int, &bufsiz_len)
        != 0) {
        return BHT_ERROR;
    }
    *bufsiz = (size_t)buf_size_int;

    return BHT_OK;
}

/* Set the kernel receive buffer size (SO_RCVBUF). */
int
os_socket_set_recv_buf_size(bh_socket_t socket, size_t bufsiz)
{
    int buf_size_int = (int)bufsiz;
    if (setsockopt(socket, SOL_SOCKET, SO_RCVBUF, &buf_size_int,
                   sizeof(buf_size_int))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Read the kernel receive buffer size (SO_RCVBUF) into *bufsiz. */
int
os_socket_get_recv_buf_size(bh_socket_t socket, size_t *bufsiz)
{
    assert(bufsiz);

    int buf_size_int;
    socklen_t bufsiz_len = sizeof(buf_size_int);
    if (getsockopt(socket, SOL_SOCKET, SO_RCVBUF, &buf_size_int, &bufsiz_len)
        != 0) {
        return BHT_ERROR;
    }
    *bufsiz = (size_t)buf_size_int;

    return BHT_OK;
}
+
/* SO_KEEPALIVE on/off. */
int
os_socket_set_keep_alive(bh_socket_t socket, bool is_enabled)
{
    return os_socket_setbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
                                   is_enabled);
}

/* Query SO_KEEPALIVE. */
int
os_socket_get_keep_alive(bh_socket_t socket, bool *is_enabled)
{
    return os_socket_getbooloption(socket, SOL_SOCKET, SO_KEEPALIVE,
                                   is_enabled);
}

/* SO_REUSEADDR on/off. */
int
os_socket_set_reuse_addr(bh_socket_t socket, bool is_enabled)
{
    return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
                                   is_enabled);
}

/* Query SO_REUSEADDR. */
int
os_socket_get_reuse_addr(bh_socket_t socket, bool *is_enabled)
{
    return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEADDR,
                                   is_enabled);
}

/* SO_REUSEPORT on/off; ENOTSUP where the platform lacks it. */
int
os_socket_set_reuse_port(bh_socket_t socket, bool is_enabled)
{
#if defined(SO_REUSEPORT) /* NuttX doesn't have SO_REUSEPORT */
    return os_socket_setbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
                                   is_enabled);
#else
    errno = ENOTSUP;
    return BHT_ERROR;
#endif /* defined(SO_REUSEPORT) */
}

/* Query SO_REUSEPORT; ENOTSUP where the platform lacks it. */
int
os_socket_get_reuse_port(bh_socket_t socket, bool *is_enabled)
{
#if defined(SO_REUSEPORT) /* NuttX doesn't have SO_REUSEPORT */
    return os_socket_getbooloption(socket, SOL_SOCKET, SO_REUSEPORT,
                                   is_enabled);
#else
    errno = ENOTSUP;
    return BHT_ERROR;
#endif /* defined(SO_REUSEPORT) */
}
+
/* Configure SO_LINGER: when enabled, close() blocks up to linger_s
 * seconds to flush pending data (0 forces an immediate RST). */
int
os_socket_set_linger(bh_socket_t socket, bool is_enabled, int linger_s)
{
    struct linger linger_opts = { .l_onoff = (int)is_enabled,
                                  .l_linger = linger_s };
    if (setsockopt(socket, SOL_SOCKET, SO_LINGER, &linger_opts,
                   sizeof(linger_opts))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}

/* Read back the SO_LINGER configuration. */
int
os_socket_get_linger(bh_socket_t socket, bool *is_enabled, int *linger_s)
{
    assert(is_enabled);
    assert(linger_s);

    struct linger linger_opts;
    socklen_t linger_opts_len = sizeof(linger_opts);
    if (getsockopt(socket, SOL_SOCKET, SO_LINGER, &linger_opts,
                   &linger_opts_len)
        != 0) {
        return BHT_ERROR;
    }
    *linger_s = linger_opts.l_linger;
    *is_enabled = (bool)linger_opts.l_onoff;
    return BHT_OK;
}
+
/* TCP_NODELAY on/off (disable Nagle's algorithm). */
int
os_socket_set_tcp_no_delay(bh_socket_t socket, bool is_enabled)
{
    return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
                                   is_enabled);
}

/* Query TCP_NODELAY. */
int
os_socket_get_tcp_no_delay(bh_socket_t socket, bool *is_enabled)
{
    return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_NODELAY,
                                   is_enabled);
}

/* TCP_QUICKACK on/off (Linux-only); ENOSYS elsewhere. */
int
os_socket_set_tcp_quick_ack(bh_socket_t socket, bool is_enabled)
{
#ifdef TCP_QUICKACK
    return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
                                   is_enabled);
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Query TCP_QUICKACK (Linux-only); ENOSYS elsewhere. */
int
os_socket_get_tcp_quick_ack(bh_socket_t socket, bool *is_enabled)
{
#ifdef TCP_QUICKACK
    return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_QUICKACK,
                                   is_enabled);
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Seconds of idle before keepalive probes start: TCP_KEEPIDLE on Linux,
 * TCP_KEEPALIVE on macOS; ENOSYS elsewhere. */
int
os_socket_set_tcp_keep_idle(bh_socket_t socket, uint32 time_s)
{
    int time_s_int = (int)time_s;
#ifdef TCP_KEEPIDLE
    if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPIDLE, &time_s_int,
                   sizeof(time_s_int))
        != 0) {
        return BHT_ERROR;
    }
    return BHT_OK;
#elif defined(TCP_KEEPALIVE)
    if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPALIVE, &time_s_int,
                   sizeof(time_s_int))
        != 0) {
        return BHT_ERROR;
    }
    return BHT_OK;
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Query the keepalive idle time (see setter for platform mapping). */
int
os_socket_get_tcp_keep_idle(bh_socket_t socket, uint32 *time_s)
{
    assert(time_s);
    int time_s_int;
    socklen_t time_s_len = sizeof(time_s_int);
#ifdef TCP_KEEPIDLE
    if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPIDLE, &time_s_int, &time_s_len)
        != 0) {
        return BHT_ERROR;
    }
    *time_s = (uint32)time_s_int;
    return BHT_OK;
#elif defined(TCP_KEEPALIVE)
    if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPALIVE, &time_s_int, &time_s_len)
        != 0) {
        return BHT_ERROR;
    }
    *time_s = (uint32)time_s_int;
    return BHT_OK;
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Seconds between keepalive probes (TCP_KEEPINTVL); ENOSYS elsewhere. */
int
os_socket_set_tcp_keep_intvl(bh_socket_t socket, uint32 time_s)
{
    int time_s_int = (int)time_s;
#ifdef TCP_KEEPINTVL
    if (setsockopt(socket, IPPROTO_TCP, TCP_KEEPINTVL, &time_s_int,
                   sizeof(time_s_int))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Query TCP_KEEPINTVL; ENOSYS elsewhere. */
int
os_socket_get_tcp_keep_intvl(bh_socket_t socket, uint32 *time_s)
{
#ifdef TCP_KEEPINTVL
    assert(time_s);
    int time_s_int;
    socklen_t time_s_len = sizeof(time_s_int);
    if (getsockopt(socket, IPPROTO_TCP, TCP_KEEPINTVL, &time_s_int, &time_s_len)
        != 0) {
        return BHT_ERROR;
    }
    *time_s = (uint32)time_s_int;
    return BHT_OK;
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* TCP_FASTOPEN_CONNECT on/off (Linux 4.11+); ENOSYS elsewhere. */
int
os_socket_set_tcp_fastopen_connect(bh_socket_t socket, bool is_enabled)
{
#ifdef TCP_FASTOPEN_CONNECT
    return os_socket_setbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
                                   is_enabled);
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}

/* Query TCP_FASTOPEN_CONNECT; ENOSYS elsewhere. */
int
os_socket_get_tcp_fastopen_connect(bh_socket_t socket, bool *is_enabled)
{
#ifdef TCP_FASTOPEN_CONNECT
    return os_socket_getbooloption(socket, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
                                   is_enabled);
#else
    errno = ENOSYS;

    return BHT_ERROR;
#endif
}
+
/* IP(V6)_MULTICAST_LOOP: whether outgoing multicast is looped back to
 * local listeners; EAFNOSUPPORT when IPv6 is requested but compiled out. */
int
os_socket_set_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool is_enabled)
{
    if (ipv6) {
#ifdef IPPROTO_IPV6
        return os_socket_setbooloption(socket, IPPROTO_IPV6,
                                       IPV6_MULTICAST_LOOP, is_enabled);
#else
        errno = EAFNOSUPPORT;
        return BHT_ERROR;
#endif
    }
    else {
        return os_socket_setbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
                                       is_enabled);
    }
}

/* Query IP(V6)_MULTICAST_LOOP. */
int
os_socket_get_ip_multicast_loop(bh_socket_t socket, bool ipv6, bool *is_enabled)
{
    if (ipv6) {
#ifdef IPPROTO_IPV6
        return os_socket_getbooloption(socket, IPPROTO_IPV6,
                                       IPV6_MULTICAST_LOOP, is_enabled);
#else
        errno = EAFNOSUPPORT;
        return BHT_ERROR;
#endif
    }
    else {
        return os_socket_getbooloption(socket, IPPROTO_IP, IP_MULTICAST_LOOP,
                                       is_enabled);
    }
}

/* Join a multicast group (IP_ADD_MEMBERSHIP / IPV6_JOIN_GROUP).
 * NOTE(review): the IPv6 group address is copied as eight 16-bit values
 * straight from imr_multiaddr->ipv6 via a uint16_t* view of s6_addr —
 * assumes those values are already in the byte order the kernel expects;
 * confirm against os_socket_inet_network's host-order output. */
int
os_socket_set_ip_add_membership(bh_socket_t socket,
                                bh_ip_addr_buffer_t *imr_multiaddr,
                                uint32_t imr_interface, bool is_ipv6)
{
    assert(imr_multiaddr);
    if (is_ipv6) {
#ifdef IPPROTO_IPV6
        struct ipv6_mreq mreq;
        for (int i = 0; i < 8; i++) {
            ((uint16_t *)mreq.ipv6mr_multiaddr.s6_addr)[i] =
                imr_multiaddr->ipv6[i];
        }
        mreq.ipv6mr_interface = imr_interface;
        if (setsockopt(socket, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq,
                       sizeof(mreq))
            != 0) {
            return BHT_ERROR;
        }
#else
        errno = EAFNOSUPPORT;
        return BHT_ERROR;
#endif
    }
    else {
        struct ip_mreq mreq;
        mreq.imr_multiaddr.s_addr = imr_multiaddr->ipv4;
        mreq.imr_interface.s_addr = imr_interface;
        if (setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq,
                       sizeof(mreq))
            != 0) {
            return BHT_ERROR;
        }
    }

    return BHT_OK;
}

/* Leave a multicast group (IP_DROP_MEMBERSHIP / IPV6_LEAVE_GROUP);
 * mirrors os_socket_set_ip_add_membership, including the byte-order
 * caveat noted there. */
int
os_socket_set_ip_drop_membership(bh_socket_t socket,
                                 bh_ip_addr_buffer_t *imr_multiaddr,
                                 uint32_t imr_interface, bool is_ipv6)
{
    assert(imr_multiaddr);
    if (is_ipv6) {
#ifdef IPPROTO_IPV6
        struct ipv6_mreq mreq;
        for (int i = 0; i < 8; i++) {
            ((uint16_t *)mreq.ipv6mr_multiaddr.s6_addr)[i] =
                imr_multiaddr->ipv6[i];
        }
        mreq.ipv6mr_interface = imr_interface;
        if (setsockopt(socket, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq,
                       sizeof(mreq))
            != 0) {
            return BHT_ERROR;
        }
#else
        errno = EAFNOSUPPORT;
        return BHT_ERROR;
#endif
    }
    else {
        struct ip_mreq mreq;
        mreq.imr_multiaddr.s_addr = imr_multiaddr->ipv4;
        mreq.imr_interface.s_addr = imr_interface;
        if (setsockopt(socket, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq,
                       sizeof(mreq))
            != 0) {
            return BHT_ERROR;
        }
    }

    return BHT_OK;
}
+
/* Set the unicast TTL (IP_TTL).
 * NOTE(review): passes a 1-byte value where Linux documents an int;
 * Linux accepts optlen >= 1 here, but confirm on other POSIX targets. */
int
os_socket_set_ip_ttl(bh_socket_t socket, uint8_t ttl_s)
{
    if (setsockopt(socket, IPPROTO_IP, IP_TTL, &ttl_s, sizeof(ttl_s)) != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}
+
+int
+os_socket_get_ip_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ socklen_t opt_len = sizeof(ttl_s);
+ if (getsockopt(socket, IPPROTO_IP, IP_TTL, ttl_s, &opt_len) != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
/* Set the multicast TTL (IP_MULTICAST_TTL); a 1-byte option value is
 * explicitly allowed for this option on Linux. */
int
os_socket_set_ip_multicast_ttl(bh_socket_t socket, uint8_t ttl_s)
{
    if (setsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL, &ttl_s, sizeof(ttl_s))
        != 0) {
        return BHT_ERROR;
    }

    return BHT_OK;
}
+
+int
+os_socket_get_ip_multicast_ttl(bh_socket_t socket, uint8_t *ttl_s)
+{
+ socklen_t opt_len = sizeof(ttl_s);
+ if (getsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL, ttl_s, &opt_len)
+ != 0) {
+ return BHT_ERROR;
+ }
+
+ return BHT_OK;
+}
+
/* IPV6_V6ONLY: restrict an AF_INET6 socket to IPv6 traffic only;
 * EAFNOSUPPORT when IPv6 support is compiled out. */
int
os_socket_set_ipv6_only(bh_socket_t socket, bool is_enabled)
{
#ifdef IPPROTO_IPV6
    return os_socket_setbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
                                   is_enabled);
#else
    errno = EAFNOSUPPORT;
    return BHT_ERROR;
#endif
}

/* Query IPV6_V6ONLY. */
int
os_socket_get_ipv6_only(bh_socket_t socket, bool *is_enabled)
{
#ifdef IPPROTO_IPV6
    return os_socket_getbooloption(socket, IPPROTO_IPV6, IPV6_V6ONLY,
                                   is_enabled);
#else
    errno = EAFNOSUPPORT;
    return BHT_ERROR;
#endif
}

/* SO_BROADCAST on/off (permit sending to broadcast addresses). */
int
os_socket_set_broadcast(bh_socket_t socket, bool is_enabled)
{
    return os_socket_setbooloption(socket, SOL_SOCKET, SO_BROADCAST,
                                   is_enabled);
}

/* Query SO_BROADCAST. */
int
os_socket_get_broadcast(bh_socket_t socket, bool *is_enabled)
{
    return os_socket_getbooloption(socket, SOL_SOCKET, SO_BROADCAST,
                                   is_enabled);
}
+
/* Set the send timeout (SO_SNDTIMEO) in microseconds. */
int
os_socket_set_send_timeout(bh_socket_t socket, uint64 timeout_us)
{
    struct timeval tv;
    tv.tv_sec = timeout_us / 1000000UL;
    tv.tv_usec = timeout_us % 1000000UL;
    if (setsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) != 0) {
        return BHT_ERROR;
    }
    return BHT_OK;
}

/* Read the send timeout (SO_SNDTIMEO) in microseconds. */
int
os_socket_get_send_timeout(bh_socket_t socket, uint64 *timeout_us)
{
    struct timeval tv;
    socklen_t tv_len = sizeof(tv);
    if (getsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, &tv, &tv_len) != 0) {
        return BHT_ERROR;
    }
    *timeout_us = (tv.tv_sec * 1000000UL) + tv.tv_usec;
    return BHT_OK;
}

/* Set the receive timeout (SO_RCVTIMEO) in microseconds. */
int
os_socket_set_recv_timeout(bh_socket_t socket, uint64 timeout_us)
{
    struct timeval tv;
    tv.tv_sec = timeout_us / 1000000UL;
    tv.tv_usec = timeout_us % 1000000UL;
    if (setsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) != 0) {
        return BHT_ERROR;
    }
    return BHT_OK;
}

/* Read the receive timeout (SO_RCVTIMEO) in microseconds. */
int
os_socket_get_recv_timeout(bh_socket_t socket, uint64 *timeout_us)
{
    struct timeval tv;
    socklen_t tv_len = sizeof(tv);
    if (getsockopt(socket, SOL_SOCKET, SO_RCVTIMEO, &tv, &tv_len) != 0) {
        return BHT_ERROR;
    }
    *timeout_us = (tv.tv_sec * 1000000UL) + tv.tv_usec;
    return BHT_OK;
}
+
/* Fetch the locally-bound address of the socket (getsockname) and
 * convert it into WAMR's bh_sockaddr_t. */
int
os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
{
    struct sockaddr_storage addr_storage = { 0 };
    socklen_t addr_len = sizeof(addr_storage);
    int ret;

    ret = getsockname(socket, (struct sockaddr *)&addr_storage, &addr_len);

    if (ret != BHT_OK) {
        return BHT_ERROR;
    }

    return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr_storage, sockaddr);
}

/* Fetch the connected peer's address (getpeername) and convert it into
 * WAMR's bh_sockaddr_t. */
int
os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
{
    struct sockaddr_storage addr_storage = { 0 };
    socklen_t addr_len = sizeof(addr_storage);
    int ret;

    ret = getpeername(socket, (struct sockaddr *)&addr_storage, &addr_len);

    if (ret != BHT_OK) {
        return BHT_ERROR;
    }

    return sockaddr_to_bh_sockaddr((struct sockaddr *)&addr_storage, sockaddr);
}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c
new file mode 100644
index 000000000..5e814c418
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_thread.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include "platform_api_vmcore.h"
+#include "platform_api_extension.h"
+
/* Argument bundle handed from os_thread_create_with_prio() to the new
   thread; heap-allocated by the creator and freed by os_thread_wrapper()
   once the fields have been copied out. */
typedef struct {
    thread_start_routine_t start;
    void *arg;
#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_signal_handler signal_handler;
#endif
} thread_wrapper_arg;

#ifdef OS_ENABLE_HW_BOUND_CHECK
/* The signal handler passed to os_thread_signal_init() */
static os_thread_local_attribute os_signal_handler signal_handler;
#endif
+
/* Entry point for threads created by os_thread_create_with_prio():
   copies the start routine and argument out of the heap-allocated
   wrapper arg, frees it, optionally installs the per-thread signal
   environment, then runs the user routine. Always returns NULL. */
static void *
os_thread_wrapper(void *arg)
{
    thread_wrapper_arg *targ = arg;
    thread_start_routine_t start_func = targ->start;
    void *thread_arg = targ->arg;
#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_signal_handler handler = targ->signal_handler;
#endif

#if 0
    os_printf("THREAD CREATED %jx\n", (uintmax_t)(uintptr_t)pthread_self());
#endif
    /* Free the wrapper before running user code so it cannot leak even
       if start_func never returns. */
    BH_FREE(targ);
#ifdef OS_ENABLE_HW_BOUND_CHECK
    /* If the signal environment cannot be set up, skip the start routine
       entirely rather than run without guard-page protection. */
    if (os_thread_signal_init(handler) != 0)
        return NULL;
#endif
    start_func(thread_arg);
#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_thread_signal_destroy();
#endif
    return NULL;
}
+
/* Create a joinable thread running `start(arg)` on a stack of
   `stack_size` bytes. Returns BHT_OK on success, BHT_ERROR on failure.
   NOTE(review): `prio` is accepted but never applied on this platform
   (no pthread scheduling call below) -- confirm that is intentional. */
int
os_thread_create_with_prio(korp_tid *tid, thread_start_routine_t start,
                           void *arg, unsigned int stack_size, int prio)
{
    pthread_attr_t tattr;
    thread_wrapper_arg *targ;

    assert(stack_size > 0);
    assert(tid);
    assert(start);

    pthread_attr_init(&tattr);
    pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
    /* Fails (EINVAL) when stack_size is below the platform minimum or
       misaligned. */
    if (pthread_attr_setstacksize(&tattr, stack_size) != 0) {
        os_printf("Invalid thread stack size %u. "
                  "Min stack size on Linux = %u\n",
                  stack_size, (unsigned int)PTHREAD_STACK_MIN);
        pthread_attr_destroy(&tattr);
        return BHT_ERROR;
    }

    /* Ownership of targ passes to os_thread_wrapper(), which frees it. */
    targ = (thread_wrapper_arg *)BH_MALLOC(sizeof(*targ));
    if (!targ) {
        pthread_attr_destroy(&tattr);
        return BHT_ERROR;
    }

    targ->start = start;
    targ->arg = arg;
#ifdef OS_ENABLE_HW_BOUND_CHECK
    /* Propagate the creating thread's signal handler to the new thread. */
    targ->signal_handler = signal_handler;
#endif

    if (pthread_create(tid, &tattr, os_thread_wrapper, targ) != 0) {
        pthread_attr_destroy(&tattr);
        BH_FREE(targ);
        return BHT_ERROR;
    }

    pthread_attr_destroy(&tattr);
    return BHT_OK;
}
+
/* Create a joinable thread with the default priority; see
   os_thread_create_with_prio() for the full contract. */
int
os_thread_create(korp_tid *tid, thread_start_routine_t start, void *arg,
                 unsigned int stack_size)
{
    return os_thread_create_with_prio(tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}
+
/* Return the calling thread's identifier (pthread_self() cast to the
   platform-neutral korp_tid). */
korp_tid
os_self_thread()
{
    return (korp_tid)pthread_self();
}
+
+int
+os_mutex_init(korp_mutex *mutex)
+{
+ return pthread_mutex_init(mutex, NULL) == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_recursive_mutex_init(korp_mutex *mutex)
+{
+ int ret;
+
+ pthread_mutexattr_t mattr;
+
+ assert(mutex);
+ ret = pthread_mutexattr_init(&mattr);
+ if (ret)
+ return BHT_ERROR;
+
+ pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+ ret = pthread_mutex_init(mutex, &mattr);
+ pthread_mutexattr_destroy(&mattr);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_destroy(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_destroy(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_lock(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_lock(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_mutex_unlock(korp_mutex *mutex)
+{
+ int ret;
+
+ assert(mutex);
+ ret = pthread_mutex_unlock(mutex);
+
+ return ret == 0 ? BHT_OK : BHT_ERROR;
+}
+
+int
+os_cond_init(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_init(cond, NULL) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_destroy(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_destroy(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_wait(korp_cond *cond, korp_mutex *mutex)
+{
+ assert(cond);
+ assert(mutex);
+
+ if (pthread_cond_wait(cond, mutex) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
/* Open (or create, depending on `oflags`) a named POSIX semaphore with
   initial value `val`. Returns the raw sem_open() result (SEM_FAILED on
   error, with errno set). */
korp_sem *
os_sem_open(const char *name, int oflags, int mode, int val)
{
    return sem_open(name, oflags, mode, val);
}
+
/* Close a named semaphore handle; raw sem_close() result (0 / -1+errno). */
int
os_sem_close(korp_sem *sem)
{
    return sem_close(sem);
}
+
/* Decrement the semaphore, blocking until it is positive; raw sem_wait()
   result (0 / -1+errno). */
int
os_sem_wait(korp_sem *sem)
{
    return sem_wait(sem);
}
+
/* Non-blocking decrement; raw sem_trywait() result (-1/EAGAIN when the
   semaphore is zero). */
int
os_sem_trywait(korp_sem *sem)
{
    return sem_trywait(sem);
}
+
/* Increment the semaphore, waking a waiter if any; raw sem_post()
   result (0 / -1+errno). */
int
os_sem_post(korp_sem *sem)
{
    return sem_post(sem);
}
+
/* Read the semaphore's current value into *sval. Unconditionally fails
   with ENOSYS on macOS (see the comment below); otherwise returns the
   raw sem_getvalue() result. */
int
os_sem_getvalue(korp_sem *sem, int *sval)
{
#if defined(__APPLE__)
    /*
     * macOS doesn't have working sem_getvalue.
     * It's marked as deprecated in the system header.
     * Mock it up here to avoid compile-time deprecation warnings.
     */
    errno = ENOSYS;
    return -1;
#else
    return sem_getvalue(sem, sval);
#endif
}
+
/* Remove a named semaphore from the system; raw sem_unlink() result. */
int
os_sem_unlink(const char *name)
{
    return sem_unlink(name);
}
+
/* Compute an absolute timespec `usec` microseconds from now (based on
   gettimeofday) for pthread_cond_timedwait(). On arithmetic overflow the
   result is clamped to the maximum representable time and a warning is
   printed. */
static void
msec_nsec_to_abstime(struct timespec *ts, uint64 usec)
{
    struct timeval tv;
    time_t tv_sec_new;
    long int tv_nsec_new;

    gettimeofday(&tv, NULL);

    /* Seconds part, with wrap-around detection. */
    tv_sec_new = (time_t)(tv.tv_sec + usec / 1000000);
    if (tv_sec_new >= tv.tv_sec) {
        ts->tv_sec = tv_sec_new;
    }
    else {
        /* integer overflow */
        ts->tv_sec = BH_TIME_T_MAX;
        os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                  "set to max timeout instead\n");
    }

    /* Nanoseconds part.
       NOTE(review): the first clause of the overflow check
       (tv.tv_usec * 1000 >= tv.tv_usec) looks ineffective for in-range
       tv_usec values -- confirm what guard was intended. */
    tv_nsec_new = (long int)(tv.tv_usec * 1000 + (usec % 1000000) * 1000);
    if (tv.tv_usec * 1000 >= tv.tv_usec && tv_nsec_new >= tv.tv_usec * 1000) {
        ts->tv_nsec = tv_nsec_new;
    }
    else {
        /* integer overflow */
        ts->tv_nsec = LONG_MAX;
        os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                  "set to max timeout instead\n");
    }

    /* Carry at most one whole second out of tv_nsec (the sum above is
       always below 2e9, so a single carry suffices). */
    if (ts->tv_nsec >= 1000000000L && ts->tv_sec < BH_TIME_T_MAX) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
+
+int
+os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
+{
+ int ret;
+ struct timespec abstime;
+
+ if (useconds == BHT_WAIT_FOREVER)
+ ret = pthread_cond_wait(cond, mutex);
+ else {
+ msec_nsec_to_abstime(&abstime, useconds);
+ ret = pthread_cond_timedwait(cond, mutex, &abstime);
+ }
+
+ if (ret != BHT_OK && ret != ETIMEDOUT)
+ return BHT_ERROR;
+
+ return ret;
+}
+
+int
+os_cond_signal(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_signal(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
+int
+os_cond_broadcast(korp_cond *cond)
+{
+ assert(cond);
+
+ if (pthread_cond_broadcast(cond) != BHT_OK)
+ return BHT_ERROR;
+
+ return BHT_OK;
+}
+
/* Wait for `thread` to terminate, storing its exit value via value_ptr
   if non-NULL. NOTE(review): returns the raw pthread_join() error code
   (0 on success), not BHT_OK/BHT_ERROR -- presumably callers rely on the
   pthread convention here; confirm before changing. */
int
os_thread_join(korp_tid thread, void **value_ptr)
{
    return pthread_join(thread, value_ptr);
}
+
/* Detach `thread` so its resources are reclaimed on exit without a join.
   Returns the raw pthread_detach() error code (0 on success). */
int
os_thread_detach(korp_tid thread)
{
    return pthread_detach(thread);
}
+
/* Terminate the calling thread with `retval`, tearing down the
   per-thread signal environment first when HW bound checks are enabled
   (pthread_exit never returns, so cleanup must happen before it). */
void
os_thread_exit(void *retval)
{
#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_thread_signal_destroy();
#endif
    return pthread_exit(retval);
}
+
#if defined(os_thread_local_attribute)
/* Per-thread cache of the computed boundary, so the pthread queries run
   at most once per thread. */
static os_thread_local_attribute uint8 *thread_stack_boundary = NULL;
#endif

/* Return the lowest usable address of the current thread's stack
   (capped to at most `max_stack_size` below the top, plus at least one
   guard page of headroom), or NULL when the platform query is
   unavailable. Linux and Apple/NuttX use different pthread APIs. */
uint8 *
os_thread_get_stack_boundary()
{
    pthread_t self;
#ifdef __linux__
    pthread_attr_t attr;
    size_t guard_size;
#endif
    uint8 *addr = NULL;
    size_t stack_size, max_stack_size;
    int page_size;

#if defined(os_thread_local_attribute)
    if (thread_stack_boundary)
        return thread_stack_boundary;
#endif

    page_size = getpagesize();
    self = pthread_self();
    /* Round the configured max up to a whole number of pages. */
    max_stack_size =
        (size_t)(APP_THREAD_STACK_SIZE_MAX + page_size - 1) & ~(page_size - 1);

    if (max_stack_size < APP_THREAD_STACK_SIZE_DEFAULT)
        max_stack_size = APP_THREAD_STACK_SIZE_DEFAULT;

#ifdef __linux__
    if (pthread_getattr_np(self, &attr) == 0) {
        /* addr is the lowest stack address; raise it if the real stack is
           larger than the runtime is allowed to use. */
        pthread_attr_getstack(&attr, (void **)&addr, &stack_size);
        pthread_attr_getguardsize(&attr, &guard_size);
        pthread_attr_destroy(&attr);
        if (stack_size > max_stack_size)
            addr = addr + stack_size - max_stack_size;
        if (guard_size < (size_t)page_size)
            /* Reserved 1 guard page at least for safety */
            guard_size = (size_t)page_size;
        addr += guard_size;
    }
    (void)stack_size;
#elif defined(__APPLE__) || defined(__NuttX__)
    if ((addr = (uint8 *)pthread_get_stackaddr_np(self))) {
        stack_size = pthread_get_stacksize_np(self);

        /**
         * Check whether stack_addr is the base or end of the stack,
         * change it to the base if it is the end of stack.
         */
        if (addr <= (uint8 *)&stack_size)
            addr = addr + stack_size;

        if (stack_size > max_stack_size)
            stack_size = max_stack_size;

        addr -= stack_size;
        /* Reserved 1 guard page at least for safety */
        addr += page_size;
    }
#endif

#if defined(os_thread_local_attribute)
    thread_stack_boundary = addr;
#endif
    return addr;
}
+
#ifdef OS_ENABLE_HW_BOUND_CHECK

/* Size of the per-thread signal alternate stack used to run the
   SIGSEGV/SIGBUS handler. */
#define SIG_ALT_STACK_SIZE (32 * 1024)

/**
 * Whether thread signal environment is initialized:
 * the signal handler is registered, the stack pages are touched,
 * the stack guard pages are set and signal alternate stack are set.
 */
static os_thread_local_attribute bool thread_signal_inited = false;

#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
/* The signal alternate stack base addr */
static os_thread_local_attribute uint8 *sigalt_stack_base_addr;

/* Optimization must stay off so the alloca-based probes below are not
   elided; ASan is disabled because the probes intentionally touch raw
   stack pages. */
#if defined(__clang__)
#pragma clang optimize off
#elif defined(__GNUC__)
#pragma GCC push_options
#pragma GCC optimize("O0")
__attribute__((no_sanitize_address))
#endif
/* Grow the stack downward with repeated alloca() probes, touching each
   half-page until the probe lands within the lowest page at
   stack_min_addr. The running sum only exists to keep the loads and
   stores observable to the compiler. */
static uint32
touch_pages(uint8 *stack_min_addr, uint32 page_size)
{
    uint8 sum = 0;
    while (1) {
        volatile uint8 *touch_addr = (volatile uint8 *)os_alloca(page_size / 2);
        if (touch_addr < stack_min_addr + page_size) {
            /* Final probe: read the last byte of the lowest page. */
            sum += *(stack_min_addr + page_size - 1);
            break;
        }
        *touch_addr = 0;
        sum += *touch_addr;
    }
    return sum;
}
#if defined(__clang__)
#pragma clang optimize on
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
+
/* Map the whole thread stack and then mprotect the lowest
   STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT pages to PROT_NONE so a stack
   overflow faults deterministically. Returns false when the stack
   boundary is unknown or mprotect fails. */
static bool
init_stack_guard_pages()
{
    uint32 page_size = os_getpagesize();
    uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;
    uint8 *stack_min_addr = os_thread_get_stack_boundary();

    if (stack_min_addr == NULL)
        return false;

    /* Touch each stack page to ensure that it has been mapped: the OS
       may lazily grow the stack mapping as a guard page is hit. */
    (void)touch_pages(stack_min_addr, page_size);
    /* First time to call aot function, protect guard pages */
    if (os_mprotect(stack_min_addr, page_size * guard_page_count,
                    MMAP_PROT_NONE)
        != 0) {
        return false;
    }
    return true;
}
+
/* Undo init_stack_guard_pages(): restore read/write access on the guard
   pages at the bottom of the thread stack. mprotect failure is ignored
   here (best-effort teardown). */
static void
destroy_stack_guard_pages()
{
    uint32 page_size = os_getpagesize();
    uint32 guard_page_count = STACK_OVERFLOW_CHECK_GUARD_PAGE_COUNT;
    uint8 *stack_min_addr = os_thread_get_stack_boundary();

    os_mprotect(stack_min_addr, page_size * guard_page_count,
                MMAP_PROT_READ | MMAP_PROT_WRITE);
}
#endif /* end of WASM_DISABLE_STACK_HW_BOUND_CHECK == 0 */
+
/* Block (how == SIG_BLOCK) or unblock (how == SIG_UNBLOCK) SIGSEGV and
   SIGBUS for the calling thread only. */
static void
mask_signals(int how)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGSEGV);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(how, &set, NULL);
}
+
/* Previously installed SIGSEGV/SIGBUS actions, saved by
   os_thread_signal_init() so unconsumed signals can be forwarded. */
static os_thread_local_attribute struct sigaction prev_sig_act_SIGSEGV;
static os_thread_local_attribute struct sigaction prev_sig_act_SIGBUS;

/* SIGSEGV/SIGBUS handler: give the registered runtime handler first
   chance at the fault, then forward to the previously installed handler,
   or report the fault address and abort().
   NOTE(review): os_printf/abort are not async-signal-safe -- presumably
   acceptable on this crash path; the runtime handler is presumably
   expected not to return when it consumes the fault. Confirm both. */
static void
signal_callback(int sig_num, siginfo_t *sig_info, void *sig_ucontext)
{
    void *sig_addr = sig_info->si_addr;
    struct sigaction *prev_sig_act = NULL;

    mask_signals(SIG_BLOCK);

    /* Try to handle signal with the registered signal handler */
    if (signal_handler && (sig_num == SIGSEGV || sig_num == SIGBUS)) {
        signal_handler(sig_addr);
    }

    if (sig_num == SIGSEGV)
        prev_sig_act = &prev_sig_act_SIGSEGV;
    else if (sig_num == SIGBUS)
        prev_sig_act = &prev_sig_act_SIGBUS;

    /* Forward the signal to next handler if found */
    if (prev_sig_act && (prev_sig_act->sa_flags & SA_SIGINFO)) {
        prev_sig_act->sa_sigaction(sig_num, sig_info, sig_ucontext);
    }
    /* NOTE(review): SIG_DFL/SIG_IGN are compared through sa_sigaction,
       presumably relying on sa_handler/sa_sigaction sharing storage --
       confirm per platform. Re-raising then happens via the restored
       default disposition when the signal recurs. */
    else if (prev_sig_act
             && ((void *)prev_sig_act->sa_sigaction == SIG_DFL
                 || (void *)prev_sig_act->sa_sigaction == SIG_IGN)) {
        sigaction(sig_num, prev_sig_act, NULL);
    }
    /* Output signal info and then crash if signal is unhandled */
    else {
        switch (sig_num) {
            case SIGSEGV:
                os_printf("unhandled SIGSEGV, si_addr: %p\n", sig_addr);
                break;
            case SIGBUS:
                os_printf("unhandled SIGBUS, si_addr: %p\n", sig_addr);
                break;
            default:
                os_printf("unhandle signal %d, si_addr: %p\n", sig_num,
                          sig_addr);
                break;
        }

        abort();
    }
}
+
/* Set up the per-thread signal environment: protect the stack guard
   pages, install a signal alternate stack, and register the
   SIGSEGV/SIGBUS handler, storing `handler` as the runtime handler.
   Returns 0 on success, -1 on failure with partial state rolled back via
   the goto cleanup chain. No-op after the first success on this thread. */
int
os_thread_signal_init(os_signal_handler handler)
{
    struct sigaction sig_act;
#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    stack_t sigalt_stack_info;
    uint32 map_size = SIG_ALT_STACK_SIZE;
    uint8 *map_addr;
#endif

    if (thread_signal_inited)
        return 0;

#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    if (!init_stack_guard_pages()) {
        os_printf("Failed to init stack guard pages\n");
        return -1;
    }

    /* Initialize memory for signal alternate stack of current thread */
    if (!(map_addr = os_mmap(NULL, map_size, MMAP_PROT_READ | MMAP_PROT_WRITE,
                             MMAP_MAP_NONE))) {
        os_printf("Failed to mmap memory for alternate stack\n");
        goto fail1;
    }

    /* Initialize signal alternate stack */
    memset(map_addr, 0, map_size);
    sigalt_stack_info.ss_sp = map_addr;
    sigalt_stack_info.ss_size = map_size;
    sigalt_stack_info.ss_flags = 0;
    if (sigaltstack(&sigalt_stack_info, NULL) != 0) {
        os_printf("Failed to init signal alternate stack\n");
        goto fail2;
    }
#endif

    memset(&prev_sig_act_SIGSEGV, 0, sizeof(struct sigaction));
    memset(&prev_sig_act_SIGBUS, 0, sizeof(struct sigaction));

    /* Install signal handler */
    sig_act.sa_sigaction = signal_callback;
    sig_act.sa_flags = SA_SIGINFO | SA_NODEFER;
#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    /* SA_ONSTACK: run the handler on the alternate stack installed above,
       since the fault may be a stack overflow. */
    sig_act.sa_flags |= SA_ONSTACK;
#endif
    sigemptyset(&sig_act.sa_mask);
    if (sigaction(SIGSEGV, &sig_act, &prev_sig_act_SIGSEGV) != 0
        || sigaction(SIGBUS, &sig_act, &prev_sig_act_SIGBUS) != 0) {
        os_printf("Failed to register signal handler\n");
        goto fail3;
    }

#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    sigalt_stack_base_addr = map_addr;
#endif
    signal_handler = handler;
    thread_signal_inited = true;
    return 0;

    /* Unwind in reverse acquisition order. */
fail3:
#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    memset(&sigalt_stack_info, 0, sizeof(stack_t));
    sigalt_stack_info.ss_flags = SS_DISABLE;
    sigalt_stack_info.ss_size = map_size;
    sigaltstack(&sigalt_stack_info, NULL);
fail2:
    os_munmap(map_addr, map_size);
fail1:
    destroy_stack_guard_pages();
#endif
    return -1;
}
+
/* Tear down the state set up by os_thread_signal_init(): disable and
   unmap the signal alternate stack and unprotect the stack guard pages.
   The SIGSEGV/SIGBUS sigaction handlers are not restored here. No-op if
   the thread was never initialized. */
void
os_thread_signal_destroy()
{
#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    stack_t sigalt_stack_info;
#endif

    if (!thread_signal_inited)
        return;

#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
    /* Disable signal alternate stack */
    memset(&sigalt_stack_info, 0, sizeof(stack_t));
    sigalt_stack_info.ss_flags = SS_DISABLE;
    sigalt_stack_info.ss_size = SIG_ALT_STACK_SIZE;
    sigaltstack(&sigalt_stack_info, NULL);

    os_munmap(sigalt_stack_base_addr, SIG_ALT_STACK_SIZE);

    destroy_stack_guard_pages();
#endif

    thread_signal_inited = false;
}
+
/* True once os_thread_signal_init() has succeeded on this thread and
   os_thread_signal_destroy() has not yet run. */
bool
os_thread_signal_inited()
{
    return thread_signal_inited;
}
+
/* Re-enable SIGSEGV/SIGBUS delivery for this thread; counterpart to the
   SIG_BLOCK performed at the top of signal_callback(). */
void
os_signal_unmask()
{
    mask_signals(SIG_UNBLOCK);
}
+
/* Reset per-thread signal state after longjmp-ing out of a signal
   handler. Only needed on macOS, via the private __sigreturn syscall
   wrapper; a no-op elsewhere. */
void
os_sigreturn()
{
#if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
#if defined(__APPLE__)
#define UC_RESET_ALT_STACK 0x80000000
    extern int __sigreturn(void *, int);

    /* It's necessary to call __sigreturn to restore the sigaltstack state
       after exiting the signal handler. */
    __sigreturn(NULL, UC_RESET_ALT_STACK);
#endif
#endif
}
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c
new file mode 100644
index 000000000..bcf5ca3ce
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/shared/platform/common/posix/posix_time.c
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "platform_api_vmcore.h"
+
+uint64
+os_time_get_boot_microsecond()
+{
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+ return 0;
+ }
+
+ return ((uint64)ts.tv_sec) * 1000 * 1000 + ((uint64)ts.tv_nsec) / 1000;
+}