summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test
diff options
context:
space:
mode:
Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test')
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/CMakeLists.txt173
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/build.sh21
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/average.py16
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/max.py17
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_dimension.py15
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py33
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/sum.py17
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/utils.py13
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/requirements.txt1
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/test_tensorflow.c146
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.c162
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.h52
12 files changed, 666 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/CMakeLists.txt b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/CMakeLists.txt
new file mode 100644
index 000000000..33fad71eb
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/CMakeLists.txt
@@ -0,0 +1,173 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# NOTE(review): the original requested "VERSION 2.9", which is not a real
# CMake release (the 2.8.x series was followed directly by 3.0).
# Require 3.0 so the minimum is a version that actually exists.
cmake_minimum_required (VERSION 3.0)

project (iwasm)

set (CMAKE_VERBOSE_MAKEFILE OFF)
set (CMAKE_C_STANDARD 99)
set (CMAKE_CXX_STANDARD 14)
# Reset default linker flags
set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")

if (NOT DEFINED WAMR_BUILD_PLATFORM)
  set (WAMR_BUILD_PLATFORM "linux")
endif ()

# Set WAMR_BUILD_TARGET, currently values supported:
# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",
# "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]"
if (NOT DEFINED WAMR_BUILD_TARGET)
  if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64|aarch64)")
    set (WAMR_BUILD_TARGET "AARCH64")
  elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
    set (WAMR_BUILD_TARGET "RISCV64")
  elseif (CMAKE_SIZEOF_VOID_P EQUAL 8)
    # Build as X86_64 by default in 64-bit platform
    set (WAMR_BUILD_TARGET "X86_64")
  elseif (CMAKE_SIZEOF_VOID_P EQUAL 4)
    # Build as X86_32 by default in 32-bit platform
    set (WAMR_BUILD_TARGET "X86_32")
  else ()
    message(SEND_ERROR "Unsupported build target platform!")
  endif ()
endif ()

if (NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE Release)
endif ()

if (NOT DEFINED WAMR_BUILD_INTERP)
  # Enable Interpreter by default
  set (WAMR_BUILD_INTERP 1)
endif ()

if (NOT DEFINED WAMR_BUILD_AOT)
  # Enable AOT by default.
  set (WAMR_BUILD_AOT 1)
endif ()

if (NOT DEFINED WAMR_BUILD_JIT)
  # Disable JIT by default.
  set (WAMR_BUILD_JIT 0)
endif ()

if (NOT DEFINED WAMR_BUILD_FAST_JIT)
  # Disable Fast JIT by default
  set (WAMR_BUILD_FAST_JIT 0)
endif ()

if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
  # Enable libc builtin support by default
  set (WAMR_BUILD_LIBC_BUILTIN 1)
endif ()

if (NOT DEFINED WAMR_BUILD_LIBC_WASI)
  # Enable libc wasi support by default
  set (WAMR_BUILD_LIBC_WASI 1)
endif ()

if (NOT DEFINED WAMR_BUILD_FAST_INTERP)
  # Enable fast interpreter
  set (WAMR_BUILD_FAST_INTERP 1)
endif ()

if (NOT DEFINED WAMR_BUILD_MULTI_MODULE)
  # Disable multiple modules by default
  set (WAMR_BUILD_MULTI_MODULE 0)
endif ()

if (NOT DEFINED WAMR_BUILD_LIB_PTHREAD)
  # Disable pthread library by default
  set (WAMR_BUILD_LIB_PTHREAD 0)
endif ()

if (NOT DEFINED WAMR_BUILD_MINI_LOADER)
  # Disable wasm mini loader by default
  set (WAMR_BUILD_MINI_LOADER 0)
endif ()

if (NOT DEFINED WAMR_BUILD_SIMD)
  # Enable SIMD by default
  set (WAMR_BUILD_SIMD 1)
endif ()

if (NOT DEFINED WAMR_BUILD_REF_TYPES)
  # Disable reference types by default
  set (WAMR_BUILD_REF_TYPES 0)
endif ()

if (NOT DEFINED WAMR_BUILD_DEBUG_INTERP)
  # Disable Debug feature by default
  set (WAMR_BUILD_DEBUG_INTERP 0)
endif ()

# Interpreter debugging is incompatible with the fast interpreter, the mini
# loader and SIMD, so force those off when it is requested.
if (WAMR_BUILD_DEBUG_INTERP EQUAL 1)
  set (WAMR_BUILD_FAST_INTERP 0)
  set (WAMR_BUILD_MINI_LOADER 0)
  set (WAMR_BUILD_SIMD 0)
endif ()

set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)

include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)
add_library(vmlib ${WAMR_RUNTIME_LIB_SOURCE})

set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -pie -fPIE")

set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wshadow")
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion -Wsign-conversion")

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wno-unused")

if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
  if (NOT (CMAKE_C_COMPILER MATCHES ".*clang.*" OR CMAKE_C_COMPILER_ID MATCHES ".*Clang"))
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mindirect-branch-register")
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mindirect-branch-register")
    # UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
    if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT WAMR_BUILD_JIT EQUAL 1)
      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
                         -fno-sanitize=bounds,bounds-strict,alignment \
                         -fno-sanitize-recover")
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
                         -fno-sanitize=bounds,bounds-strict,alignment \
                         -fno-sanitize-recover")
    endif()
  else ()
    # UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
    if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT WAMR_BUILD_JIT EQUAL 1)
      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
                         -fno-sanitize=bounds,alignment \
                         -fno-sanitize-recover")
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
                         -fno-sanitize=bounds,alignment \
                         -fno-sanitize-recover")
    endif()
  endif ()
endif ()

# The following flags are to enhance security, but it may impact performance,
# we disable them by default.
#if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
#  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ftrapv -D_FORTIFY_SOURCE=2")
#endif ()
#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstack-protector-strong --param ssp-buffer-size=4")
#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wl,-z,noexecstack,-z,relro,-z,now")

include (${SHARED_DIR}/utils/uncommon/shared_uncommon.cmake)

add_executable (iwasm ${WAMR_ROOT_DIR}/product-mini/platforms/${WAMR_BUILD_PLATFORM}/main.c ${UNCOMMON_SHARED_SOURCE})

install (TARGETS iwasm DESTINATION bin)

target_link_libraries (iwasm vmlib ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} ${TENSORFLOW_LIB} -lm -ldl -lpthread)

add_library (libiwasm SHARED ${WAMR_RUNTIME_LIB_SOURCE})

install (TARGETS libiwasm DESTINATION lib)

set_target_properties (libiwasm PROPERTIES OUTPUT_NAME iwasm)

target_link_libraries (libiwasm ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} -lm -ldl -lpthread)
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/build.sh b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/build.sh
new file mode 100755
index 000000000..33879eaf7
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/build.sh
@@ -0,0 +1,21 @@
#!/bin/sh
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Abort on the first failing command: the original script kept going, so a
# broken compile or model-generation step was silently ignored.
set -e

# WASM application that uses WASI-NN

/opt/wasi-sdk/bin/clang \
    -Wl,--allow-undefined \
    -Wl,--strip-all,--no-entry \
    --sysroot=/opt/wasi-sdk/share/wasi-sysroot \
    -I.. -I../src/utils \
    -o test_tensorflow.wasm \
    test_tensorflow.c utils.c

# TFLite models to use in the tests

cd models
python3 average.py
python3 max.py
python3 mult_dimension.py
python3 mult_outputs.py
python3 sum.py
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/average.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/average.py
new file mode 100755
index 000000000..a21fe7520
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/average.py
@@ -0,0 +1,16 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""Builds a single average-pooling layer model and exports it as TFLite."""

import tensorflow as tf
from utils import save_model

# A 5x5 single-channel input reduced to one value by a 5x5 average pool.
pooling = tf.keras.layers.AveragePooling2D(
    pool_size=(5, 5), strides=None, padding="valid", data_format=None)
model = tf.keras.Sequential(
    [tf.keras.layers.InputLayer(input_shape=[5, 5, 1]), pooling])

# Export model to tflite
save_model(model, "average.tflite")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/max.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/max.py
new file mode 100755
index 000000000..a3ec45677
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/max.py
@@ -0,0 +1,17 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""Builds a single max-pooling layer model and exports it as TFLite."""

import tensorflow as tf

from utils import save_model

# A 5x5 single-channel input reduced to its maximum by a 5x5 max pool.
pooling = tf.keras.layers.MaxPooling2D(
    pool_size=(5, 5), strides=None, padding="valid", data_format=None)
model = tf.keras.Sequential(
    [tf.keras.layers.InputLayer(input_shape=[5, 5, 1]), pooling])

# Export model to tflite
save_model(model, "max.tflite")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_dimension.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_dimension.py
new file mode 100644
index 000000000..f521a93af
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_dimension.py
@@ -0,0 +1,15 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""Builds a 1x1 convolution model over a 3x3 input and exports it as TFLite."""

import tensorflow as tf
from utils import save_model

# 1x1 convolution with an all-ones kernel and zero bias.
conv = tf.keras.layers.Conv2D(
    1, (1, 1),
    kernel_initializer=tf.keras.initializers.Constant(value=1),
    bias_initializer='zeros')
model = tf.keras.Sequential(
    [tf.keras.layers.InputLayer(input_shape=[3, 3, 1]), conv])
# Export model to tflite
save_model(model, "mult_dim.tflite")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py
new file mode 100755
index 000000000..98a50129c
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py
@@ -0,0 +1,33 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""Builds a two-output model (4x1 all-ones convolution and 4x1 average
pooling over the same input) and exports it as TFLite."""

import numpy as np
import tensorflow as tf
# Import the layers from tensorflow.keras, as the sibling scripts do, instead
# of the standalone `keras` package: a separately installed keras can differ
# in version from the bundled tf.keras and break the model/converter.
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import AveragePooling2D, Conv2D

from utils import save_model


inputs = Input(shape=(4, 4, 1))

# Output 1: 4x1 all-ones convolution with zero bias.
output1 = Conv2D(1, (4, 1), kernel_initializer=tf.keras.initializers.Constant(
    value=1), bias_initializer='zeros'
)(inputs)
# Output 2: 4x1 average pooling over the same input.
output2 = AveragePooling2D(pool_size=(
    4, 1), strides=None, padding="valid", data_format=None)(inputs)

model = Model(inputs=inputs, outputs=[output1, output2])

# Sanity check: run the model once on a 0..15 ramp and print both outputs.
inp = np.arange(16).reshape((1, 4, 4, 1))

print(inp)

res = model.predict(inp)

print(res)
print(res[0].shape)
print(res[1].shape)

save_model(model, "mult_out.tflite")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/sum.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/sum.py
new file mode 100755
index 000000000..503125b34
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/sum.py
@@ -0,0 +1,17 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

"""Builds a 5x5 all-ones convolution model (sums the input) and exports it
as TFLite."""

import tensorflow as tf

from utils import save_model

# A 5x5 all-ones convolution with zero bias adds up every input element.
conv = tf.keras.layers.Conv2D(
    1, (5, 5),
    kernel_initializer=tf.keras.initializers.Constant(value=1),
    bias_initializer='zeros')
model = tf.keras.Sequential(
    [tf.keras.layers.InputLayer(input_shape=[5, 5, 1]), conv])

# Export model to tflite
save_model(model, "sum.tflite")
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/utils.py b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/utils.py
new file mode 100644
index 000000000..8335f05da
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/models/utils.py
@@ -0,0 +1,13 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

import pathlib

import tensorflow as tf


def save_model(model, filename):
    """Convert a Keras model to TFLite and write it to ./<filename>."""
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    flatbuffer = converter.convert()
    target = pathlib.Path("./") / filename
    target.write_bytes(flatbuffer)
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/requirements.txt b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/requirements.txt
new file mode 100644
index 000000000..4cf2910db
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/requirements.txt
@@ -0,0 +1 @@
+tensorflow==2.11.1 \ No newline at end of file
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/test_tensorflow.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/test_tensorflow.c
new file mode 100644
index 000000000..2fa516538
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/test_tensorflow.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <math.h>
+
+#include "utils.h"
+#include "logger.h"
+
+void
+test_sum(execution_target target)
+{
+ int dims[] = { 1, 5, 5, 1 };
+ input_info input = create_input(dims);
+
+ uint32_t output_size = 0;
+ float *output = run_inference(target, input.input_tensor, input.dim,
+ &output_size, "/assets/models/sum.tflite", 1);
+
+ assert(output_size == 1);
+ assert(fabs(output[0] - 300.0) < EPSILON);
+
+ free(input.dim);
+ free(input.input_tensor);
+ free(output);
+}
+
+void
+test_max(execution_target target)
+{
+ int dims[] = { 1, 5, 5, 1 };
+ input_info input = create_input(dims);
+
+ uint32_t output_size = 0;
+ float *output = run_inference(target, input.input_tensor, input.dim,
+ &output_size, "/assets/models/max.tflite", 1);
+
+ assert(output_size == 1);
+ assert(fabs(output[0] - 24.0) < EPSILON);
+ NN_INFO_PRINTF("Result: max is %f", output[0]);
+
+ free(input.dim);
+ free(input.input_tensor);
+ free(output);
+}
+
+void
+test_average(execution_target target)
+{
+ int dims[] = { 1, 5, 5, 1 };
+ input_info input = create_input(dims);
+
+ uint32_t output_size = 0;
+ float *output =
+ run_inference(target, input.input_tensor, input.dim, &output_size,
+ "/assets/models/average.tflite", 1);
+
+ assert(output_size == 1);
+ assert(fabs(output[0] - 12.0) < EPSILON);
+ NN_INFO_PRINTF("Result: average is %f", output[0]);
+
+ free(input.dim);
+ free(input.input_tensor);
+ free(output);
+}
+
+void
+test_mult_dimensions(execution_target target)
+{
+ int dims[] = { 1, 3, 3, 1 };
+ input_info input = create_input(dims);
+
+ uint32_t output_size = 0;
+ float *output =
+ run_inference(target, input.input_tensor, input.dim, &output_size,
+ "/assets/models/mult_dim.tflite", 1);
+
+ assert(output_size == 9);
+ for (int i = 0; i < 9; i++)
+ assert(fabs(output[i] - i) < EPSILON);
+
+ free(input.dim);
+ free(input.input_tensor);
+ free(output);
+}
+
+void
+test_mult_outputs(execution_target target)
+{
+ int dims[] = { 1, 4, 4, 1 };
+ input_info input = create_input(dims);
+
+ uint32_t output_size = 0;
+ float *output =
+ run_inference(target, input.input_tensor, input.dim, &output_size,
+ "/assets/models/mult_out.tflite", 2);
+
+ assert(output_size == 8);
+ // first tensor check
+ for (int i = 0; i < 4; i++)
+ assert(fabs(output[i] - (i * 4 + 24)) < EPSILON);
+ // second tensor check
+ for (int i = 0; i < 4; i++)
+ assert(fabs(output[i + 4] - (i + 6)) < EPSILON);
+
+ free(input.dim);
+ free(input.input_tensor);
+ free(output);
+}
+
+int
+main()
+{
+ char *env = getenv("TARGET");
+ if (env == NULL) {
+ NN_INFO_PRINTF("Usage:\n--env=\"TARGET=[cpu|gpu]\"");
+ return 1;
+ }
+ execution_target target;
+ if (strcmp(env, "cpu") == 0)
+ target = cpu;
+ else if (strcmp(env, "gpu") == 0)
+ target = gpu;
+ else {
+ NN_ERR_PRINTF("Wrong target!");
+ return 1;
+ }
+ NN_INFO_PRINTF("################### Testing sum...");
+ test_sum(target);
+ NN_INFO_PRINTF("################### Testing max...");
+ test_max(target);
+ NN_INFO_PRINTF("################### Testing average...");
+ test_average(target);
+ NN_INFO_PRINTF("################### Testing multiple dimensions...");
+ test_mult_dimensions(target);
+ NN_INFO_PRINTF("################### Testing multiple outputs...");
+ test_mult_outputs(target);
+
+ NN_INFO_PRINTF("Tests: passed!");
+ return 0;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.c b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.c
new file mode 100644
index 000000000..e0704cab4
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "utils.h"
+#include "logger.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+error
+wasm_load(char *model_name, graph *g, execution_target target)
+{
+ FILE *pFile = fopen(model_name, "r");
+ if (pFile == NULL)
+ return invalid_argument;
+
+ uint8_t *buffer;
+ size_t result;
+
+ // allocate memory to contain the whole file:
+ buffer = (uint8_t *)malloc(sizeof(uint8_t) * MAX_MODEL_SIZE);
+ if (buffer == NULL) {
+ fclose(pFile);
+ return missing_memory;
+ }
+
+ result = fread(buffer, 1, MAX_MODEL_SIZE, pFile);
+ if (result <= 0) {
+ fclose(pFile);
+ free(buffer);
+ return missing_memory;
+ }
+
+ graph_builder_array arr;
+
+ arr.size = 1;
+ arr.buf = (graph_builder *)malloc(sizeof(graph_builder));
+ if (arr.buf == NULL) {
+ fclose(pFile);
+ free(buffer);
+ return missing_memory;
+ }
+
+ arr.buf[0].size = result;
+ arr.buf[0].buf = buffer;
+
+ error res = load(&arr, tensorflowlite, target, g);
+
+ fclose(pFile);
+ free(buffer);
+ free(arr.buf);
+ return res;
+}
+
+error
+wasm_init_execution_context(graph g, graph_execution_context *ctx)
+{
+ return init_execution_context(g, ctx);
+}
+
+error
+wasm_set_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
+{
+ tensor_dimensions dims;
+ dims.size = INPUT_TENSOR_DIMS;
+ dims.buf = (uint32_t *)malloc(dims.size * sizeof(uint32_t));
+ if (dims.buf == NULL)
+ return missing_memory;
+
+ tensor tensor;
+ tensor.dimensions = &dims;
+ for (int i = 0; i < tensor.dimensions->size; ++i)
+ tensor.dimensions->buf[i] = dim[i];
+ tensor.type = fp32;
+ tensor.data = (uint8_t *)input_tensor;
+ error err = set_input(ctx, 0, &tensor);
+
+ free(dims.buf);
+ return err;
+}
+
+error
+wasm_compute(graph_execution_context ctx)
+{
+ return compute(ctx);
+}
+
+error
+wasm_get_output(graph_execution_context ctx, uint32_t index, float *out_tensor,
+ uint32_t *out_size)
+{
+ return get_output(ctx, index, (uint8_t *)out_tensor, out_size);
+}
+
+float *
+run_inference(execution_target target, float *input, uint32_t *input_size,
+ uint32_t *output_size, char *model_name,
+ uint32_t num_output_tensors)
+{
+ graph graph;
+ if (wasm_load(model_name, &graph, target) != success) {
+ NN_ERR_PRINTF("Error when loading model.");
+ exit(1);
+ }
+
+ graph_execution_context ctx;
+ if (wasm_init_execution_context(graph, &ctx) != success) {
+ NN_ERR_PRINTF("Error when initialixing execution context.");
+ exit(1);
+ }
+
+ if (wasm_set_input(ctx, input, input_size) != success) {
+ NN_ERR_PRINTF("Error when setting input tensor.");
+ exit(1);
+ }
+
+ if (wasm_compute(ctx) != success) {
+ NN_ERR_PRINTF("Error when running inference.");
+ exit(1);
+ }
+
+ float *out_tensor = (float *)malloc(sizeof(float) * MAX_OUTPUT_TENSOR_SIZE);
+ if (out_tensor == NULL) {
+ NN_ERR_PRINTF("Error when allocating memory for output tensor.");
+ exit(1);
+ }
+
+ uint32_t offset = 0;
+ for (int i = 0; i < num_output_tensors; ++i) {
+ *output_size = MAX_OUTPUT_TENSOR_SIZE - *output_size;
+ if (wasm_get_output(ctx, i, &out_tensor[offset], output_size)
+ != success) {
+ NN_ERR_PRINTF("Error when getting output.");
+ exit(1);
+ }
+
+ offset += *output_size;
+ }
+ *output_size = offset;
+ return out_tensor;
+}
+
+input_info
+create_input(int *dims)
+{
+ input_info input = { .dim = NULL, .input_tensor = NULL, .elements = 1 };
+
+ input.dim = malloc(INPUT_TENSOR_DIMS * sizeof(uint32_t));
+ if (input.dim)
+ for (int i = 0; i < INPUT_TENSOR_DIMS; ++i) {
+ input.dim[i] = dims[i];
+ input.elements *= dims[i];
+ }
+
+ input.input_tensor = malloc(input.elements * sizeof(float));
+ for (int i = 0; i < input.elements; ++i)
+ input.input_tensor[i] = i;
+
+ return input;
+}
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.h b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.h
new file mode 100644
index 000000000..6373be542
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/core/iwasm/libraries/wasi-nn/test/utils.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef WASI_NN_UTILS
+#define WASI_NN_UTILS
+
+#include <stdint.h>
+
+#include "wasi_nn.h"
+
+#define MAX_MODEL_SIZE 85000000
+#define MAX_OUTPUT_TENSOR_SIZE 200
+#define INPUT_TENSOR_DIMS 4
+#define EPSILON 1e-8
+
+typedef struct {
+ float *input_tensor;
+ uint32_t *dim;
+ uint32_t elements;
+} input_info;
+
+/* wasi-nn wrappers */
+
+error
+wasm_load(char *model_name, graph *g, execution_target target);
+
+error
+wasm_init_execution_context(graph g, graph_execution_context *ctx);
+
+error
+wasm_set_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim);
+
+error
+wasm_compute(graph_execution_context ctx);
+
+error
+wasm_get_output(graph_execution_context ctx, uint32_t index, float *out_tensor,
+ uint32_t *out_size);
+
+/* Utils */
+
+float *
+run_inference(execution_target target, float *input, uint32_t *input_size,
+ uint32_t *output_size, char *model_name,
+ uint32_t num_output_tensors);
+
+input_info
+create_input(int *dims);
+
+#endif