summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow
diff options
context:
space:
mode:
Diffstat (limited to 'fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow')
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/README.md19
-rwxr-xr-xfluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/build.sh157
-rw-r--r--fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/tf_lite.patch84
3 files changed, 260 insertions, 0 deletions
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/README.md b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/README.md
new file mode 100644
index 000000000..7bc7dd259
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/README.md
@@ -0,0 +1,19 @@
+"tensorflow" sample introduction
+==============
+
+This sample demonstrates how to build [tensorflow](https://github.com/tensorflow/tensorflow) into WebAssembly with emsdk toolchain and run it with iwasm:
+```bash
+./build.sh
+# for linux platform, or
+./build.sh --threads
+# for multi-threading on linux platform
+./build.sh --sgx
+# for linux-sgx platform
+```
+to build tensorflow and run it with iwasm, which basically contains the following steps:
+- clone emsdk under `<wamr_dir>/core/deps`, install and activate 2.0.26
+- hack emcc to delete some objects in libc.a
+- build tf-lite with emcc compiler
+- build iwasm with lib-pthread and libc-emcc enabled
+- run benchmark model with iwasm:
+ --max_secs=300: means the max benchmark running time is 5 minutes, you can adjust it by yourself
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/build.sh b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/build.sh
new file mode 100755
index 000000000..6df8db423
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/build.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+#
+# Copyright (C) 2019 Intel Corporation. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+
+####################################
+# build tensorflow-lite sample #
+####################################
+BUILD_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+WAMR_DIR="${BUILD_SCRIPT_DIR}/../../.."
+WAMR_PLATFORM_DIR="${WAMR_DIR}/product-mini/platforms"
+WAMRC_DIR="${WAMR_DIR}/wamr-compiler"
+CORE_DEPS_DIR="${WAMR_DIR}/core/deps"
+EMSDK_DIR="${CORE_DEPS_DIR}/emsdk"
+
+EMSDK_WASM_DIR="${EMSDK_DIR}/upstream/emscripten/cache/sysroot/lib/wasm32-emscripten"
+OUT_DIR="${BUILD_SCRIPT_DIR}/out"
+TENSORFLOW_DIR="${BUILD_SCRIPT_DIR}/tensorflow"
+TF_LITE_BUILD_DIR="${TENSORFLOW_DIR}/tensorflow/lite/tools/make"
+
+function Clear_Before_Exit()
+{
+ [[ -f ${TENSORFLOW_DIR}/tf_lite.patch ]] &&
+ rm -f ${TENSORFLOW_DIR}/tf_lite.patch
+ # restore the libc.a under EMSDK_WASM_DIR
+ cd ${EMSDK_WASM_DIR}
+ mv libc.a.bak libc.a
+}
+
+set -xe
+
+# 1.clone emsdk
+cd ${CORE_DEPS_DIR}
+rm -fr emsdk
+git clone https://github.com/emscripten-core/emsdk.git
+cd emsdk
+./emsdk install 2.0.26
+./emsdk activate 2.0.26
+source emsdk_env.sh
+
+# 2.hack emcc
+cd ${EMSDK_WASM_DIR}
+# back up libc.a
+cp libc.a libc.a.bak
+# delete some objects in libc.a
+emar d libc.a open.o
+emar d libc.a mmap.o
+emar d libc.a munmap.o
+emar d libc.a library_pthread_stub.o
+emar d libc.a pthread_self.o
+emranlib libc.a
+
+# 3. build tf-lite
+cd ${BUILD_SCRIPT_DIR}
+# 3.1 clone tf repo from Github and checkout to 2303ed commit
+if [ ! -d "tensorflow" ]; then
+ git clone https://github.com/tensorflow/tensorflow.git
+fi
+
+cd ${TENSORFLOW_DIR}
+git checkout 2303ed4bdb344a1fc4545658d1df6d9ce20331dd
+
+# 3.2 copy the tf-lite.patch to tensorflow_root_dir and apply it
+cd ${TENSORFLOW_DIR}
+cp ${BUILD_SCRIPT_DIR}/tf_lite.patch .
+git checkout tensorflow/lite/tools/make/Makefile
+git checkout tensorflow/lite/tools/make/targets/linux_makefile.inc
+
+if [[ $(git apply tf_lite.patch 2>&1) =~ "error" ]]; then
+ echo "git apply patch failed, please check tf-lite related changes..."
+ Clear_Before_Exit
+ exit 0
+fi
+
+cd ${TF_LITE_BUILD_DIR}
+# 3.3 download dependencies
+if [ ! -d "${TF_LITE_BUILD_DIR}/downloads" ]; then
+ source download_dependencies.sh
+fi
+
+# 3.4 build tf-lite target
+if [ -d "${TF_LITE_BUILD_DIR}/gen" ]; then
+ rm -fr ${TF_LITE_BUILD_DIR}/gen
+fi
+
+make -j 4 -C "${TENSORFLOW_DIR}" -f ${TF_LITE_BUILD_DIR}/Makefile
+
+# remove patch file and recover emcc libc.a after building
+Clear_Before_Exit
+
+# 3.5 copy /make/gen target files to out/
+rm -rf ${OUT_DIR}
+mkdir ${OUT_DIR}
+cp -r ${TF_LITE_BUILD_DIR}/gen/linux_x86_64/bin/. ${OUT_DIR}/
+
+# 4. compile tf-model.wasm to tf-model.aot with wamrc
+# 4.1 build wamr-compiler
+cd ${WAMRC_DIR}
+./build_llvm.sh
+rm -fr build && mkdir build
+cd build && cmake ..
+make
+# 4.2 compile tf-model.wasm to tf-model.aot
+WAMRC_CMD="$(pwd)/wamrc"
+cd ${OUT_DIR}
+if [[ $1 == '--sgx' ]]; then
+ ${WAMRC_CMD} -sgx -o benchmark_model.aot benchmark_model.wasm
+elif [[ $1 == '--threads' ]]; then
+ ${WAMRC_CMD} --enable-multi-thread -o benchmark_model.aot benchmark_model.wasm
+else
+ ${WAMRC_CMD} -o benchmark_model.aot benchmark_model.wasm
+fi
+
+# 5. build iwasm with pthread and libc_emcc enable
+# platform:
+# linux by default
+# linux-sgx if $1 equals '--sgx'
+if [[ $1 == '--sgx' ]]; then
+ cd ${WAMR_PLATFORM_DIR}/linux-sgx
+ rm -fr build && mkdir build
+ cd build && cmake .. -DWAMR_BUILD_LIBC_EMCC=1
+ make
+ cd ../enclave-sample
+ make
+else
+ cd ${WAMR_PLATFORM_DIR}/linux
+ rm -fr build && mkdir build
+ cd build && cmake .. -DWAMR_BUILD_LIB_PTHREAD=1 -DWAMR_BUILD_LIBC_EMCC=1
+ make
+fi
+
+# 6. run tensorflow with iwasm
+cd ${OUT_DIR}
+# 6.1 download tf-lite model
+wget "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip"
+unzip mobilenet_v1_224_android_quant_2017_11_08.zip
+
+# 6.2 run tf-lite model with iwasm
+echo "---> run tensorflow benchmark model with iwasm"
+if [[ $1 == '--sgx' ]]; then
+ IWASM_CMD="${WAMR_PLATFORM_DIR}/linux-sgx/enclave-sample/iwasm"
+else
+ IWASM_CMD="${WAMR_PLATFORM_DIR}/linux/build/iwasm"
+fi
+
+if [[ $1 == '--threads' ]]; then
+ ${IWASM_CMD} --heap-size=10475860 \
+ benchmark_model.aot --num_threads=4 \
+ --graph=mobilenet_quant_v1_224.tflite --max_secs=300
+else
+ ${IWASM_CMD} --heap-size=10475860 \
+ benchmark_model.aot \
+ --graph=mobilenet_quant_v1_224.tflite --max_secs=300
+fi
diff --git a/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/tf_lite.patch b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/tf_lite.patch
new file mode 100644
index 000000000..b6224d581
--- /dev/null
+++ b/fluent-bit/lib/wasm-micro-runtime-WAMR-1.2.2/samples/workload/tensorflow/tf_lite.patch
@@ -0,0 +1,84 @@
+diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile
+index c7ddff58440..ebfebaead35 100644
+--- a/tensorflow/lite/tools/make/Makefile
++++ b/tensorflow/lite/tools/make/Makefile
+@@ -48,11 +48,7 @@ INCLUDES += -I/usr/local/include
+
+ # These are the default libraries needed, but they can be added to or
+ # overridden by the platform-specific settings in target makefiles.
+-LIBS := \
+--lstdc++ \
+--lpthread \
+--lm \
+--lz \
++LIBS := -lm \
+ -ldl
+
+ # There are no rules for compiling objects for the host system (since we don't
+@@ -84,14 +80,24 @@ endif # ifeq ($(HOST_ARCH),$(TARGET_ARCH))
+ endif # ifeq ($(HOST_OS),$(TARGET))
+ endif
+
++CFLAGS+=-msimd128 -mbulk-memory -matomics
++CXXFLAGS+=-msimd128 -mbulk-memory -matomics
++
++LIBFLAGS += -s TOTAL_STACK=1048576 -s MALLOC="none" \
++ -s INITIAL_MEMORY=16777216 \
++ -s MAXIMUM_MEMORY=167772160 \
++ -s ALLOW_MEMORY_GROWTH=1 \
++ -Wl,--export=__data_end -Wl,--export=__heap_base,--shared-memory,--no-check-features \
++ -s ERROR_ON_UNDEFINED_SYMBOLS=0
++
+ # This library is the main target for this makefile. It will contain a minimal
+ # runtime that can be linked in to other programs.
+ LIB_NAME := libtensorflow-lite.a
+
+ # Benchmark static library and binary
+ BENCHMARK_LIB_NAME := benchmark-lib.a
+-BENCHMARK_BINARY_NAME := benchmark_model
+-BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options
++BENCHMARK_BINARY_NAME := benchmark_model.wasm
++BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options.wasm
+
+ # A small example program that shows how to link against the library.
+ MINIMAL_SRCS := \
+@@ -277,12 +283,16 @@ LIB_PATH := $(LIBDIR)$(LIB_NAME)
+ BENCHMARK_LIB := $(LIBDIR)$(BENCHMARK_LIB_NAME)
+ BENCHMARK_BINARY := $(BINDIR)$(BENCHMARK_BINARY_NAME)
+ BENCHMARK_PERF_OPTIONS_BINARY := $(BINDIR)$(BENCHMARK_PERF_OPTIONS_BINARY_NAME)
+-MINIMAL_BINARY := $(BINDIR)minimal
++MINIMAL_BINARY := $(BINDIR)minimal.wasm
+ LABEL_IMAGE_BINARY := $(BINDIR)label_image
+
+-CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
+-CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
+-AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar
++# CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
++# CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
++# AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar
++
++CXX := em++
++CC := emcc
++AR := emar
+
+ MINIMAL_OBJS := $(addprefix $(OBJDIR), \
+ $(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(MINIMAL_SRCS))))
+diff --git a/tensorflow/lite/tools/make/targets/linux_makefile.inc b/tensorflow/lite/tools/make/targets/linux_makefile.inc
+index 222cef9e5ff..eea89a38f01 100644
+--- a/tensorflow/lite/tools/make/targets/linux_makefile.inc
++++ b/tensorflow/lite/tools/make/targets/linux_makefile.inc
+@@ -2,12 +2,10 @@
+ ifeq ($(TARGET), linux)
+ CXXFLAGS += \
+ -fPIC \
+- -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
+- -pthread
++ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
+ CFLAGS += \
+ -fPIC \
+- -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK \
+- -pthread
++ -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
+ # TODO(petewarden): In the future we may want to add architecture-specific
+ # flags like -msse4.2
+ LIBS += -ldl