Diffstat (limited to 'src/spdk/ocf/env')
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env.c           192
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env.h           642
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env_headers.h    22
-rw-r--r--  src/spdk/ocf/env/posix/ocf_env_list.h      168
4 files changed, 1024 insertions(+), 0 deletions(-)
diff --git a/src/spdk/ocf/env/posix/ocf_env.c b/src/spdk/ocf/env/posix/ocf_env.c
new file mode 100644
index 000000000..6b8a6d5da
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_env.h"
+#include <sched.h>
+#include <execinfo.h>
+
+/* ALLOCATOR */
+struct _env_allocator {
+	/*!< Unique name of the memory pool */
+	char *name;
+
+	/*!< Size of a single item in the memory pool */
+	uint32_t item_size;
+
+	/*!< Number of items currently allocated from the pool */
+	env_atomic count;
+};
+
+/* Round the allocation size up to the next power of two (sizes 0-2 are
+ * returned unchanged). Note: __builtin_clz() operates on unsigned int, so
+ * this is valid for sizes up to 2^32. */
+static inline size_t env_allocator_align(size_t size)
+{
+	if (size <= 2)
+		return size;
+	return (1ULL << 32) >> __builtin_clz(size - 1);
+}
+
+struct _env_allocator_item {
+ uint32_t flags;
+ uint32_t cpu;
+ char data[];
+};
+
+void *env_allocator_new(env_allocator *allocator)
+{
+ struct _env_allocator_item *item = NULL;
+
+ item = calloc(1, allocator->item_size);
+
+	if (!item)
+		return NULL;
+
+	item->cpu = 0;
+	env_atomic_inc(&allocator->count);
+
+	return item->data;
+}
+
+env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...)
+{
+ char name[OCF_ALLOCATOR_NAME_MAX] = { '\0' };
+ int result, error = -1;
+ va_list args;
+
+ env_allocator *allocator = calloc(1, sizeof(*allocator));
+ if (!allocator) {
+ error = __LINE__;
+ goto err;
+ }
+
+ allocator->item_size = size + sizeof(struct _env_allocator_item);
+
+ /* Format allocator name */
+ va_start(args, fmt_name);
+ result = vsnprintf(name, sizeof(name), fmt_name, args);
+ va_end(args);
+
+ if ((result > 0) && (result < sizeof(name))) {
+ allocator->name = strdup(name);
+
+ if (!allocator->name) {
+ error = __LINE__;
+ goto err;
+ }
+ } else {
+		/* Formatted name exceeds the maximum allowed length */
+ error = __LINE__;
+ goto err;
+ }
+
+ return allocator;
+
+err:
+ printf("Cannot create memory allocator, ERROR %d", error);
+ env_allocator_destroy(allocator);
+
+ return NULL;
+}
+
+void env_allocator_del(env_allocator *allocator, void *obj)
+{
+ struct _env_allocator_item *item =
+ container_of(obj, struct _env_allocator_item, data);
+
+ env_atomic_dec(&allocator->count);
+
+ free(item);
+}
+
+void env_allocator_destroy(env_allocator *allocator)
+{
+ if (allocator) {
+ if (env_atomic_read(&allocator->count)) {
+ printf("Not all objects deallocated\n");
+ ENV_WARN(true, OCF_PREFIX_SHORT" Cleanup problem\n");
+ }
+
+ free(allocator->name);
+ free(allocator);
+ }
+}
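+
+/* Usage sketch (illustrative only; "struct req" and the pool name are
+ * invented for this example):
+ *
+ *	env_allocator *pool =
+ *		env_allocator_create(sizeof(struct req), "req_pool_%u", 0);
+ *	struct req *r = pool ? env_allocator_new(pool) : NULL;
+ *
+ *	if (r)
+ *		env_allocator_del(pool, r);
+ *	env_allocator_destroy(pool);
+ */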
+
+/* DEBUGGING */
+#define ENV_TRACE_DEPTH 16
+
+void env_stack_trace(void)
+{
+ void *trace[ENV_TRACE_DEPTH];
+ char **messages = NULL;
+ int i, size;
+
+ size = backtrace(trace, ENV_TRACE_DEPTH);
+	messages = backtrace_symbols(trace, size);
+	if (!messages)
+		return;
+ printf("[stack trace]>>>\n");
+ for (i = 0; i < size; ++i)
+ printf("%s\n", messages[i]);
+ printf("<<<[stack trace]\n");
+ free(messages);
+}
+
+/* CRC */
+uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
+{
+ return crc32(crc, data, len);
+}
+
+/* EXECUTION CONTEXTS */
+static pthread_mutex_t *exec_context_mutex;
+
+static void __attribute__((constructor)) init_execution_context(void)
+{
+ unsigned count = env_get_execution_context_count();
+ unsigned i;
+
+ ENV_BUG_ON(count == 0);
+ exec_context_mutex = malloc(count * sizeof(exec_context_mutex[0]));
+ ENV_BUG_ON(exec_context_mutex == NULL);
+ for (i = 0; i < count; i++)
+ ENV_BUG_ON(pthread_mutex_init(&exec_context_mutex[i], NULL));
+}
+
+static void __attribute__((destructor)) deinit_execution_context(void)
+{
+ unsigned count = env_get_execution_context_count();
+ unsigned i;
+
+ ENV_BUG_ON(count == 0);
+ ENV_BUG_ON(exec_context_mutex == NULL);
+
+ for (i = 0; i < count; i++)
+ ENV_BUG_ON(pthread_mutex_destroy(&exec_context_mutex[i]));
+ free(exec_context_mutex);
+}
+
+/* env_get_execution_context() must ensure that after the call finishes, the
+ * caller will not get preempted from the current execution context. For the
+ * userspace env we simulate this behavior by acquiring a per execution
+ * context mutex. As a result the caller might actually get preempted, but no
+ * other thread will execute in this context until the caller puts the
+ * current execution context. */
+unsigned env_get_execution_context(void)
+{
+	int cpu = sched_getcpu();
+
+	if (cpu < 0)
+		cpu = 0;
+
+ ENV_BUG_ON(pthread_mutex_lock(&exec_context_mutex[cpu]));
+
+ return cpu;
+}
+
+void env_put_execution_context(unsigned ctx)
+{
+ pthread_mutex_unlock(&exec_context_mutex[ctx]);
+}
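+
+/* Usage sketch (illustrative; per_ctx_stats is a hypothetical array sized
+ * with env_get_execution_context_count()):
+ *
+ *	unsigned ctx = env_get_execution_context();
+ *	per_ctx_stats[ctx]++;	// no other thread executes "in" ctx here
+ *	env_put_execution_context(ctx);
+ */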
+
+unsigned env_get_execution_context_count(void)
+{
+ int num = sysconf(_SC_NPROCESSORS_ONLN);
+
+ return (num == -1) ? 0 : num;
+}
diff --git a/src/spdk/ocf/env/posix/ocf_env.h b/src/spdk/ocf/env/posix/ocf_env.h
new file mode 100644
index 000000000..da9c6421a
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_H__
+#define __OCF_ENV_H__
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#ifndef __USE_GNU
+#define __USE_GNU
+#endif
+
+#include <linux/limits.h>
+#include <linux/stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <pthread.h>
+#include <assert.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <limits.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/time.h>
+#include <sys/param.h>
+#include <sys/mman.h>
+#include <zlib.h>
+
+#include "ocf_env_list.h"
+#include "ocf_env_headers.h"
+#include "ocf/ocf_err.h"
+
+/* linux sector 512-bytes */
+#define ENV_SECTOR_SHIFT 9
+
+#define OCF_ALLOCATOR_NAME_MAX 128
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define min(a,b) MIN(a,b)
+
+#define ENV_PRIu64 PRIu64
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef uint64_t sector_t;
+
+#define __packed __attribute__((packed))
+
+#define likely(cond) __builtin_expect(!!(cond), 1)
+#define unlikely(cond) __builtin_expect(!!(cond), 0)
+
+/* MEMORY MANAGEMENT */
+#define ENV_MEM_NORMAL 0
+#define ENV_MEM_NOIO 0
+#define ENV_MEM_ATOMIC 0
+
+/* DEBUGGING */
+#define ENV_WARN(cond, fmt...) printf(fmt)
+#define ENV_WARN_ON(cond) ;
+#define ENV_WARN_ONCE(cond, fmt...) ENV_WARN(cond, fmt)
+
+#define ENV_BUG() assert(0)
+#define ENV_BUG_ON(cond) do { if (cond) ENV_BUG(); } while (0)
+#define ENV_BUILD_BUG_ON(cond) _Static_assert(!(cond), "static "\
+ "assertion failure")
+
+/* MISC UTILITIES */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member)*__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
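+
+/* Example (hypothetical struct, for illustration): given
+ *	struct foo { int x; struct list_head node; } f;
+ * a pointer p == &f.node maps back to the enclosing object with
+ *	struct foo *fp = container_of(p, struct foo, node);
+ */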
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+/* STRING OPERATIONS */
+#define env_memcpy(dest, dmax, src, slen) ({ \
+ memcpy(dest, src, min(dmax, slen)); \
+ 0; \
+ })
+#define env_memset(dest, dmax, val) ({ \
+ memset(dest, val, dmax); \
+ 0; \
+ })
+#define env_memcmp(s1, s1max, s2, s2max, diff) ({ \
+ *diff = memcmp(s1, s2, min(s1max, s2max)); \
+ 0; \
+ })
+#define env_strdup strndup
+#define env_strnlen(s, smax) strnlen(s, smax)
+#define env_strncmp(s1, slen1, s2, slen2) strncmp(s1, s2, min(slen1, slen2))
+#define env_strncpy(dest, dmax, src, slen) ({ \
+		strncpy(dest, src, min(dmax - 1, slen)); \
+		dest[min(dmax - 1, slen)] = '\0'; \
+		0; \
+	})
+
+/* MEMORY MANAGEMENT */
+static inline void *env_malloc(size_t size, int flags)
+{
+ return malloc(size);
+}
+
+static inline void *env_zalloc(size_t size, int flags)
+{
+ void *ptr = malloc(size);
+
+ if (ptr)
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+
+static inline void env_free(const void *ptr)
+{
+ free((void *)ptr);
+}
+
+static inline void *env_vmalloc_flags(size_t size, int flags)
+{
+ return malloc(size);
+}
+
+static inline void *env_vzalloc_flags(size_t size, int flags)
+{
+ return env_zalloc(size, 0);
+}
+
+static inline void *env_vmalloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void *env_vzalloc(size_t size)
+{
+ return env_zalloc(size, 0);
+}
+
+static inline void env_vfree(const void *ptr)
+{
+ free((void *)ptr);
+}
+
+/* SECURE MEMORY MANAGEMENT */
+/*
+ * An OCF adapter can opt to take additional steps to securely allocate and
+ * free memory used by OCF to store cache metadata. This is to prevent other
+ * entities in the system from acquiring parts of OCF cache metadata via
+ * memory allocations. If this is not a concern in a given product, secure
+ * alloc/free should default to vmalloc/vfree.
+ *
+ * Memory returned from secure alloc is not expected to be physically
+ * contiguous nor zeroed.
+ */
+
+/* default to standard memory allocations for secure allocations */
+#define SECURE_MEMORY_HANDLING 0
+
+static inline void *env_secure_alloc(size_t size)
+{
+ void *ptr = malloc(size);
+
+#if SECURE_MEMORY_HANDLING
+ if (ptr && mlock(ptr, size)) {
+ free(ptr);
+ ptr = NULL;
+ }
+#endif
+
+ return ptr;
+}
+
+static inline void env_secure_free(const void *ptr, size_t size)
+{
+ if (ptr) {
+#if SECURE_MEMORY_HANDLING
+		memset((void *)ptr, 0, size);
+		/* TODO: flush CPU caches? */
+		ENV_BUG_ON(munlock(ptr, size));
+#endif
+ free((void*)ptr);
+ }
+}
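+
+/* Usage sketch (illustrative; md_size is a hypothetical value). Unlike
+ * free(), the secure variant takes the allocation size so the region can be
+ * scrubbed and munlock()ed when SECURE_MEMORY_HANDLING is enabled:
+ *
+ *	void *md = env_secure_alloc(md_size);
+ *	if (md) {
+ *		// ... store metadata ...
+ *		env_secure_free(md, md_size);
+ *	}
+ */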
+
+static inline uint64_t env_get_free_memory(void)
+{
+ return (uint64_t)(-1);
+}
+
+/* ALLOCATOR */
+typedef struct _env_allocator env_allocator;
+
+env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...);
+
+void env_allocator_destroy(env_allocator *allocator);
+
+void *env_allocator_new(env_allocator *allocator);
+
+void env_allocator_del(env_allocator *allocator, void *item);
+
+/* MUTEX */
+typedef struct {
+ pthread_mutex_t m;
+} env_mutex;
+
+#define env_cond_resched() ({})
+
+static inline int env_mutex_init(env_mutex *mutex)
+{
+	if (pthread_mutex_init(&mutex->m, NULL))
+ return 1;
+
+ return 0;
+}
+
+static inline void env_mutex_lock(env_mutex *mutex)
+{
+ ENV_BUG_ON(pthread_mutex_lock(&mutex->m));
+}
+
+static inline int env_mutex_trylock(env_mutex *mutex)
+{
+ return pthread_mutex_trylock(&mutex->m);
+}
+
+static inline int env_mutex_lock_interruptible(env_mutex *mutex)
+{
+ env_mutex_lock(mutex);
+ return 0;
+}
+
+static inline void env_mutex_unlock(env_mutex *mutex)
+{
+ ENV_BUG_ON(pthread_mutex_unlock(&mutex->m));
+}
+
+static inline int env_mutex_destroy(env_mutex *mutex)
+{
+	if (pthread_mutex_destroy(&mutex->m))
+ return 1;
+
+ return 0;
+}
+
+/* RECURSIVE MUTEX */
+typedef env_mutex env_rmutex;
+
+static inline int env_rmutex_init(env_rmutex *rmutex)
+{
+ pthread_mutexattr_t attr;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+	pthread_mutex_init(&rmutex->m, &attr);
+	pthread_mutexattr_destroy(&attr);
+
+ return 0;
+}
+
+static inline void env_rmutex_lock(env_rmutex *rmutex)
+{
+ env_mutex_lock(rmutex);
+}
+
+static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)
+{
+ return env_mutex_lock_interruptible(rmutex);
+}
+
+static inline void env_rmutex_unlock(env_rmutex *rmutex)
+{
+ env_mutex_unlock(rmutex);
+}
+
+static inline int env_rmutex_destroy(env_rmutex *rmutex)
+{
+	if (pthread_mutex_destroy(&rmutex->m))
+ return 1;
+
+ return 0;
+}
+
+/* RW SEMAPHORE */
+typedef struct {
+ pthread_rwlock_t lock;
+} env_rwsem;
+
+static inline int env_rwsem_init(env_rwsem *s)
+{
+ return pthread_rwlock_init(&s->lock, NULL);
+}
+
+static inline void env_rwsem_up_read(env_rwsem *s)
+{
+ pthread_rwlock_unlock(&s->lock);
+}
+
+static inline void env_rwsem_down_read(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_rdlock(&s->lock));
+}
+
+static inline int env_rwsem_down_read_trylock(env_rwsem *s)
+{
+ return pthread_rwlock_tryrdlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline void env_rwsem_up_write(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
+}
+
+static inline void env_rwsem_down_write(env_rwsem *s)
+{
+ ENV_BUG_ON(pthread_rwlock_wrlock(&s->lock));
+}
+
+static inline int env_rwsem_down_write_trylock(env_rwsem *s)
+{
+ return pthread_rwlock_trywrlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline int env_rwsem_destroy(env_rwsem *s)
+{
+ return pthread_rwlock_destroy(&s->lock);
+}
+
+/* COMPLETION */
+struct completion {
+ sem_t sem;
+};
+
+typedef struct completion env_completion;
+
+static inline void env_completion_init(env_completion *completion)
+{
+ sem_init(&completion->sem, 0, 0);
+}
+
+static inline void env_completion_wait(env_completion *completion)
+{
+ sem_wait(&completion->sem);
+}
+
+static inline void env_completion_complete(env_completion *completion)
+{
+ sem_post(&completion->sem);
+}
+
+static inline void env_completion_destroy(env_completion *completion)
+{
+ sem_destroy(&completion->sem);
+}
+
+/* ATOMIC VARIABLES */
+typedef struct {
+ volatile int counter;
+} env_atomic;
+
+typedef struct {
+ volatile long counter;
+} env_atomic64;
+
+static inline int env_atomic_read(const env_atomic *a)
+{
+ return a->counter; /* TODO */
+}
+
+static inline void env_atomic_set(env_atomic *a, int i)
+{
+ a->counter = i; /* TODO */
+}
+
+static inline void env_atomic_add(int i, env_atomic *a)
+{
+ __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic_sub(int i, env_atomic *a)
+{
+ __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic_inc(env_atomic *a)
+{
+ env_atomic_add(1, a);
+}
+
+static inline void env_atomic_dec(env_atomic *a)
+{
+ env_atomic_sub(1, a);
+}
+
+static inline bool env_atomic_dec_and_test(env_atomic *a)
+{
+ return __sync_sub_and_fetch(&a->counter, 1) == 0;
+}
+
+static inline int env_atomic_add_return(int i, env_atomic *a)
+{
+ return __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline int env_atomic_sub_return(int i, env_atomic *a)
+{
+ return __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline int env_atomic_inc_return(env_atomic *a)
+{
+ return env_atomic_add_return(1, a);
+}
+
+static inline int env_atomic_dec_return(env_atomic *a)
+{
+ return env_atomic_sub_return(1, a);
+}
+
+static inline int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
+{
+ return __sync_val_compare_and_swap(&a->counter, old, new_value);
+}
+
+static inline int env_atomic_add_unless(env_atomic *a, int i, int u)
+{
+	int c, old;
+
+	c = env_atomic_read(a);
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = env_atomic_cmpxchg(a, c, c + i);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != u;
+}
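+
+/* Example (hedged; "obj" and its refcnt field are hypothetical): a common
+ * use of add_unless semantics is taking a reference only while the count is
+ * still non-zero:
+ *
+ *	if (!env_atomic_add_unless(&obj->refcnt, 1, 0))
+ *		return NULL;	// object already on its way down
+ */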
+
+static inline long env_atomic64_read(const env_atomic64 *a)
+{
+ return a->counter; /* TODO */
+}
+
+static inline void env_atomic64_set(env_atomic64 *a, long i)
+{
+ a->counter = i; /* TODO */
+}
+
+static inline void env_atomic64_add(long i, env_atomic64 *a)
+{
+ __sync_add_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic64_sub(long i, env_atomic64 *a)
+{
+ __sync_sub_and_fetch(&a->counter, i);
+}
+
+static inline void env_atomic64_inc(env_atomic64 *a)
+{
+ env_atomic64_add(1, a);
+}
+
+static inline void env_atomic64_dec(env_atomic64 *a)
+{
+ env_atomic64_sub(1, a);
+}
+
+static inline long env_atomic64_inc_return(env_atomic64 *a)
+{
+ return __sync_add_and_fetch(&a->counter, 1);
+}
+
+static inline long env_atomic64_cmpxchg(env_atomic64 *a, long old_v, long new_v)
+{
+ return __sync_val_compare_and_swap(&a->counter, old_v, new_v);
+}
+
+/* SPIN LOCKS */
+typedef struct {
+ pthread_spinlock_t lock;
+} env_spinlock;
+
+static inline int env_spinlock_init(env_spinlock *l)
+{
+ return pthread_spin_init(&l->lock, 0);
+}
+
+static inline int env_spinlock_trylock(env_spinlock *l)
+{
+ return pthread_spin_trylock(&l->lock) ? -OCF_ERR_NO_LOCK : 0;
+}
+
+static inline void env_spinlock_lock(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_lock(&l->lock));
+}
+
+static inline void env_spinlock_unlock(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_unlock(&l->lock));
+}
+
+#define env_spinlock_lock_irqsave(l, flags) \
+		do { \
+			(void)(flags); \
+			env_spinlock_lock(l); \
+		} while (0)
+
+#define env_spinlock_unlock_irqrestore(l, flags) \
+		do { \
+			(void)(flags); \
+			env_spinlock_unlock(l); \
+		} while (0)
+
+static inline void env_spinlock_destroy(env_spinlock *l)
+{
+ ENV_BUG_ON(pthread_spin_destroy(&l->lock));
+}
+
+/* RW LOCKS */
+typedef struct {
+ pthread_rwlock_t lock;
+} env_rwlock;
+
+static inline void env_rwlock_init(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_init(&l->lock, NULL));
+}
+
+static inline void env_rwlock_read_lock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_rdlock(&l->lock));
+}
+
+static inline void env_rwlock_read_unlock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
+}
+
+static inline void env_rwlock_write_lock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_wrlock(&l->lock));
+}
+
+static inline void env_rwlock_write_unlock(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
+}
+
+static inline void env_rwlock_destroy(env_rwlock *l)
+{
+ ENV_BUG_ON(pthread_rwlock_destroy(&l->lock));
+}
+
+/* BIT OPERATIONS */
+static inline void env_bit_set(int nr, volatile void *addr)
+{
+ char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ __sync_or_and_fetch(byte, mask);
+}
+
+static inline void env_bit_clear(int nr, volatile void *addr)
+{
+ char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ mask = ~mask;
+ __sync_and_and_fetch(byte, mask);
+}
+
+static inline bool env_bit_test(int nr, const volatile unsigned long *addr)
+{
+ const char *byte = (char *)addr + (nr >> 3);
+ char mask = 1 << (nr & 7);
+
+ return !!(*byte & mask);
+}
+
+/* SCHEDULING */
+static inline int env_in_interrupt(void)
+{
+ return 0;
+}
+
+static inline uint64_t env_get_tick_count(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+	return (uint64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+static inline uint64_t env_ticks_to_nsecs(uint64_t j)
+{
+ return j * 1000;
+}
+
+static inline uint64_t env_ticks_to_msecs(uint64_t j)
+{
+ return j / 1000;
+}
+
+static inline uint64_t env_ticks_to_secs(uint64_t j)
+{
+ return j / 1000000;
+}
+
+static inline uint64_t env_secs_to_ticks(uint64_t j)
+{
+ return j * 1000000;
+}
+
+/* SORTING */
+/* Note: swap_fn is accepted for API compatibility but ignored; qsort()
+ * performs its own element swapping. */
+static inline void env_sort(void *base, size_t num, size_t size,
+		int (*cmp_fn)(const void *, const void *),
+		void (*swap_fn)(void *, void *, int size))
+{
+ qsort(base, num, size, cmp_fn);
+}
+
+/* TIME */
+static inline void env_msleep(uint64_t n)
+{
+ usleep(n * 1000);
+}
+
+struct env_timeval {
+ uint64_t sec, usec;
+};
+
+uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);
+
+unsigned env_get_execution_context(void);
+void env_put_execution_context(unsigned ctx);
+unsigned env_get_execution_context_count(void);
+
+#endif /* __OCF_ENV_H__ */
diff --git a/src/spdk/ocf/env/posix/ocf_env_headers.h b/src/spdk/ocf/env/posix/ocf_env_headers.h
new file mode 100644
index 000000000..2e4c9e182
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env_headers.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright(c) 2019-2020 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_HEADERS_H__
+#define __OCF_ENV_HEADERS_H__
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+/* TODO: Move prefix printing to context logger. */
+#define OCF_LOGO "OCF"
+#define OCF_PREFIX_SHORT "[" OCF_LOGO "] "
+#define OCF_PREFIX_LONG "Open CAS Framework"
+
+#define OCF_VERSION_MAIN 20
+#define OCF_VERSION_MAJOR 3
+#define OCF_VERSION_MINOR 0
+
+#endif /* __OCF_ENV_HEADERS_H__ */
diff --git a/src/spdk/ocf/env/posix/ocf_env_list.h b/src/spdk/ocf/env/posix/ocf_env_list.h
new file mode 100644
index 000000000..53ce53e46
--- /dev/null
+++ b/src/spdk/ocf/env/posix/ocf_env_list.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __OCF_ENV_LIST__
+#define __OCF_ENV_LIST__
+
+#define LIST_POISON1 ((void *)0x101)
+#define LIST_POISON2 ((void *)0x202)
+
+/**
+ * List entry structure mimicking the Linux kernel one.
+ */
+struct list_head {
+ struct list_head *next;
+ struct list_head *prev;
+};
+
+/**
+ * Initialize an empty list.
+ */
+#define INIT_LIST_HEAD(l) do { (l)->prev = l; (l)->next = l; } while (0)
+
+/**
+ * Add item to list head.
+ * @param it list entry to be added
+ * @param l1 list main node (head)
+ */
+static inline void list_add(struct list_head *it, struct list_head *l1)
+{
+ it->prev = l1;
+ it->next = l1->next;
+
+ l1->next->prev = it;
+ l1->next = it;
+}
+
+/**
+ * Add item to list tail.
+ * @param it list entry to be added
+ * @param l1 list main node (head)
+ */
+static inline void list_add_tail(struct list_head *it, struct list_head *l1)
+{
+ it->prev = l1->prev;
+ it->next = l1;
+
+ l1->prev->next = it;
+ l1->prev = it;
+}
+
+/**
+ * Check whether a list is empty (returns true if so).
+ * @param l1 list main node (head)
+ */
+static inline int list_empty(struct list_head *l1)
+{
+ return l1->next == l1;
+}
+
+/**
+ * delete an entry from a list
+ * @param it list entry to be deleted
+ */
+static inline void list_del(struct list_head *it)
+{
+ it->next->prev = it->prev;
+ it->prev->next = it->next;
+}
+
+/**
+ * Move element to list head.
+ * @param it list entry to be moved
+ * @param l1 list main node (head)
+ */
+static inline void list_move(struct list_head *it, struct list_head *l1)
+{
+ list_del(it);
+ list_add(it, l1);
+}
+
+/**
+ * Move element to list tail.
+ * @param it list entry to be moved
+ * @param l1 list main node (head)
+ */
+static inline void list_move_tail(struct list_head *it, struct list_head *l1)
+{
+ list_del(it);
+ list_add_tail(it, l1);
+}
+
+/**
+ * Extract an entry.
+ * @param list_head_i list head item, from which entry is extracted
+ * @param item_type type (struct) of list entry
+ * @param field_name name of list_head field within item_type
+ */
+#define list_entry(list_head_i, item_type, field_name) \
+ (item_type *)(((void*)(list_head_i)) - offsetof(item_type, field_name))
+
+#define list_first_entry(list_head_i, item_type, field_name) \
+ list_entry((list_head_i)->next, item_type, field_name)
+
+/**
+ * Iterate over a list.
+ * @param iterator uninitialized list_head pointer, to be used as iterator
+ * @param plist list head (main node)
+ */
+#define list_for_each(iterator, plist) \
+ for (iterator = (plist)->next; \
+ (iterator)->next != (plist)->next; \
+ iterator = (iterator)->next)
+
+/**
+ * Safe version of list_for_each which works even if entries are deleted during
+ * loop.
+ * @param iterator uninitialized list_head pointer, to be used as iterator
+ * @param q another uninitialized list_head, used as helper
+ * @param plist list head (main node)
+ */
+/*
+ * The algorithm handles the situation where q is deleted during the loop.
+ * Consider a 3-element list with head h:
+ *
+ *      h -> 1 -> 2 -> 3 ->
+ * 1.        i    q
+ *
+ * 2.             i    q
+ *
+ * 3.                  q    i
+ */
+#define list_for_each_safe(iterator, q, plist) \
+ for (iterator = (q = (plist)->next->next)->prev; \
+ (q) != (plist)->next; \
+ iterator = (q = (q)->next)->prev)
+
+#define _list_entry_helper(item, head, field_name) \
+ list_entry(head, typeof(*item), field_name)
+
+/**
+ * Iterate over list entries.
+ * @param item pointer to list item (iterator)
+ * @param plist pointer to list_head item
+ * @param field_name name of list_head field in list entry
+ */
+#define list_for_each_entry(item, plist, field_name) \
+ for (item = _list_entry_helper(item, (plist)->next, field_name); \
+ _list_entry_helper(item, (item)->field_name.next, field_name) !=\
+ _list_entry_helper(item, (plist)->next, field_name); \
+ item = _list_entry_helper(item, (item)->field_name.next, field_name))
+
+/**
+ * Safe version of list_for_each_entry which works even if entries are deleted
+ * during loop.
+ * @param item pointer to list item (iterator)
+ * @param q another pointer to list item, used as helper
+ * @param plist pointer to list_head item
+ * @param field_name name of list_head field in list entry
+ */
+#define list_for_each_entry_safe(item, q, plist, field_name) \
+ for (item = _list_entry_helper(item, (plist)->next, field_name), \
+ q = _list_entry_helper(item, (item)->field_name.next, field_name); \
+ _list_entry_helper(item, (item)->field_name.next, field_name) != \
+ _list_entry_helper(item, (plist)->next, field_name); \
+ item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))
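+
+/* Usage sketch (illustrative only; "struct item" is invented here):
+ *
+ *	struct item { int val; struct list_head node; };
+ *	struct list_head head;
+ *	struct item *it, *tmp;
+ *
+ *	INIT_LIST_HEAD(&head);
+ *	// ... list_add_tail() some items ...
+ *	list_for_each_entry_safe(it, tmp, &head, node) {
+ *		list_del(&it->node);
+ *		free(it);
+ *	}
+ */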
+
+#endif // __OCF_ENV_LIST__