summaryrefslogtreecommitdiffstats
path: root/src/spdk/ocf/tests/functional
diff options
context:
space:
mode:
Diffstat (limited to 'src/spdk/ocf/tests/functional')
-rw-r--r--src/spdk/ocf/tests/functional/.gitignore9
-rwxr-xr-xsrc/spdk/ocf/tests/functional/Makefile52
-rw-r--r--src/spdk/ocf/tests/functional/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/config/random.cfg2
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/ocf.py30
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/cache.py593
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/cleaner.py43
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/core.py227
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/ctx.py122
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/data.py225
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/io.py118
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/logger.py182
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py102
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/queue.py105
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/shared.py160
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py39
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/stats/core.py21
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py88
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/types/volume.py361
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/utils.py173
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c36
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c42
-rw-r--r--src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c12
-rw-r--r--src/spdk/ocf/tests/functional/pytest.ini2
-rw-r--r--src/spdk/ocf/tests/functional/tests/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/basic/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py86
-rw-r--r--src/spdk/ocf/tests/functional/tests/conftest.py39
-rw-r--r--src/spdk/ocf/tests/functional/tests/engine/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/engine/test_pp.py305
-rw-r--r--src/spdk/ocf/tests/functional/tests/engine/test_wo.py213
-rw-r--r--src/spdk/ocf/tests/functional/tests/eviction/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py80
-rw-r--r--src/spdk/ocf/tests/functional/tests/flush/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/management/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/management/test_add_remove.py278
-rw-r--r--src/spdk/ocf/tests/functional/tests/management/test_change_params.py135
-rw-r--r--src/spdk/ocf/tests/functional/tests/management/test_start_stop.py545
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/__init__.py0
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/conftest.py98
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py315
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py155
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/test_negative_io.py205
-rw-r--r--src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py215
-rw-r--r--src/spdk/ocf/tests/functional/tests/utils/random.py95
-rwxr-xr-xsrc/spdk/ocf/tests/functional/utils/configure_random.py13
49 files changed, 5521 insertions, 0 deletions
diff --git a/src/spdk/ocf/tests/functional/.gitignore b/src/spdk/ocf/tests/functional/.gitignore
new file mode 100644
index 000000000..76988e6da
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/.gitignore
@@ -0,0 +1,9 @@
+__pycache__
+pyocf/__pycache__
+pyocf/libocf.so
+*.o
+pyocf/ocf/*
+*.pyc
+*.gcov
+*.gcda
+*.gcno
diff --git a/src/spdk/ocf/tests/functional/Makefile b/src/spdk/ocf/tests/functional/Makefile
new file mode 100755
index 000000000..c074d23de
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/Makefile
@@ -0,0 +1,52 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+PWD=$(shell pwd)
+OCFDIR=$(PWD)/../../
+ADAPTERDIR=$(PWD)/pyocf
+SRCDIR=$(ADAPTERDIR)/ocf/src
+INCDIR=$(ADAPTERDIR)/ocf/include
+WRAPDIR=$(ADAPTERDIR)/wrappers
+
+CC=gcc
+CFLAGS=-g -Wall -I$(INCDIR) -I$(SRCDIR)/ocf/env
+LDFLAGS=-pthread -lz
+
+SRC=$(shell find $(SRCDIR) $(WRAPDIR) -name \*.c)
+OBJS=$(patsubst %.c, %.o, $(SRC))
+OCFLIB=$(ADAPTERDIR)/libocf.so
+
+all: | sync config_random
+ $(MAKE) $(OCFLIB)
+
+$(OCFLIB): $(OBJS)
+ @echo "Building $@"
+ @$(CC) -coverage -shared -o $@ $(CFLAGS) $^ -fPIC $(LDFLAGS)
+
+%.o: %.c
+ @echo "Compiling $@"
+ @$(CC) -coverage -c $(CFLAGS) -o $@ -fPIC $^ $(LDFLAGS)
+
+sync:
+ @echo "Syncing OCF sources"
+ @mkdir -p $(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) inc O=$(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) src O=$(ADAPTERDIR)/ocf
+ @$(MAKE) -C $(OCFDIR) env O=$(ADAPTERDIR)/ocf OCF_ENV=posix
+
+config_random:
+ @python3 utils/configure_random.py
+
+clean:
+ @rm -rf $(OCFLIB) $(OBJS)
+ @echo " CLEAN "
+
+distclean: clean
+ @rm -rf $(OCFLIB) $(OBJS)
+ @rm -rf $(SRCDIR)/ocf
+ @rm -rf $(INCDIR)/ocf
+ @echo " DISTCLEAN "
+
+.PHONY: all clean sync config_random distclean
diff --git a/src/spdk/ocf/tests/functional/__init__.py b/src/spdk/ocf/tests/functional/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/__init__.py
diff --git a/src/spdk/ocf/tests/functional/config/random.cfg b/src/spdk/ocf/tests/functional/config/random.cfg
new file mode 100644
index 000000000..f7ab21256
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/config/random.cfg
@@ -0,0 +1,2 @@
+# This file content will be generated by utils/configure_random.py
+# triggered from the Makefile
diff --git a/src/spdk/ocf/tests/functional/pyocf/__init__.py b/src/spdk/ocf/tests/functional/pyocf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/ocf.py b/src/spdk/ocf/tests/functional/pyocf/ocf.py
new file mode 100644
index 000000000..b24d8265f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/ocf.py
@@ -0,0 +1,30 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+from ctypes import c_void_p, cdll
+import inspect
+import os
+
+
+class OcfLib:
+    __lib__ = None  # class-level cache so the shared library is loaded at most once
+
+    @classmethod
+    def getInstance(cls):
+        if cls.__lib__ is None:
+            lib = cdll.LoadLibrary(
+                os.path.join(
+                    os.path.dirname(inspect.getfile(inspect.currentframe())),
+                    "libocf.so",
+                )
+            )
+            lib.ocf_volume_get_uuid.restype = c_void_p  # opaque pointer, not default c_int
+            lib.ocf_volume_get_uuid.argtypes = [c_void_p]
+
+            lib.ocf_core_get_front_volume.restype = c_void_p
+            lib.ocf_core_get_front_volume.argtypes = [c_void_p]
+
+            cls.__lib__ = lib
+
+        return cls.__lib__
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/__init__.py b/src/spdk/ocf/tests/functional/pyocf/types/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/cache.py b/src/spdk/ocf/tests/functional/pyocf/types/cache.py
new file mode 100644
index 000000000..1a74a05f3
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/cache.py
@@ -0,0 +1,593 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_int,
+ c_char,
+ c_char_p,
+ c_void_p,
+ c_bool,
+ c_uint8,
+ Structure,
+ byref,
+ cast,
+ create_string_buffer,
+)
+from enum import IntEnum
+from datetime import timedelta
+
+from ..ocf import OcfLib
+from .shared import (
+ Uuid,
+ OcfError,
+ CacheLineSize,
+ CacheLines,
+ OcfCompletion,
+ SeqCutOffPolicy,
+)
+from ..utils import Size, struct_to_dict
+from .core import Core
+from .queue import Queue
+from .stats.cache import CacheInfo
+from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
+
+
+class Backfill(Structure):
+ _fields_ = [("_max_queue_size", c_uint32), ("_queue_unblock_size", c_uint32)]
+
+
+class CacheConfig(Structure):
+ MAX_CACHE_NAME_SIZE = 32
+ _fields_ = [
+ ("_name", c_char * MAX_CACHE_NAME_SIZE),
+ ("_cache_mode", c_uint32),
+ ("_eviction_policy", c_uint32),
+ ("_promotion_policy", c_uint32),
+ ("_cache_line_size", c_uint64),
+ ("_metadata_layout", c_uint32),
+ ("_metadata_volatile", c_bool),
+ ("_backfill", Backfill),
+ ("_locked", c_bool),
+ ("_pt_unaligned_io", c_bool),
+ ("_use_submit_io_fast", c_bool),
+ ]
+
+
+class CacheDeviceConfig(Structure):
+ _fields_ = [
+ ("_uuid", Uuid),
+ ("_volume_type", c_uint8),
+ ("_cache_line_size", c_uint64),
+ ("_force", c_bool),
+ ("_min_free_ram", c_uint64),
+ ("_perform_test", c_bool),
+ ("_discard_on_start", c_bool),
+ ]
+
+
+class ConfValidValues:
+ promotion_nhit_insertion_threshold_range = range(2, 1000)
+ promotion_nhit_trigger_threshold_range = range(0, 100)
+
+
+class CacheMode(IntEnum):
+    WT = 0  # write-through
+    WB = 1  # write-back
+    WA = 2  # write-around
+    PT = 3  # pass-through
+    WI = 4  # write-invalidate
+    WO = 5  # write-only
+    DEFAULT = WT
+
+    def lazy_write(self):
+        return self.value in [CacheMode.WB, CacheMode.WO]  # modes that may hold dirty data
+
+    def write_insert(self):
+        return self.value not in [CacheMode.PT, CacheMode.WA, CacheMode.WI]  # modes that cache writes
+
+    def read_insert(self):
+        return self.value not in [CacheMode.PT, CacheMode.WO]  # modes that cache reads
+
+
+class EvictionPolicy(IntEnum):
+ LRU = 0
+ DEFAULT = LRU
+
+
+class PromotionPolicy(IntEnum):
+ ALWAYS = 0
+ NHIT = 1
+ DEFAULT = ALWAYS
+
+
+class NhitParams(IntEnum):
+ INSERTION_THRESHOLD = 0
+ TRIGGER_THRESHOLD = 1
+
+
+class CleaningPolicy(IntEnum):
+ NOP = 0
+ ALRU = 1
+ ACP = 2
+ DEFAULT = ALRU
+
+
+class AlruParams(IntEnum):
+ WAKE_UP_TIME = 0
+ STALE_BUFFER_TIME = 1
+ FLUSH_MAX_BUFFERS = 2
+ ACTIVITY_THRESHOLD = 3
+
+
+class AcpParams(IntEnum):
+ WAKE_UP_TIME = 0
+ FLUSH_MAX_BUFFERS = 1
+
+
+class MetadataLayout(IntEnum):
+ STRIPING = 0
+ SEQUENTIAL = 1
+ DEFAULT = STRIPING
+
+
+class Cache:
+ DEFAULT_BACKFILL_QUEUE_SIZE = 65536
+ DEFAULT_BACKFILL_UNBLOCK = 60000
+ DEFAULT_PT_UNALIGNED_IO = False
+ DEFAULT_USE_SUBMIT_FAST = False
+
+ def __init__(
+ self,
+ owner,
+ name: str = "cache",
+ cache_mode: CacheMode = CacheMode.DEFAULT,
+ eviction_policy: EvictionPolicy = EvictionPolicy.DEFAULT,
+ promotion_policy: PromotionPolicy = PromotionPolicy.DEFAULT,
+ cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
+ metadata_layout: MetadataLayout = MetadataLayout.DEFAULT,
+ metadata_volatile: bool = False,
+ max_queue_size: int = DEFAULT_BACKFILL_QUEUE_SIZE,
+ queue_unblock_size: int = DEFAULT_BACKFILL_UNBLOCK,
+ locked: bool = False,
+ pt_unaligned_io: bool = DEFAULT_PT_UNALIGNED_IO,
+ use_submit_fast: bool = DEFAULT_USE_SUBMIT_FAST,
+ ):
+ self.device = None
+ self.started = False
+ self.owner = owner
+ self.cache_line_size = cache_line_size
+
+ self.cfg = CacheConfig(
+ _name=name.encode("ascii"),
+ _cache_mode=cache_mode,
+ _eviction_policy=eviction_policy,
+ _promotion_policy=promotion_policy,
+ _cache_line_size=cache_line_size,
+ _metadata_layout=metadata_layout,
+ _metadata_volatile=metadata_volatile,
+ _backfill=Backfill(
+ _max_queue_size=max_queue_size, _queue_unblock_size=queue_unblock_size
+ ),
+ _locked=locked,
+ _pt_unaligned_io=pt_unaligned_io,
+ _use_submit_fast=use_submit_fast,
+ )
+ self.cache_handle = c_void_p()
+ self._as_parameter_ = self.cache_handle
+ self.io_queues = []
+ self.cores = []
+
+ def start_cache(self, default_io_queue: Queue = None, mngt_queue: Queue = None):
+ status = self.owner.lib.ocf_mngt_cache_start(
+ self.owner.ctx_handle, byref(self.cache_handle), byref(self.cfg)
+ )
+ if status:
+ raise OcfError("Creating cache instance failed", status)
+ self.owner.caches.append(self)
+
+ self.mngt_queue = mngt_queue or Queue(self, "mgmt-{}".format(self.get_name()))
+
+ if default_io_queue:
+ self.io_queues += [default_io_queue]
+ else:
+ self.io_queues += [Queue(self, "default-io-{}".format(self.get_name()))]
+
+ status = self.owner.lib.ocf_mngt_cache_set_mngt_queue(self, self.mngt_queue)
+ if status:
+ raise OcfError("Error setting management queue", status)
+
+ self.started = True
+
+ def change_cache_mode(self, cache_mode: CacheMode):
+ self.write_lock()
+ status = self.owner.lib.ocf_mngt_cache_set_mode(self.cache_handle, cache_mode)
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error changing cache mode", status)
+
+ def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
+ self.cache_handle, cleaning_policy
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error changing cleaning policy", status)
+
+ def set_cleaning_policy_param(
+ self, cleaning_policy: CleaningPolicy, param_id, param_value
+ ):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
+ self.cache_handle, cleaning_policy, param_id, param_value
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error setting cleaning policy param", status)
+
+ def set_promotion_policy(self, promotion_policy: PromotionPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_set_policy(
+ self.cache_handle, promotion_policy
+ )
+
+ self.write_unlock()
+ if status:
+ raise OcfError("Error setting promotion policy", status)
+
+ def get_promotion_policy_param(self, promotion_type, param_id):
+ self.read_lock()
+
+ param_value = c_uint64()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_get_param(
+ self.cache_handle, promotion_type, param_id, byref(param_value)
+ )
+
+ self.read_unlock()
+ if status:
+ raise OcfError("Error getting promotion policy parameter", status)
+
+ return param_value
+
+ def set_promotion_policy_param(self, promotion_type, param_id, param_value):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_cache_promotion_set_param(
+ self.cache_handle, promotion_type, param_id, param_value
+ )
+
+ self.write_unlock()
+ if status:
+ raise OcfError("Error setting promotion policy parameter", status)
+
+ def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
+ self.write_lock()
+
+ status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
+ self.cache_handle, policy
+ )
+
+ self.write_unlock()
+
+ if status:
+ raise OcfError("Error setting cache seq cut off policy", status)
+
+ def configure_device(
+ self, device, force=False, perform_test=True, cache_line_size=None
+ ):
+ self.device = device
+ self.device_name = device.uuid
+ self.dev_cfg = CacheDeviceConfig(
+ _uuid=Uuid(
+ _data=cast(
+ create_string_buffer(self.device_name.encode("ascii")), c_char_p
+ ),
+ _size=len(self.device_name) + 1,
+ ),
+ _volume_type=device.type_id,
+ _cache_line_size=cache_line_size
+ if cache_line_size
+ else self.cache_line_size,
+ _force=force,
+ _min_free_ram=0,
+ _perform_test=perform_test,
+ _discard_on_start=False,
+ )
+
+ def attach_device(
+ self, device, force=False, perform_test=False, cache_line_size=None
+ ):
+ self.configure_device(device, force, perform_test, cache_line_size)
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+ device.owner.lib.ocf_mngt_cache_attach(
+ self.cache_handle, byref(self.dev_cfg), c, None
+ )
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Attaching cache device failed", c.results["error"])
+
+ def load_cache(self, device):
+ self.configure_device(device)
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ device.owner.lib.ocf_mngt_cache_load(
+ self.cache_handle, byref(self.dev_cfg), c, None
+ )
+
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Loading cache device failed", c.results["error"])
+
+ @classmethod
+ def load_from_device(cls, device, name="cache"):
+ c = cls(name=name, owner=device.owner)
+
+ c.start_cache()
+ try:
+ c.load_cache(device)
+ except: # noqa E722
+ c.stop()
+ raise
+
+ return c
+
+ @classmethod
+ def start_on_device(cls, device, **kwargs):
+ c = cls(owner=device.owner, **kwargs)
+
+ c.start_cache()
+ try:
+ c.attach_device(device, force=True)
+ except: # noqa E722
+ c.stop()
+ raise
+
+ return c
+
+ def put(self):
+ self.owner.lib.ocf_mngt_cache_put(self.cache_handle)
+
+ def get(self):
+ status = self.owner.lib.ocf_mngt_cache_get(self.cache_handle)
+ if status:
+ raise OcfError("Couldn't get cache instance", status)
+
+ def read_lock(self):
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_read_lock(self.cache_handle, c, None)
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Couldn't lock cache instance", c.results["error"])
+
+ def write_lock(self):
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_lock(self.cache_handle, c, None)
+ c.wait()
+ if c.results["error"]:
+ raise OcfError("Couldn't lock cache instance", c.results["error"])
+
+ def read_unlock(self):
+ self.owner.lib.ocf_mngt_cache_read_unlock(self.cache_handle)
+
+ def write_unlock(self):
+ self.owner.lib.ocf_mngt_cache_unlock(self.cache_handle)
+
+ def add_core(self, core: Core):
+ self.write_lock()
+
+ c = OcfCompletion(
+ [
+ ("cache", c_void_p),
+ ("core", c_void_p),
+ ("priv", c_void_p),
+ ("error", c_int),
+ ]
+ )
+
+ self.owner.lib.ocf_mngt_cache_add_core(
+ self.cache_handle, byref(core.get_cfg()), c, None
+ )
+
+ c.wait()
+ if c.results["error"]:
+ self.write_unlock()
+ raise OcfError("Failed adding core", c.results["error"])
+
+ core.cache = self
+ core.handle = c.results["core"]
+ self.cores.append(core)
+
+ self.write_unlock()
+
+ def remove_core(self, core: Core):
+ self.write_lock()
+
+ c = OcfCompletion([("priv", c_void_p), ("error", c_int)])
+
+ self.owner.lib.ocf_mngt_cache_remove_core(core.handle, c, None)
+
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Failed removing core", c.results["error"])
+
+ self.cores.remove(core)
+
+ def get_stats(self):
+ cache_info = CacheInfo()
+ usage = UsageStats()
+ req = RequestsStats()
+ block = BlocksStats()
+ errors = ErrorsStats()
+
+ self.read_lock()
+
+ status = self.owner.lib.ocf_cache_get_info(self.cache_handle, byref(cache_info))
+ if status:
+ self.read_unlock()
+ raise OcfError("Failed getting cache info", status)
+
+ status = self.owner.lib.ocf_stats_collect_cache(
+ self.cache_handle, byref(usage), byref(req), byref(block), byref(errors)
+ )
+ if status:
+ self.read_unlock()
+ raise OcfError("Failed getting stats", status)
+
+ line_size = CacheLineSize(cache_info.cache_line_size)
+ cache_name = self.owner.lib.ocf_cache_get_name(self).decode("ascii")
+
+ self.read_unlock()
+ return {
+ "conf": {
+ "attached": cache_info.attached,
+ "volume_type": self.owner.volume_types[cache_info.volume_type],
+ "size": CacheLines(cache_info.size, line_size),
+ "inactive": {
+ "occupancy": CacheLines(
+ cache_info.inactive.occupancy.value, line_size
+ ),
+ "dirty": CacheLines(cache_info.inactive.dirty.value, line_size),
+ "clean": CacheLines(cache_info.inactive.clean.value, line_size),
+ },
+ "occupancy": CacheLines(cache_info.occupancy, line_size),
+ "dirty": CacheLines(cache_info.dirty, line_size),
+ "dirty_initial": CacheLines(cache_info.dirty_initial, line_size),
+ "dirty_for": timedelta(seconds=cache_info.dirty_for),
+ "cache_mode": CacheMode(cache_info.cache_mode),
+ "fallback_pt": {
+ "error_counter": cache_info.fallback_pt.error_counter,
+ "status": cache_info.fallback_pt.status,
+ },
+ "state": cache_info.state,
+ "eviction_policy": EvictionPolicy(cache_info.eviction_policy),
+ "cleaning_policy": CleaningPolicy(cache_info.cleaning_policy),
+ "promotion_policy": PromotionPolicy(cache_info.promotion_policy),
+ "cache_line_size": line_size,
+ "flushed": CacheLines(cache_info.flushed, line_size),
+ "core_count": cache_info.core_count,
+ "metadata_footprint": Size(cache_info.metadata_footprint),
+ "metadata_end_offset": Size(cache_info.metadata_end_offset),
+ "cache_name": cache_name,
+ },
+ "block": struct_to_dict(block),
+ "req": struct_to_dict(req),
+ "usage": struct_to_dict(usage),
+ "errors": struct_to_dict(errors),
+ }
+
+ def reset_stats(self):
+ self.owner.lib.ocf_core_stats_initialize_all(self.cache_handle)
+
+ def get_default_queue(self):
+ if not self.io_queues:
+ raise Exception("No queues added for cache")
+
+ return self.io_queues[0]
+
+ def save(self):
+ if not self.started:
+ raise Exception("Not started!")
+
+ self.get_and_write_lock()
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_save(self.cache_handle, c, None)
+
+ c.wait()
+ self.put_and_write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Failed saving cache", c.results["error"])
+
+ def stop(self):
+ if not self.started:
+ raise Exception("Already stopped!")
+
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+
+ self.owner.lib.ocf_mngt_cache_stop(self.cache_handle, c, None)
+
+ c.wait()
+ if c.results["error"]:
+ self.write_unlock()
+ raise OcfError("Failed stopping cache", c.results["error"])
+
+ self.mngt_queue.put()
+ del self.io_queues[:]
+ self.started = False
+
+ self.write_unlock()
+
+ self.owner.caches.remove(self)
+
+ def flush(self):
+ self.write_lock()
+
+ c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, c, None)
+ c.wait()
+ self.write_unlock()
+
+ if c.results["error"]:
+ raise OcfError("Couldn't flush cache", c.results["error"])
+
+ def get_name(self):
+ self.read_lock()
+
+ try:
+ return str(self.owner.lib.ocf_cache_get_name(self), encoding="ascii")
+ except: # noqa E722
+ raise OcfError("Couldn't get cache name")
+ finally:
+ self.read_unlock()
+
+
+lib = OcfLib.getInstance()
+lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
+lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]
+lib.ocf_cache_get_name.argtypes = [c_void_p]
+lib.ocf_cache_get_name.restype = c_char_p
+lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_cache_cleaning_set_policy.restype = c_int
+lib.ocf_mngt_core_set_seq_cutoff_policy_all.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_core_set_seq_cutoff_policy_all.restype = c_int
+lib.ocf_stats_collect_cache.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_void_p,
+ c_void_p,
+ c_void_p,
+]
+lib.ocf_stats_collect_cache.restype = c_int
+lib.ocf_cache_get_info.argtypes = [c_void_p, c_void_p]
+lib.ocf_cache_get_info.restype = c_int
+lib.ocf_mngt_cache_cleaning_set_param.argtypes = [
+ c_void_p,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+]
+lib.ocf_mngt_cache_cleaning_set_param.restype = c_int
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py b/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py
new file mode 100644
index 000000000..df28290aa
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/cleaner.py
@@ -0,0 +1,43 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, CFUNCTYPE, Structure, c_int
+from .shared import SharedOcfObject
+
+
+class CleanerOps(Structure):
+ INIT = CFUNCTYPE(c_int, c_void_p)
+ KICK = CFUNCTYPE(None, c_void_p)
+ STOP = CFUNCTYPE(None, c_void_p)
+
+ _fields_ = [("init", INIT), ("kick", KICK), ("stop", STOP)]
+
+
+class Cleaner(SharedOcfObject):
+    _instances_ = {}  # per-class instance registry (maintained by the SharedOcfObject base)
+    _fields_ = [("cleaner", c_void_p)]
+
+    def __init__(self):
+        self._as_parameter_ = self.cleaner  # lets ctypes pass this object as the raw handle
+        super().__init__()
+
+    @classmethod
+    def get_ops(cls):
+        return CleanerOps(init=cls._init, kick=cls._kick, stop=cls._stop)  # no-op callback set for tests
+
+    @staticmethod
+    @CleanerOps.INIT
+    def _init(cleaner):
+        return 0  # report success; the test stub has nothing to initialize
+
+    @staticmethod
+    @CleanerOps.KICK
+    def _kick(cleaner):
+        pass  # test stub: cleaning is never actually kicked off
+
+    @staticmethod
+    @CleanerOps.STOP
+    def _stop(cleaner):
+        pass  # test stub: nothing to tear down
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/core.py b/src/spdk/ocf/tests/functional/pyocf/types/core.py
new file mode 100644
index 000000000..0003c0daf
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/core.py
@@ -0,0 +1,227 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import (
+ c_size_t,
+ c_void_p,
+ Structure,
+ c_int,
+ c_uint8,
+ c_uint16,
+ c_uint32,
+ c_uint64,
+ c_char,
+ c_char_p,
+ c_bool,
+ cast,
+ byref,
+ create_string_buffer,
+)
+from datetime import timedelta
+
+from .data import Data
+from .io import Io, IoDir
+from .queue import Queue
+from .shared import Uuid, OcfCompletion, OcfError, SeqCutOffPolicy
+from .stats.core import CoreInfo
+from .stats.shared import UsageStats, RequestsStats, BlocksStats, ErrorsStats
+from .volume import Volume
+from ..ocf import OcfLib
+from ..utils import Size, struct_to_dict
+
+
+class UserMetadata(Structure):
+ _fields_ = [("data", c_void_p), ("size", c_size_t)]
+
+
+class CoreConfig(Structure):
+ MAX_CORE_NAME_SIZE = 32
+ _fields_ = [
+ ("_name", c_char * MAX_CORE_NAME_SIZE),
+ ("_uuid", Uuid),
+ ("_volume_type", c_uint8),
+ ("_try_add", c_bool),
+ ("_seq_cutoff_threshold", c_uint32),
+ ("_user_metadata", UserMetadata),
+ ]
+
+
+class Core:
+ DEFAULT_ID = 4096
+ DEFAULT_SEQ_CUTOFF_THRESHOLD = 1024 * 1024
+
+ def __init__(
+ self,
+ device: Volume,
+ try_add: bool,
+ name: str = "core",
+ seq_cutoff_threshold: int = DEFAULT_SEQ_CUTOFF_THRESHOLD,
+ ):
+ self.cache = None
+ self.device = device
+ self.device_name = device.uuid
+ self.handle = c_void_p()
+ self.cfg = CoreConfig(
+ _uuid=Uuid(
+ _data=cast(
+ create_string_buffer(self.device_name.encode("ascii")),
+ c_char_p,
+ ),
+ _size=len(self.device_name) + 1,
+ ),
+ _name=name.encode("ascii"),
+ _volume_type=self.device.type_id,
+ _try_add=try_add,
+ _seq_cutoff_threshold=seq_cutoff_threshold,
+ _user_metadata=UserMetadata(_data=None, _size=0),
+ )
+
+ @classmethod
+ def using_device(cls, device, **kwargs):
+ c = cls(device=device, try_add=False, **kwargs)
+
+ return c
+
+ def get_cfg(self):
+ return self.cfg
+
+ def get_handle(self):
+ return self.handle
+
+ def new_io(
+ self, queue: Queue, addr: int, length: int, direction: IoDir,
+ io_class: int, flags: int
+ ):
+ if not self.cache:
+ raise Exception("Core isn't attached to any cache")
+
+ io = OcfLib.getInstance().ocf_core_new_io_wrapper(
+ self.handle, queue.handle, addr, length, direction, io_class, flags)
+
+ if io is None:
+ raise Exception("Failed to create io!")
+
+ return Io.from_pointer(io)
+
+ def new_core_io(
+ self, queue: Queue, addr: int, length: int, direction: IoDir,
+ io_class: int, flags: int
+ ):
+ lib = OcfLib.getInstance()
+ volume = lib.ocf_core_get_volume(self.handle)
+ io = lib.ocf_volume_new_io(
+ volume, queue.handle, addr, length, direction, io_class, flags)
+ return Io.from_pointer(io)
+
+ def get_stats(self):
+ core_info = CoreInfo()
+ usage = UsageStats()
+ req = RequestsStats()
+ blocks = BlocksStats()
+ errors = ErrorsStats()
+
+ self.cache.read_lock()
+ status = self.cache.owner.lib.ocf_stats_collect_core(
+ self.handle, byref(usage), byref(req), byref(blocks), byref(errors)
+ )
+ if status:
+ self.cache.read_unlock()
+ raise OcfError("Failed collecting core stats", status)
+
+ status = self.cache.owner.lib.ocf_core_get_info(
+ self.handle, byref(core_info)
+ )
+ if status:
+ self.cache.read_unlock()
+ raise OcfError("Failed getting core stats", status)
+
+ self.cache.read_unlock()
+ return {
+ "size": Size(core_info.core_size_bytes),
+ "dirty_for": timedelta(seconds=core_info.dirty_for),
+ "seq_cutoff_policy": SeqCutOffPolicy(core_info.seq_cutoff_policy),
+ "seq_cutoff_threshold": core_info.seq_cutoff_threshold,
+ "usage": struct_to_dict(usage),
+ "req": struct_to_dict(req),
+ "blocks": struct_to_dict(blocks),
+ "errors": struct_to_dict(errors),
+ }
+
+ def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
+ self.cache.write_lock()
+
+ status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
+ self.handle, policy
+ )
+ if status:
+ self.cache.write_unlock()
+ raise OcfError("Error setting core seq cut off policy", status)
+
+ self.cache.write_unlock()
+
+ def reset_stats(self):
+ self.cache.owner.lib.ocf_core_stats_initialize(self.handle)
+
+ def exp_obj_md5(self):
+ logging.getLogger("pyocf").warning(
+ "Reading whole exported object! This disturbs statistics values"
+ )
+
+ cache_line_size = int(self.cache.get_stats()['conf']['cache_line_size'])
+ read_buffer_all = Data(self.device.size)
+
+ read_buffer = Data(cache_line_size)
+
+ position = 0
+ while position < read_buffer_all.size:
+ io = self.new_io(self.cache.get_default_queue(), position,
+ cache_line_size, IoDir.READ, 0, 0)
+ io.set_data(read_buffer)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ if cmpl.results["err"]:
+ raise Exception("Error reading whole exported object")
+
+ read_buffer_all.copy(read_buffer, position, 0, cache_line_size)
+ position += cache_line_size
+
+ return read_buffer_all.md5()
+
+
+lib = OcfLib.getInstance()
+lib.ocf_core_get_volume.restype = c_void_p
+lib.ocf_volume_new_io.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_uint64,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+ c_uint64,
+]
+lib.ocf_volume_new_io.restype = c_void_p
+lib.ocf_core_get_volume.argtypes = [c_void_p]
+lib.ocf_core_get_volume.restype = c_void_p
+lib.ocf_mngt_core_set_seq_cutoff_policy.argtypes = [c_void_p, c_uint32]
+lib.ocf_mngt_core_set_seq_cutoff_policy.restype = c_int
+lib.ocf_stats_collect_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
+lib.ocf_stats_collect_core.restype = c_int
+lib.ocf_core_get_info.argtypes = [c_void_p, c_void_p]
+lib.ocf_core_get_info.restype = c_int
+lib.ocf_core_new_io_wrapper.argtypes = [
+ c_void_p,
+ c_void_p,
+ c_uint64,
+ c_uint32,
+ c_uint32,
+ c_uint32,
+ c_uint64,
+]
+lib.ocf_core_new_io_wrapper.restype = c_void_p
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/ctx.py b/src/spdk/ocf/tests/functional/pyocf/types/ctx.py
new file mode 100644
index 000000000..14c4b5757
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/ctx.py
@@ -0,0 +1,122 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, Structure, c_char_p, cast, pointer, byref, c_int
+
+from .logger import LoggerOps, Logger
+from .data import DataOps, Data
+from .cleaner import CleanerOps, Cleaner
+from .metadata_updater import MetadataUpdaterOps, MetadataUpdater
+from .shared import OcfError
+from ..ocf import OcfLib
+from .queue import Queue
+from .volume import Volume
+
+
+class OcfCtxOps(Structure):
+ _fields_ = [
+ ("data", DataOps),
+ ("cleaner", CleanerOps),
+ ("metadata_updater", MetadataUpdaterOps),
+ ("logger", LoggerOps),
+ ]
+
+
+class OcfCtxCfg(Structure):
+ _fields_ = [("name", c_char_p), ("ops", OcfCtxOps), ("logger_priv", c_void_p)]
+
+
+class OcfCtx:
+ def __init__(self, lib, name, logger, data, mu, cleaner):
+ self.logger = logger
+ self.data = data
+ self.mu = mu
+ self.cleaner = cleaner
+ self.ctx_handle = c_void_p()
+ self.lib = lib
+ self.volume_types_count = 1
+ self.volume_types = {}
+ self.caches = []
+
+ self.cfg = OcfCtxCfg(
+ name=name,
+ ops=OcfCtxOps(
+ data=self.data.get_ops(),
+ cleaner=self.cleaner.get_ops(),
+ metadata_updater=self.mu.get_ops(),
+ logger=logger.get_ops(),
+ ),
+ logger_priv=cast(pointer(logger.get_priv()), c_void_p),
+ )
+
+ result = self.lib.ocf_ctx_create(byref(self.ctx_handle), byref(self.cfg))
+ if result != 0:
+ raise OcfError("Context initialization failed", result)
+
+ def register_volume_type(self, volume_type):
+ self.volume_types[self.volume_types_count] = volume_type
+ volume_type.type_id = self.volume_types_count
+ volume_type.owner = self
+
+ result = self.lib.ocf_ctx_register_volume_type(
+ self.ctx_handle,
+ self.volume_types_count,
+ byref(self.volume_types[self.volume_types_count].get_props()),
+ )
+ if result != 0:
+ raise OcfError("Volume type registration failed", result)
+
+ self.volume_types_count += 1
+
+ def unregister_volume_type(self, vol_type):
+ if not vol_type.type_id:
+ raise Exception("Already unregistered")
+
+ self.lib.ocf_ctx_unregister_volume_type(
+ self.ctx_handle, vol_type.type_id
+ )
+
+ del self.volume_types[vol_type.type_id]
+
+ def cleanup_volume_types(self):
+ for k, vol_type in list(self.volume_types.items()):
+ if vol_type:
+ self.unregister_volume_type(vol_type)
+
+ def stop_caches(self):
+ for cache in self.caches[:]:
+ cache.stop()
+
+ def exit(self):
+ self.stop_caches()
+ self.cleanup_volume_types()
+
+ self.lib.ocf_ctx_put(self.ctx_handle)
+
+ self.cfg = None
+ self.logger = None
+ self.data = None
+ self.mu = None
+ self.cleaner = None
+ Queue._instances_ = {}
+ Volume._instances_ = {}
+ Data._instances_ = {}
+ Logger._instances_ = {}
+
+
+def get_default_ctx(logger):
+    # Convenience factory: an OcfCtx wired with the default test back-ends
+    # (Data buffers, MetadataUpdater, Cleaner stub) and the caller's logger.
+    return OcfCtx(
+        OcfLib.getInstance(),
+        b"PyOCF default ctx",
+        logger,
+        Data,
+        MetadataUpdater,
+        Cleaner,
+    )
+
+
+lib = OcfLib.getInstance()
+lib.ocf_mngt_cache_get_by_name.argtypes = [c_void_p, c_void_p, c_void_p]
+lib.ocf_mngt_cache_get_by_name.restype = c_int
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/data.py b/src/spdk/ocf/tests/functional/pyocf/types/data.py
new file mode 100644
index 000000000..b032cf3ce
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/data.py
@@ -0,0 +1,225 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ c_uint32,
+ CFUNCTYPE,
+ c_uint64,
+ create_string_buffer,
+ cast,
+ memset,
+ string_at,
+ Structure,
+ c_int,
+ memmove,
+ byref,
+)
+from enum import IntEnum
+from hashlib import md5
+import weakref
+
+from ..utils import print_buffer, Size as S
+
+
class DataSeek(IntEnum):
    # Mirrors the C-side seek mode: from buffer start vs. current position.
    BEGIN = 0
    CURRENT = 1
+
+
class DataOps(Structure):
    """Callback table (ctx_data ops) the OCF library uses to drive Data buffers.

    Field order must match the C struct — do not reorder.
    """

    ALLOC = CFUNCTYPE(c_void_p, c_uint32)
    FREE = CFUNCTYPE(None, c_void_p)
    MLOCK = CFUNCTYPE(c_int, c_void_p)
    MUNLOCK = CFUNCTYPE(None, c_void_p)
    READ = CFUNCTYPE(c_uint32, c_void_p, c_void_p, c_uint32)
    WRITE = CFUNCTYPE(c_uint32, c_void_p, c_void_p, c_uint32)
    ZERO = CFUNCTYPE(c_uint32, c_void_p, c_uint32)
    SEEK = CFUNCTYPE(c_uint32, c_void_p, c_uint32, c_uint32)
    COPY = CFUNCTYPE(c_uint64, c_void_p, c_void_p, c_uint64, c_uint64, c_uint64)
    SECURE_ERASE = CFUNCTYPE(None, c_void_p)

    _fields_ = [
        ("_alloc", ALLOC),
        ("_free", FREE),
        ("_mlock", MLOCK),
        ("_munlock", MUNLOCK),
        ("_read", READ),
        ("_write", WRITE),
        ("_zero", ZERO),
        ("_seek", SEEK),
        ("_copy", COPY),
        ("_secure_erase", SECURE_ERASE),
    ]
+
+
class Data:
    """Python implementation of OCF's ctx_data_t buffer abstraction.

    Wraps a raw ctypes buffer plus a read/write position and exposes the
    DataOps callback table the C library uses to manipulate such buffers.
    Buffers allocated on behalf of C (via _alloc) are kept alive in
    _ocf_instances_; all instances are tracked weakly by buffer address.
    """

    DATA_POISON = 0xA5
    PAGE_SIZE = 4096

    # buffer address -> weakref to the owning Data object
    _instances_ = {}
    # strong references for buffers allocated by the C side
    _ocf_instances_ = []

    def __init__(self, byte_count: int):
        self.size = int(byte_count)
        self.position = 0
        self.buffer = create_string_buffer(int(self.size))
        self.handle = cast(byref(self.buffer), c_void_p)

        # Poison the buffer so uninitialized reads stand out in dumps.
        memset(self.handle, self.DATA_POISON, self.size)
        type(self)._instances_[self.handle.value] = weakref.ref(self)
        self._as_parameter_ = self.handle

    @classmethod
    def get_instance(cls, ref):
        """Map a raw buffer address back to its Data object (None if collected)."""
        return cls._instances_[ref]()

    @classmethod
    def get_ops(cls):
        """Build the DataOps callback table pointing at this class' hooks."""
        return DataOps(
            _alloc=cls._alloc,
            _free=cls._free,
            _mlock=cls._mlock,
            _munlock=cls._munlock,
            _read=cls._read,
            _write=cls._write,
            _zero=cls._zero,
            _seek=cls._seek,
            _copy=cls._copy,
            _secure_erase=cls._secure_erase,
        )

    @classmethod
    def pages(cls, pages: int):
        """Allocate a buffer of *pages* 4 KiB pages."""
        return cls(pages * Data.PAGE_SIZE)

    @classmethod
    def from_bytes(cls, source: bytes, offset: int = 0, size: int = 0):
        """Allocate a buffer initialized from a slice of *source*."""
        if size == 0:
            size = len(source) - offset
        d = cls(size)

        # Guard: cast(b"", c_void_p).value is None, so memmove on an empty
        # source would raise; a zero-byte copy is a no-op anyway.
        if size > 0:
            memmove(d.handle, cast(source, c_void_p).value + offset, size)

        return d

    @classmethod
    def from_string(cls, source: str, encoding: str = "ascii"):
        """Allocate a sector-aligned buffer initialized from *source*."""
        b = bytes(source, encoding)
        # duplicate string to fill space up to sector boundary
        padding_len = S.from_B(len(b), sector_aligned=True).B - len(b)
        if b:
            # Guard: an empty input would divide by zero below.
            padding = b * (padding_len // len(b) + 1)
            b = b + padding[:padding_len]
        return cls.from_bytes(b)

    @staticmethod
    @DataOps.ALLOC
    def _alloc(pages):
        # Keep a strong reference: the C side owns this buffer until _free.
        data = Data.pages(pages)
        Data._ocf_instances_.append(data)

        return data.handle.value

    @staticmethod
    @DataOps.FREE
    def _free(ref):
        Data._ocf_instances_.remove(Data.get_instance(ref))

    @staticmethod
    @DataOps.MLOCK
    def _mlock(ref):
        return Data.get_instance(ref).mlock()

    @staticmethod
    @DataOps.MUNLOCK
    def _munlock(ref):
        Data.get_instance(ref).munlock()

    @staticmethod
    @DataOps.READ
    def _read(dst, src, size):
        return Data.get_instance(src).read(dst, size)

    @staticmethod
    @DataOps.WRITE
    def _write(dst, src, size):
        return Data.get_instance(dst).write(src, size)

    @staticmethod
    @DataOps.ZERO
    def _zero(dst, size):
        return Data.get_instance(dst).zero(size)

    @staticmethod
    @DataOps.SEEK
    def _seek(dst, seek, size):
        return Data.get_instance(dst).seek(DataSeek(seek), size)

    @staticmethod
    @DataOps.COPY
    def _copy(dst, src, skip, seek, size):
        return Data.get_instance(dst).copy(
            Data.get_instance(src), skip, seek, size
        )

    @staticmethod
    @DataOps.SECURE_ERASE
    def _secure_erase(dst):
        Data.get_instance(dst).secure_erase()

    def read(self, dst, size):
        """Copy up to *size* bytes from the current position into *dst*."""
        to_read = min(self.size - self.position, size)
        memmove(dst, self.handle.value + self.position, to_read)

        self.position += to_read
        return to_read

    def write(self, src, size):
        """Copy up to *size* bytes from *src* at the current position."""
        to_write = min(self.size - self.position, size)
        memmove(self.handle.value + self.position, src, to_write)

        self.position += to_write
        return to_write

    def mlock(self):
        # Test buffers are never really locked into memory.
        return 0

    def munlock(self):
        pass

    def zero(self, size):
        """Zero up to *size* bytes starting at the current position."""
        to_zero = min(self.size - self.position, size)
        memset(self.handle.value + self.position, 0, to_zero)

        self.position += to_zero
        return to_zero

    def seek(self, seek, size):
        """Move the position by (CURRENT) or to (BEGIN) *size*, clamped to the buffer."""
        if seek == DataSeek.CURRENT:
            to_move = min(self.size - self.position, size)
            self.position += to_move
        else:
            to_move = min(self.size, size)
            self.position = to_move

        return to_move

    def copy(self, src, skip, seek, size):
        """Copy *size* bytes from src[seek:] into self[skip:], clamped to both."""
        to_write = min(self.size - skip, size, src.size - seek)

        memmove(self.handle.value + skip, src.handle.value + seek, to_write)
        return to_write

    def secure_erase(self):
        # No-op for test buffers.
        pass

    def dump(self, ignore=DATA_POISON, **kwargs):
        """Hex-dump the buffer, skipping runs of *ignore* (the poison byte)."""
        print_buffer(self.buffer, self.size, ignore=ignore, **kwargs)

    def md5(self):
        """Return the hex MD5 digest of the whole buffer."""
        m = md5()
        m.update(string_at(self.handle, self.size))
        return m.hexdigest()
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/io.py b/src/spdk/ocf/tests/functional/pyocf/types/io.py
new file mode 100644
index 000000000..7e3671c5b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/io.py
@@ -0,0 +1,118 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ c_int,
+ c_uint32,
+ c_uint64,
+ CFUNCTYPE,
+ Structure,
+ POINTER,
+ byref,
+ cast,
+)
+from enum import IntEnum
+
+from ..ocf import OcfLib
+from .data import Data
+
+
class IoDir(IntEnum):
    # I/O direction, matching the values used in the ocf_io _dir field.
    READ = 0
    WRITE = 1
+
+
class IoOps(Structure):
    # Fields are attached after Io is defined (bottom of this module),
    # because the SET_DATA/GET_DATA prototypes need POINTER(Io).
    pass
+
+
class Io(Structure):
    """ctypes mirror of struct ocf_io plus Python-side bookkeeping."""

    # Callback prototypes invoked by the C side.
    START = CFUNCTYPE(None, c_void_p)
    HANDLE = CFUNCTYPE(None, c_void_p, c_void_p)
    END = CFUNCTYPE(None, c_void_p, c_int)

    # raw pointer -> Io object; keeps in-flight I/Os alive
    _instances_ = {}
    _fields_ = [
        ("_addr", c_uint64),
        ("_flags", c_uint64),
        ("_bytes", c_uint32),
        ("_class", c_uint32),
        ("_dir", c_uint32),
        ("_io_queue", c_void_p),
        ("_start", START),
        ("_handle", HANDLE),
        ("_end", END),
        ("_priv1", c_void_p),
        ("_priv2", c_void_p),
    ]

    @classmethod
    def from_pointer(cls, ref):
        """Wrap a C-allocated ocf_io and install the Python completion hook."""
        c = cls.from_address(ref)
        cls._instances_[ref] = c
        OcfLib.getInstance().ocf_io_set_cmpl_wrapper(
            byref(c), None, None, c.c_end
        )
        return c

    @classmethod
    def get_instance(cls, ref):
        """Map a raw ocf_io pointer back to the wrapping Io object."""
        return cls._instances_[cast(ref, c_void_p).value]

    def del_object(self):
        # Drop the registry entry created in from_pointer().
        del type(self)._instances_[cast(byref(self), c_void_p).value]

    def put(self):
        OcfLib.getInstance().ocf_io_put(byref(self))

    def get(self):
        OcfLib.getInstance().ocf_io_get(byref(self))

    @staticmethod
    @END
    def c_end(io, err):
        Io.get_instance(io).end(err)

    @staticmethod
    @START
    def c_start(io):
        Io.get_instance(io).start()

    @staticmethod
    @HANDLE
    def c_handle(io, opaque):
        Io.get_instance(io).handle(opaque)

    def end(self, err):
        """Completion path: run the user callback, release the I/O."""
        # NOTE(review): self.callback is assigned by callers, not here; the
        # bare except deliberately ignores a missing or failing callback.
        try:
            self.callback(err)
        except:  # noqa E722
            pass

        self.put()
        self.del_object()

    def submit(self):
        return OcfLib.getInstance().ocf_core_submit_io_wrapper(byref(self))

    def set_data(self, data: Data, offset: int = 0):
        # Keep a Python reference so the Data buffer outlives the I/O.
        self.data = data
        OcfLib.getInstance().ocf_io_set_data(byref(self), data, offset)
+
+
# IoOps prototypes are attached here because they need POINTER(Io), which
# only exists once Io is defined above.
IoOps.SET_DATA = CFUNCTYPE(c_int, POINTER(Io), c_void_p, c_uint32)
IoOps.GET_DATA = CFUNCTYPE(c_void_p, POINTER(Io))

IoOps._fields_ = [("_set_data", IoOps.SET_DATA), ("_get_data", IoOps.GET_DATA)]

# Declare argument/return types for the C wrappers used by Io.
lib = OcfLib.getInstance()
lib.ocf_io_set_cmpl_wrapper.argtypes = [POINTER(Io), c_void_p, c_void_p, Io.END]

lib.ocf_core_new_io_wrapper.argtypes = [c_void_p]
lib.ocf_core_new_io_wrapper.restype = c_void_p

lib.ocf_io_set_data.argtypes = [POINTER(Io), c_void_p, c_uint32]
lib.ocf_io_set_data.restype = c_int
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/logger.py b/src/spdk/ocf/tests/functional/pyocf/types/logger.py
new file mode 100644
index 000000000..3d85b3a7f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/logger.py
@@ -0,0 +1,182 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ c_void_p,
+ Structure,
+ c_char_p,
+ c_uint,
+ c_int,
+ cast,
+ CFUNCTYPE,
+ pointer,
+)
+from enum import IntEnum
+import logging
+from io import StringIO
+import weakref
+
+from ..ocf import OcfLib
+
+logger = logging.getLogger("pyocf")
+logger.setLevel(logging.DEBUG)
+
+
class LogLevel(IntEnum):
    # OCF log severities (syslog-style: lower value = more severe).
    EMERG = 0
    ALERT = 1
    CRIT = 2
    ERR = 3
    WARN = 4
    NOTICE = 5
    INFO = 6
    DEBUG = 7
+
+
# Translate OCF log levels to the Python logging module's numeric levels.
LevelMapping = {
    LogLevel.EMERG: logging.CRITICAL,
    LogLevel.ALERT: logging.CRITICAL,
    LogLevel.CRIT: logging.CRITICAL,
    LogLevel.ERR: logging.ERROR,
    LogLevel.WARN: logging.WARNING,
    LogLevel.NOTICE: logging.INFO,
    LogLevel.INFO: logging.INFO,
    LogLevel.DEBUG: logging.DEBUG,
}
+
+
class LoggerOps(Structure):
    """Callback table matching OCF's logger ops struct (field order is ABI)."""

    OPEN = CFUNCTYPE(c_int, c_void_p)
    CLOSE = CFUNCTYPE(None, c_void_p)
    # PRINTF omitted - we cannot make variadic function call in ctypes
    LOG = CFUNCTYPE(c_int, c_void_p, c_uint, c_char_p)
    PRINT_RL = CFUNCTYPE(c_int, c_void_p, c_char_p)
    DUMP_STACK = CFUNCTYPE(c_int, c_void_p)

    _fields_ = [
        ("_open", OPEN),
        ("_close", CLOSE),
        # _print is a raw pointer to a C printf-style helper (see Logger).
        ("_print", c_void_p),
        ("_print_rl", PRINT_RL),
        ("_dump_stack", DUMP_STACK),
    ]
+
+
class LoggerPriv(Structure):
    # Private data handed to the C logger; holds the Python LOG callback.
    _fields_ = [("_log", LoggerOps.LOG)]
+
+
class Logger(Structure):
    """Base class adapting Python logging to OCF's logger ops interface.

    The C side identifies a logger by its priv pointer; _instances_ maps
    that pointer (weakly) back to the Python object.
    """

    _instances_ = {}

    _fields_ = [("logger", c_void_p)]

    def __init__(self):
        self.ops = LoggerOps(
            _open=self._open,
            # _print must be a real C variadic (printf-style) helper.
            _print=cast(OcfLib.getInstance().pyocf_printf_helper, c_void_p),
            _close=self._close,
        )
        self.priv = LoggerPriv(_log=self._log)
        # The priv pointer doubles as this object's C-side identity.
        self._as_parameter_ = cast(pointer(self.priv), c_void_p).value
        self._instances_[self._as_parameter_] = weakref.ref(self)

    def get_ops(self):
        return self.ops

    def get_priv(self):
        return self.priv

    @classmethod
    def get_instance(cls, ctx: int):
        """Resolve a C logger handle to the Python Logger via its priv pointer."""
        priv = OcfLib.getInstance().ocf_logger_get_priv(ctx)
        return cls._instances_[priv]()

    @staticmethod
    @LoggerOps.LOG
    def _log(ref, lvl, msg):
        # Decode the C string and forward to the subclass' log().
        Logger.get_instance(ref).log(lvl, str(msg, "ascii").strip())
        return 0

    @staticmethod
    @LoggerOps.OPEN
    def _open(ref):
        # Subclasses may optionally implement open()/close().
        if hasattr(Logger.get_instance(ref), "open"):
            return Logger.get_instance(ref).open()
        else:
            return 0

    @staticmethod
    @LoggerOps.CLOSE
    def _close(ref):
        if hasattr(Logger.get_instance(ref), "close"):
            return Logger.get_instance(ref).close()
        else:
            return 0
+
+
class DefaultLogger(Logger):
    """Logger that forwards OCF messages to a stream handler on 'pyocf'."""

    def __init__(self, level: LogLevel = LogLevel.WARN):
        super().__init__()
        self.level = level

        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            )
        )
        handler.setLevel(LevelMapping[level])
        logger.addHandler(handler)

    def log(self, lvl: int, msg: str):
        """Forward one OCF message at the mapped Python level."""
        logger.log(LevelMapping[lvl], msg)

    def close(self):
        # Detach all handlers from the shared 'pyocf' logger.
        logger.handlers = []
+
+
class FileLogger(Logger):
    """Logger writing all messages to a file, optionally echoing to console."""

    def __init__(self, f, console_level=None):
        super().__init__()
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

        file_handler = logging.FileHandler(f)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        if console_level:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(LevelMapping[console_level])
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)

    def log(self, lvl, msg):
        """Forward one OCF message at the mapped Python level."""
        logger.log(LevelMapping[lvl], msg)

    def close(self):
        # Detach all handlers from the shared 'pyocf' logger.
        logger.handlers = []
+
+
class BufferLogger(Logger):
    """Logger capturing messages in memory for later inspection by tests."""

    def __init__(self, level: LogLevel):
        super().__init__()
        self.level = level
        self.buffer = StringIO()

    def log(self, lvl, msg):
        # NOTE(review): strict '<' means messages at exactly self.level are
        # dropped (lower value = more severe) — confirm this is intended.
        if lvl < self.level:
            self.buffer.write(msg + "\n")

    def get_lines(self):
        """Return captured messages as a list of lines."""
        return self.buffer.getvalue().split("\n")
+
+
# Prototype for the helper that extracts a logger's priv pointer.
lib = OcfLib.getInstance()
lib.ocf_logger_get_priv.restype = c_void_p
lib.ocf_logger_get_priv.argtypes = [c_void_p]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py b/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py
new file mode 100644
index 000000000..592d2a14c
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/metadata_updater.py
@@ -0,0 +1,102 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, c_int, c_uint32, Structure, CFUNCTYPE
+from threading import Thread, Event
+
+from ..ocf import OcfLib
+
+
class MetadataUpdaterOps(Structure):
    """Callback table for OCF's metadata updater (field order is ABI)."""

    INIT = CFUNCTYPE(c_int, c_void_p)
    KICK = CFUNCTYPE(None, c_void_p)
    STOP = CFUNCTYPE(None, c_void_p)

    _fields_ = [("_init", INIT), ("_kick", KICK), ("_stop", STOP)]
+
+
class MetadataUpdater:
    # Forward declaration so mu_run()'s annotation can reference the class;
    # the real implementation below rebinds the name.
    pass
+
+
def mu_run(*, mu: MetadataUpdater, kick: Event, stop: Event):
    """Worker loop for a metadata-updater thread.

    Repeatedly runs the C-side updater; when it reports no pending work,
    sleeps until kicked, exiting once *stop* is set.
    """
    while True:
        kick.clear()

        # Nonzero return means there is still queued work - run again.
        if OcfLib.getInstance().ocf_metadata_updater_run(mu):
            continue

        kick.wait()
        if stop.is_set():
            break
+
+
class MetadataUpdater:
    """Python metadata updater: one worker thread per C updater handle."""

    # C handle -> MetadataUpdater (strong refs; removed in _stop)
    _instances_ = {}
    # lazily-built shared callback table
    ops = None

    def __init__(self, ref):
        self._as_parameter_ = ref
        MetadataUpdater._instances_[ref] = self
        self.kick_event = Event()
        self.stop_event = Event()

        lib = OcfLib.getInstance()
        # Thread is named after the cache this updater belongs to.
        self.thread = Thread(
            group=None,
            target=mu_run,
            name="mu-{}".format(
                lib.ocf_cache_get_name(lib.ocf_metadata_updater_get_cache(self))
            ),
            kwargs={"mu": self, "kick": self.kick_event, "stop": self.stop_event},
        )
        self.thread.start()

    @classmethod
    def get_ops(cls):
        """Build (once) and return the shared callback table."""
        if not cls.ops:
            cls.ops = MetadataUpdaterOps(
                _init=cls._init, _kick=cls._kick, _stop=cls._stop
            )
        return cls.ops

    @classmethod
    def get_instance(cls, ref):
        return cls._instances_[ref]

    @classmethod
    def del_instance(cls, ref):
        del cls._instances_[ref]

    @staticmethod
    @MetadataUpdaterOps.INIT
    def _init(ref):
        # Constructing registers the instance and starts its thread.
        m = MetadataUpdater(ref)
        return 0

    @staticmethod
    @MetadataUpdaterOps.KICK
    def _kick(ref):
        MetadataUpdater.get_instance(ref).kick()

    @staticmethod
    @MetadataUpdaterOps.STOP
    def _stop(ref):
        MetadataUpdater.get_instance(ref).stop()
        del MetadataUpdater._instances_[ref]

    def kick(self):
        # Wake the worker thread.
        self.kick_event.set()

    def stop(self):
        # Order matters: mark stop first, then wake the thread so it exits.
        self.stop_event.set()
        self.kick_event.set()
+
+
# Prototypes for the C helpers used by the metadata updater above.
lib = OcfLib.getInstance()
lib.ocf_metadata_updater_get_cache.argtypes = [c_void_p]
lib.ocf_metadata_updater_get_cache.restype = c_void_p
lib.ocf_metadata_updater_run.argtypes = [c_void_p]
lib.ocf_metadata_updater_run.restype = c_uint32
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/queue.py b/src/spdk/ocf/tests/functional/pyocf/types/queue.py
new file mode 100644
index 000000000..da0963907
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/queue.py
@@ -0,0 +1,105 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_void_p, CFUNCTYPE, Structure, byref
+from threading import Thread, Condition, Event
+import weakref
+
+from ..ocf import OcfLib
+from .shared import OcfError
+
+
class QueueOps(Structure):
    """Callback table for an OCF I/O queue (field order is ABI)."""

    KICK = CFUNCTYPE(None, c_void_p)
    KICK_SYNC = CFUNCTYPE(None, c_void_p)
    STOP = CFUNCTYPE(None, c_void_p)

    _fields_ = [("kick", KICK), ("kick_sync", KICK_SYNC), ("stop", STOP)]
+
+
class Queue:
    # Forward declaration so io_queue_run()'s annotation can reference the
    # class; the real implementation below rebinds the name.
    pass
+
+
def io_queue_run(*, queue: Queue, kick: Condition, stop: Event):
    """Worker loop for an I/O queue thread.

    Sleeps until there is pending I/O (or stop is requested), runs the
    queue, and exits once stopped with no I/O left.
    """
    def wait_predicate():
        return stop.is_set() or OcfLib.getInstance().ocf_queue_pending_io(queue)

    while True:
        with kick:
            kick.wait_for(wait_predicate)

        OcfLib.getInstance().ocf_queue_run(queue)

        # Drain fully before exiting on stop.
        if stop.is_set() and not OcfLib.getInstance().ocf_queue_pending_io(queue):
            break
+
+
class Queue:
    """Python implementation of an OCF I/O queue.

    Each queue owns a worker thread that runs queued I/O whenever the C
    side kicks it; _instances_ weakly maps the C handle to the object.
    """

    _instances_ = {}

    def __init__(self, cache, name):

        # NOTE(review): only kick and stop are wired into the ops table;
        # _kick_sync exists below but is never registered — confirm intended.
        self.ops = QueueOps(kick=type(self)._kick, stop=type(self)._stop)

        self.handle = c_void_p()
        status = OcfLib.getInstance().ocf_queue_create(
            cache.cache_handle, byref(self.handle), byref(self.ops)
        )
        if status:
            raise OcfError("Couldn't create queue object", status)

        Queue._instances_[self.handle.value] = weakref.ref(self)
        self._as_parameter_ = self.handle

        self.stop_event = Event()
        self.kick_condition = Condition()
        self.thread = Thread(
            group=None,
            target=io_queue_run,
            name=name,
            kwargs={
                "queue": self,
                "kick": self.kick_condition,
                "stop": self.stop_event,
            },
        )
        self.thread.start()

    @classmethod
    def get_instance(cls, ref):
        # Dereference the weakref; None if the queue was collected.
        return cls._instances_[ref]()

    @staticmethod
    @QueueOps.KICK_SYNC
    def _kick_sync(ref):
        Queue.get_instance(ref).kick_sync()

    @staticmethod
    @QueueOps.KICK
    def _kick(ref):
        Queue.get_instance(ref).kick()

    @staticmethod
    @QueueOps.STOP
    def _stop(ref):
        Queue.get_instance(ref).stop()

    def kick_sync(self):
        # Run pending I/O synchronously on the caller's thread.
        OcfLib.getInstance().ocf_queue_run(self.handle)

    def kick(self):
        # Wake the worker thread.
        with self.kick_condition:
            self.kick_condition.notify_all()

    def put(self):
        OcfLib.getInstance().ocf_queue_put(self)

    def stop(self):
        # Signal the worker to drain and exit, then wait for it.
        with self.kick_condition:
            self.stop_event.set()
            self.kick_condition.notify_all()

        self.thread.join()
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/shared.py b/src/spdk/ocf/tests/functional/pyocf/types/shared.py
new file mode 100644
index 000000000..5244b4d36
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/shared.py
@@ -0,0 +1,160 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import CFUNCTYPE, c_size_t, c_char_p, Structure, c_void_p
+from enum import IntEnum, auto
+from threading import Event
+
+from ..utils import Size as S
+
+
class OcfErrorCode(IntEnum):
    # Mirrors the OCF C error enum; values are sequential from OCF_ERR_INVAL.
    # Member order must match the C header — do not reorder.
    OCF_ERR_INVAL = 1000000
    OCF_ERR_AGAIN = auto()
    OCF_ERR_INTR = auto()
    OCF_ERR_NOT_SUPP = auto()
    OCF_ERR_NO_MEM = auto()
    OCF_ERR_NO_LOCK = auto()
    OCF_ERR_METADATA_VER = auto()
    OCF_ERR_NO_METADATA = auto()
    OCF_ERR_METADATA_FOUND = auto()
    OCF_ERR_INVAL_VOLUME_TYPE = auto()
    OCF_ERR_UNKNOWN = auto()
    OCF_ERR_TOO_MANY_CACHES = auto()
    OCF_ERR_NO_FREE_RAM = auto()
    OCF_ERR_START_CACHE_FAIL = auto()
    OCF_ERR_CACHE_NOT_EXIST = auto()
    OCF_ERR_CORE_NOT_EXIST = auto()
    OCF_ERR_CACHE_EXIST = auto()
    OCF_ERR_CORE_EXIST = auto()
    OCF_ERR_TOO_MANY_CORES = auto()
    OCF_ERR_CORE_NOT_AVAIL = auto()
    OCF_ERR_NOT_OPEN_EXC = auto()
    OCF_ERR_CACHE_NOT_AVAIL = auto()
    OCF_ERR_IO_CLASS_NOT_EXIST = auto()
    OCF_ERR_IO = auto()
    OCF_ERR_WRITE_CACHE = auto()
    OCF_ERR_WRITE_CORE = auto()
    OCF_ERR_DIRTY_SHUTDOWN = auto()
    OCF_ERR_DIRTY_EXISTS = auto()
    OCF_ERR_FLUSHING_INTERRUPTED = auto()
    OCF_ERR_FLUSH_IN_PROGRESS = auto()
    OCF_ERR_CANNOT_ADD_CORE_TO_POOL = auto()
    OCF_ERR_CACHE_IN_INCOMPLETE_STATE = auto()
    OCF_ERR_CORE_IN_INACTIVE_STATE = auto()
    OCF_ERR_INVALID_CACHE_MODE = auto()
    OCF_ERR_INVALID_CACHE_LINE_SIZE = auto()
    OCF_ERR_CACHE_NAME_MISMATCH = auto()
    OCF_ERR_INVAL_CACHE_DEV = auto()
+
+
class OcfCompletion:
    """
    This class provides Completion mechanism for interacting with OCF async
    management API.

    It registers a ctypes callback (exposed via ``_as_parameter_``) that
    captures the callback arguments and wakes up any thread in wait().
    """

    class CompletionResult:
        """Maps completion-argument names to the values the callback got."""

        def __init__(self, completion_args):
            self.completion_args = {
                name: position
                for position, (name, _) in enumerate(completion_args)
            }
            self.results = None
            self.arg_types = [arg_type for _, arg_type in completion_args]

        def __getitem__(self, key):
            if key not in self.completion_args:
                raise KeyError(f"No completion argument {key} specified")
            return self.results[self.completion_args[key]]

    def __init__(self, completion_args: list):
        """
        Provide ctypes arg list, and optionally index of status argument in
        completion function which will be extracted (default - last argument).

        :param completion_args: list of tuples (parameter name, parameter type)
            for OCF completion function
        """
        self.e = Event()
        self.results = OcfCompletion.CompletionResult(completion_args)
        # Keep a reference to one concrete callback so ctypes won't GC it.
        self._as_parameter_ = self.callback

    @property
    def callback(self):
        @CFUNCTYPE(c_void_p, *self.results.arg_types)
        def complete(*args):
            # Capture everything the C side passed, then release waiters.
            self.results.results = args
            self.e.set()

        return complete

    def wait(self):
        """Block until the completion callback has fired."""
        self.e.wait()
+
+
class OcfError(BaseException):
    """Raised when an OCF library call returns a nonzero status.

    NOTE(review): derives from BaseException (not Exception), so generic
    ``except Exception`` handlers will not catch it — kept as-is for
    backward compatibility with existing callers.
    """

    def __init__(self, msg, error_code):
        # Bug fix: the original called super().__init__(self, msg), leaving
        # the exception instance inside its own .args tuple.
        super().__init__(msg)
        self.error_code = OcfErrorCode(abs(error_code))
        self.msg = msg

    def __str__(self):
        return "{} ({})".format(self.msg, repr(self.error_code))
+
+
class SharedOcfObject(Structure):
    """Base class for ctypes structures shared with the C library.

    Keeps a per-class registry mapping an object's C-side handle
    (``_as_parameter_``) back to the Python instance.
    """

    _instances_ = {}

    def __init__(self):
        super().__init__()
        type(self)._instances_[self._as_parameter_] = self

    @classmethod
    def get_instance(cls, ref: int):
        """Return the instance registered under *ref*; log and return None if absent."""
        try:
            return cls._instances_[ref]
        except KeyError:
            # Narrowed from a bare except: only a missing key means the
            # registry is corrupt; anything else should propagate.
            logging.getLogger("pyocf").error(
                "OcfSharedObject corruption. wanted: {} instances: {}".format(
                    ref, cls._instances_
                )
            )
            return None

    @classmethod
    def del_object(cls, ref: int):
        """Drop *ref* from the registry."""
        del cls._instances_[ref]
+
+
class Uuid(Structure):
    # Mirrors struct ocf_volume_uuid: size + pointer to the identifier bytes.
    _fields_ = [("_size", c_size_t), ("_data", c_char_p)]
+
+
class CacheLineSize(IntEnum):
    # Supported cache line sizes, expressed in bytes (S has __int__/__index__).
    LINE_4KiB = S.from_KiB(4)
    LINE_8KiB = S.from_KiB(8)
    LINE_16KiB = S.from_KiB(16)
    LINE_32KiB = S.from_KiB(32)
    LINE_64KiB = S.from_KiB(64)
    DEFAULT = LINE_4KiB
+
+
class SeqCutOffPolicy(IntEnum):
    # Sequential cut-off policies; values match the OCF C enum.
    ALWAYS = 0
    FULL = 1
    NEVER = 2
    DEFAULT = FULL
+
+
class CacheLines(S):
    """A size expressed as a number of cache lines of a given line size."""

    def __init__(self, count: int, line_size: CacheLineSize):
        self.bytes = count * line_size
        self.line_size = line_size

    def __int__(self):
        # Use integer floor division: the original's true division went
        # through float and could lose precision for very large sizes.
        return int(self.bytes // self.line_size)
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/__init__.py
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py
new file mode 100644
index 000000000..59a4bdfa0
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/cache.py
@@ -0,0 +1,39 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint8, c_uint32, c_uint64, c_bool, c_int, Structure
+from pyocf.types.stats.shared import _Stat
+
+
class _Inactive(Structure):
    # Statistics for inactive (detached) cores; mirrors the C struct.
    _fields_ = [("occupancy", _Stat), ("clean", _Stat), ("dirty", _Stat)]
+
+
class _FallbackPt(Structure):
    # Fallback pass-through state; mirrors the C struct.
    _fields_ = [("error_counter", c_int), ("status", c_bool)]
+
+
class CacheInfo(Structure):
    """ctypes mirror of ocf_cache_info; field order/types are ABI — do not reorder."""

    _fields_ = [
        ("attached", c_bool),
        ("volume_type", c_uint8),
        ("size", c_uint32),
        ("inactive", _Inactive),
        ("occupancy", c_uint32),
        ("dirty", c_uint32),
        ("dirty_initial", c_uint32),
        ("dirty_for", c_uint32),
        ("cache_mode", c_uint32),
        ("fallback_pt", _FallbackPt),
        ("state", c_uint8),
        ("eviction_policy", c_uint32),
        ("cleaning_policy", c_uint32),
        ("promotion_policy", c_uint32),
        ("cache_line_size", c_uint64),
        ("flushed", c_uint32),
        ("core_count", c_uint32),
        ("metadata_footprint", c_uint64),
        ("metadata_end_offset", c_uint32),
    ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py
new file mode 100644
index 000000000..dd2d06689
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/core.py
@@ -0,0 +1,21 @@
+
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint32, c_uint64, Structure
+
+from .shared import OcfStatsReq, OcfStatsBlock, OcfStatsDebug, OcfStatsError
+
+
class CoreInfo(Structure):
    """ctypes mirror of ocf_core_info; field order/types are ABI — do not reorder."""

    _fields_ = [
        ("core_size", c_uint64),
        ("core_size_bytes", c_uint64),
        ("dirty", c_uint32),
        ("flushed", c_uint32),
        ("dirty_for", c_uint32),
        ("seq_cutoff_threshold", c_uint32),
        ("seq_cutoff_policy", c_uint32),
    ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py b/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py
new file mode 100644
index 000000000..e6719d985
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/stats/shared.py
@@ -0,0 +1,88 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_uint64, c_uint32, Structure
+
+
class _Stat(Structure):
    # A single statistic: raw value plus its fraction of the relevant total.
    _fields_ = [("value", c_uint64), ("fraction", c_uint64)]
+
+
class OcfStatsReq(Structure):
    # Request counters; mirrors the C struct (order is ABI).
    _fields_ = [
        ("partial_miss", c_uint64),
        ("full_miss", c_uint64),
        ("total", c_uint64),
        ("pass_through", c_uint64),
    ]
+
+
class OcfStatsBlock(Structure):
    # Block counters; mirrors the C struct.
    _fields_ = [("read", c_uint64), ("write", c_uint64)]
+
+
class OcfStatsError(Structure):
    # Error counters; mirrors the C struct.
    _fields_ = [("read", c_uint32), ("write", c_uint32)]
+
+
class OcfStatsDebug(Structure):
    # Debug histograms (per-size and per-alignment buckets); mirrors C struct.
    _fields_ = [
        ("read_size", c_uint64 * 12),
        ("write_size", c_uint64 * 12),
        ("read_align", c_uint64 * 4),
        ("write_align", c_uint64 * 4),
    ]
+
+
class UsageStats(Structure):
    # Cache usage statistics; mirrors the C struct (order is ABI).
    _fields_ = [
        ("occupancy", _Stat),
        ("free", _Stat),
        ("clean", _Stat),
        ("dirty", _Stat),
    ]
+
+
class RequestsStats(Structure):
    # Per-direction request statistics; mirrors the C struct (order is ABI).
    _fields_ = [
        ("rd_hits", _Stat),
        ("rd_partial_misses", _Stat),
        ("rd_full_misses", _Stat),
        ("rd_total", _Stat),
        ("wr_hits", _Stat),
        ("wr_partial_misses", _Stat),
        ("wr_full_misses", _Stat),
        ("wr_total", _Stat),
        ("rd_pt", _Stat),
        ("wr_pt", _Stat),
        ("serviced", _Stat),
        ("total", _Stat),
    ]
+
+
class BlocksStats(Structure):
    # Block transfer statistics per volume; mirrors the C struct (order is ABI).
    _fields_ = [
        ("core_volume_rd", _Stat),
        ("core_volume_wr", _Stat),
        ("core_volume_total", _Stat),
        ("cache_volume_rd", _Stat),
        ("cache_volume_wr", _Stat),
        ("cache_volume_total", _Stat),
        ("volume_rd", _Stat),
        ("volume_wr", _Stat),
        ("volume_total", _Stat),
    ]
+
+
class ErrorsStats(Structure):
    # Error statistics per volume; mirrors the C struct (order is ABI).
    _fields_ = [
        ("core_volume_rd", _Stat),
        ("core_volume_wr", _Stat),
        ("core_volume_total", _Stat),
        ("cache_volume_rd", _Stat),
        ("cache_volume_wr", _Stat),
        ("cache_volume_total", _Stat),
        ("total", _Stat),
    ]
diff --git a/src/spdk/ocf/tests/functional/pyocf/types/volume.py b/src/spdk/ocf/tests/functional/pyocf/types/volume.py
new file mode 100644
index 000000000..4bca10bd1
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/types/volume.py
@@ -0,0 +1,361 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import (
+ POINTER,
+ c_void_p,
+ c_uint32,
+ c_char_p,
+ create_string_buffer,
+ memmove,
+ memset,
+ Structure,
+ CFUNCTYPE,
+ c_int,
+ c_uint,
+ c_uint64,
+ sizeof,
+ cast,
+ string_at,
+)
+from hashlib import md5
+import weakref
+
+from .io import Io, IoOps, IoDir
+from .shared import OcfErrorCode, Uuid
+from ..ocf import OcfLib
+from ..utils import print_buffer, Size as S
+from .data import Data
+
+
class VolumeCaps(Structure):
    # Volume capability bitfield (1 bit: atomic writes support).
    _fields_ = [("_atomic_writes", c_uint32, 1)]
+
+
class VolumeOps(Structure):
    """Callback table for a volume backend (field order is ABI — do not reorder)."""

    SUBMIT_IO = CFUNCTYPE(None, POINTER(Io))
    SUBMIT_FLUSH = CFUNCTYPE(None, c_void_p)
    SUBMIT_METADATA = CFUNCTYPE(None, c_void_p)
    SUBMIT_DISCARD = CFUNCTYPE(None, c_void_p)
    SUBMIT_WRITE_ZEROES = CFUNCTYPE(None, c_void_p)
    OPEN = CFUNCTYPE(c_int, c_void_p)
    CLOSE = CFUNCTYPE(None, c_void_p)
    GET_MAX_IO_SIZE = CFUNCTYPE(c_uint, c_void_p)
    GET_LENGTH = CFUNCTYPE(c_uint64, c_void_p)

    _fields_ = [
        ("_submit_io", SUBMIT_IO),
        ("_submit_flush", SUBMIT_FLUSH),
        ("_submit_metadata", SUBMIT_METADATA),
        ("_submit_discard", SUBMIT_DISCARD),
        ("_submit_write_zeroes", SUBMIT_WRITE_ZEROES),
        ("_open", OPEN),
        ("_close", CLOSE),
        ("_get_max_io_size", GET_MAX_IO_SIZE),
        ("_get_length", GET_LENGTH),
    ]
+
+
class VolumeProperties(Structure):
    """Volume type registration record passed to ocf_ctx_register_volume_type."""

    _fields_ = [
        ("_name", c_char_p),
        ("_io_priv_size", c_uint32),
        ("_volume_priv_size", c_uint32),
        ("_caps", VolumeCaps),
        ("_ops", VolumeOps),
        ("_io_ops", IoOps),
        # NOTE(review): declared c_char_p but always set to 0 (no deinit
        # callback) — presumably a placeholder for the C function pointer.
        ("_deinit", c_char_p),
    ]
+
+
class VolumeIoPriv(Structure):
    # Per-I/O private data: attached Data buffer pointer and offset into it.
    _fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
+
+
class Volume(Structure):
    """In-memory volume backend for pyocf tests.

    Backs a volume with a poisoned RAM buffer; static methods implement the
    VolumeOps/IoOps callbacks and route them to per-instance methods.
    _instances_ maps the C volume handle (weakly) to the object; _uuid_
    maps a uuid string (weakly) to the object.
    """

    VOLUME_POISON = 0x13

    _fields_ = [("_storage", c_void_p)]
    _instances_ = {}
    _uuid_ = {}

    # lazily-built, shared per subclass via get_props()
    props = None

    def __init__(self, size: S, uuid=None):
        super().__init__()
        self.size = size
        if uuid:
            if uuid in type(self)._uuid_:
                raise Exception(
                    "Volume with uuid {} already created".format(uuid)
                )
            self.uuid = uuid
        else:
            # Fall back to a unique per-object identifier.
            self.uuid = str(id(self))

        type(self)._uuid_[self.uuid] = weakref.ref(self)

        self.data = create_string_buffer(int(self.size))
        # Poison the storage so uninitialized reads stand out in dumps.
        memset(self.data, self.VOLUME_POISON, self.size)
        self._storage = cast(self.data, c_void_p)

        self.reset_stats()
        self.opened = False

    def get_copy(self):
        """Return a new Volume with a byte-for-byte copy of this one's storage."""
        new_volume = Volume(self.size)
        memmove(new_volume.data, self.data, self.size)
        return new_volume

    @classmethod
    def get_props(cls):
        """Build (once per subclass) the VolumeProperties registration record."""
        if not cls.props:
            cls.props = VolumeProperties(
                _name=str(cls.__name__).encode("ascii"),
                _io_priv_size=sizeof(VolumeIoPriv),
                _volume_priv_size=0,
                _caps=VolumeCaps(_atomic_writes=0),
                _ops=VolumeOps(
                    _submit_io=cls._submit_io,
                    _submit_flush=cls._submit_flush,
                    _submit_metadata=cls._submit_metadata,
                    _submit_discard=cls._submit_discard,
                    _submit_write_zeroes=cls._submit_write_zeroes,
                    _open=cls._open,
                    _close=cls._close,
                    _get_max_io_size=cls._get_max_io_size,
                    _get_length=cls._get_length,
                ),
                _io_ops=IoOps(
                    _set_data=cls._io_set_data, _get_data=cls._io_get_data
                ),
                _deinit=0,
            )

        return cls.props

    @classmethod
    def get_instance(cls, ref):
        """Dereference the weakref registry entry for a C volume handle."""
        instance = cls._instances_[ref]()
        if instance is None:
            print("tried to access {} but it's gone".format(ref))

        return instance

    @classmethod
    def get_by_uuid(cls, uuid):
        return cls._uuid_[uuid]()

    @staticmethod
    @VolumeOps.SUBMIT_IO
    def _submit_io(io):
        # Resolve the owning Volume from the io's volume handle.
        io_structure = cast(io, POINTER(Io))
        volume = Volume.get_instance(
            OcfLib.getInstance().ocf_io_get_volume(io_structure)
        )

        volume.submit_io(io_structure)

    @staticmethod
    @VolumeOps.SUBMIT_FLUSH
    def _submit_flush(flush):
        io_structure = cast(flush, POINTER(Io))
        volume = Volume.get_instance(
            OcfLib.getInstance().ocf_io_get_volume(io_structure)
        )

        volume.submit_flush(io_structure)

    @staticmethod
    @VolumeOps.SUBMIT_METADATA
    def _submit_metadata(meta):
        # Metadata submission is a no-op for the RAM-backed test volume.
        pass

    @staticmethod
    @VolumeOps.SUBMIT_DISCARD
    def _submit_discard(discard):
        io_structure = cast(discard, POINTER(Io))
        volume = Volume.get_instance(
            OcfLib.getInstance().ocf_io_get_volume(io_structure)
        )

        volume.submit_discard(io_structure)

    @staticmethod
    @VolumeOps.SUBMIT_WRITE_ZEROES
    def _submit_write_zeroes(write_zeroes):
        pass

    @staticmethod
    @CFUNCTYPE(c_int, c_void_p)
    def _open(ref):
        # NOTE(review): raw CFUNCTYPE here has the same prototype as
        # VolumeOps.OPEN; kept as written.
        uuid_ptr = cast(
            OcfLib.getInstance().ocf_volume_get_uuid(ref), POINTER(Uuid)
        )
        uuid = str(uuid_ptr.contents._data, encoding="ascii")
        try:
            volume = Volume.get_by_uuid(uuid)
        except: # noqa E722 TODO:Investigate whether this really should be so broad
            print("Tried to access unallocated volume {}".format(uuid))
            print("{}".format(Volume._uuid_))
            return -1

        if volume.opened:
            return OcfErrorCode.OCF_ERR_NOT_OPEN_EXC

        # Register the handle -> object mapping used by the other callbacks.
        Volume._instances_[ref] = weakref.ref(volume)

        return volume.open()

    @staticmethod
    @VolumeOps.CLOSE
    def _close(ref):
        volume = Volume.get_instance(ref)
        volume.close()
        volume.opened = False

    @staticmethod
    @VolumeOps.GET_MAX_IO_SIZE
    def _get_max_io_size(ref):
        return Volume.get_instance(ref).get_max_io_size()

    @staticmethod
    @VolumeOps.GET_LENGTH
    def _get_length(ref):
        return Volume.get_instance(ref).get_length()

    @staticmethod
    @IoOps.SET_DATA
    def _io_set_data(io, data, offset):
        # Stash the Data buffer address and offset in the io's priv area.
        io_priv = cast(
            OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
        )
        data = Data.get_instance(data)
        io_priv.contents._offset = offset
        io_priv.contents._data = data.handle

        return 0

    @staticmethod
    @IoOps.GET_DATA
    def _io_get_data(io):
        io_priv = cast(
            OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
        )
        return io_priv.contents._data

    def open(self):
        self.opened = True
        return 0

    def close(self):
        pass

    def get_length(self):
        # Returns the S object; ctypes converts it via __index__.
        return self.size

    def get_max_io_size(self):
        return S.from_KiB(128)

    def submit_flush(self, flush):
        # RAM-backed storage: flush completes immediately.
        flush.contents._end(flush, 0)

    def submit_discard(self, discard):
        try:
            dst = self._storage + discard.contents._addr
            memset(dst, 0, discard.contents._bytes)

            discard.contents._end(discard, 0)
        except:  # noqa E722
            # Any failure completes the discard with -5 (-EIO).
            discard.contents._end(discard, -5)

    def get_stats(self):
        return self.stats

    def reset_stats(self):
        self.stats = {IoDir.WRITE: 0, IoDir.READ: 0}

    def submit_io(self, io):
        """Serve a read or write directly from/to the backing buffer."""
        try:
            self.stats[IoDir(io.contents._dir)] += 1

            io_priv = cast(
                OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv))
            offset = io_priv.contents._offset

            if io.contents._dir == IoDir.WRITE:
                src_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
                src = Data.get_instance(src_ptr.value).handle.value + offset
                dst = self._storage + io.contents._addr
            elif io.contents._dir == IoDir.READ:
                dst_ptr = cast(OcfLib.getInstance().ocf_io_get_data(io), c_void_p)
                dst = Data.get_instance(dst_ptr.value).handle.value + offset
                src = self._storage + io.contents._addr

            memmove(dst, src, io.contents._bytes)
            io_priv.contents._offset += io.contents._bytes

            io.contents._end(io, 0)
        except:  # noqa E722
            # Any failure completes the I/O with -5 (-EIO).
            io.contents._end(io, -5)

    def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
        """Hex-dump the storage, skipping runs of the poison byte by default."""
        if size == 0:
            size = int(self.size) - int(offset)

        print_buffer(
            self._storage,
            size,
            ignore=ignore,
            **kwargs
        )

    def md5(self):
        """Return the hex MD5 digest of the whole storage buffer."""
        m = md5()
        m.update(string_at(self._storage, self.size))
        return m.hexdigest()
+
+
class ErrorDevice(Volume):
    """Volume that fails (-5, i.e. -EIO) any I/O starting at a marked address."""

    def __init__(self, size, error_sectors: set = None, uuid=None):
        super().__init__(size, uuid)
        self.error_sectors = error_sectors or set()

    def set_mapping(self, error_sectors: set):
        """Replace the set of failing addresses."""
        self.error_sectors = error_sectors

    def submit_io(self, io):
        # Pass through unless the I/O starts at a marked address.
        if io.contents._addr not in self.error_sectors:
            super().submit_io(io)
            return

        io.contents._end(io, -5)
        self.stats["errors"][io.contents._dir] += 1

    def reset_stats(self):
        super().reset_stats()
        self.stats["errors"] = {IoDir.WRITE: 0, IoDir.READ: 0}
+
+
class TraceDevice(Volume):
    """Volume that lets a user callback inspect — and optionally veto — each I/O."""

    def __init__(self, size, trace_fcn=None, uuid=None):
        super().__init__(size, uuid)
        self.trace_fcn = trace_fcn

    def submit_io(self, io):
        # The trace callback's return value decides whether to forward the I/O.
        should_submit = self.trace_fcn(self, io) if self.trace_fcn else True

        if should_submit:
            super().submit_io(io)
+
+
# Prototypes for the C helpers used by the volume callbacks above.
lib = OcfLib.getInstance()
lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
lib.ocf_io_get_volume.argtypes = [c_void_p]
lib.ocf_io_get_volume.restype = c_void_p
lib.ocf_io_get_data.argtypes = [c_void_p]
lib.ocf_io_get_data.restype = c_void_p
diff --git a/src/spdk/ocf/tests/functional/pyocf/utils.py b/src/spdk/ocf/tests/functional/pyocf/utils.py
new file mode 100644
index 000000000..d4ef42300
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/utils.py
@@ -0,0 +1,173 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import string_at
+
+
def print_buffer(
    buf,
    length,
    offset=0,
    width=16,
    ignore=0,
    stop_after_count_ignored=0,
    print_fcn=print,
):
    """Hex-dump `length` bytes of `buf` starting at `offset`.

    Lines consisting solely of the `ignore` byte are collapsed into a
    summary message.  If `stop_after_count_ignored` is non-zero, dumping
    stops after roughly that many consecutive ignored bytes.

    `buf` may be bytes or a ctypes pointer; it must cover the address range
    [0, offset + length), since addresses printed (and indexed) are absolute.
    Output goes through `print_fcn`, one line per call.
    """
    offset = int(offset)
    end = offset + int(length)
    # Read up to `end` bytes so absolute-address indexing below stays valid.
    # (Previously only `length` bytes were read, so any offset > 0 made the
    # tail slices come back empty and be silently treated as ignored.)
    buf = string_at(buf, end)
    ignored_lines = 0
    whole_buffer_ignored = True
    stop_after_count_ignored = int(stop_after_count_ignored / width)

    for addr in range(offset, end, width):
        cur_line = buf[addr : min(end, addr + width)]
        if all(byte == ignore for byte in cur_line):
            if (
                stop_after_count_ignored
                and ignored_lines > stop_after_count_ignored
            ):
                print_fcn(
                    "<{} bytes of '0x{:02X}' encountered, stopping>".format(
                        stop_after_count_ignored * width, ignore
                    )
                )
                return
            ignored_lines += 1
            continue

        if ignored_lines:
            print_fcn(
                "<{} of '0x{:02X}' bytes omitted>".format(
                    ignored_lines * width, ignore
                )
            )
            ignored_lines = 0

        byteline = "".join("{:02X} ".format(byte) for byte in cur_line)
        asciiline = "".join(
            chr(byte) if 31 < byte < 126 else "." for byte in cur_line
        )
        print_fcn("0x{:08X}\t{}\t{}".format(addr, byteline, asciiline))
        whole_buffer_ignored = False

    if whole_buffer_ignored:
        print_fcn("<whole buffer ignored>")
    elif ignored_lines:
        print_fcn("<'0x{:02X}' until end>".format(ignore))
+
+
class Size:
    """A byte count with convenient constructors and unit conversions.

    Constructors (`from_KiB`, ..., `from_sector`) scale their argument into
    bytes; the unit properties (`KiB`, ...) convert back (floats, except
    `B` and `sectors`).  `sector_aligned=True` rounds up to a 512 B multiple.
    """

    _KiB = 1024
    _MiB = 1024 * _KiB
    _GiB = 1024 * _MiB
    _TiB = 1024 * _GiB
    _SECTOR_SIZE = 512

    def __init__(self, b: int, sector_aligned: bool = False):
        if sector_aligned:
            # Round up to the next whole sector before truncating to int.
            whole_sectors = (b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE
            self.bytes = int(whole_sectors * self._SECTOR_SIZE)
        else:
            self.bytes = int(b)

    def __int__(self):
        return self.bytes

    def __index__(self):
        return self.bytes

    @classmethod
    def from_B(cls, value, sector_aligned=False):
        return cls(value, sector_aligned)

    @classmethod
    def from_KiB(cls, value, sector_aligned=False):
        return cls(value * cls._KiB, sector_aligned)

    @classmethod
    def from_MiB(cls, value, sector_aligned=False):
        return cls(value * cls._MiB, sector_aligned)

    @classmethod
    def from_GiB(cls, value, sector_aligned=False):
        return cls(value * cls._GiB, sector_aligned)

    @classmethod
    def from_TiB(cls, value, sector_aligned=False):
        return cls(value * cls._TiB, sector_aligned)

    @classmethod
    def from_sector(cls, value):
        return cls(value * cls._SECTOR_SIZE)

    @property
    def B(self):
        return self.bytes

    @property
    def KiB(self):
        return self.bytes / self._KiB

    @property
    def MiB(self):
        return self.bytes / self._MiB

    @property
    def GiB(self):
        return self.bytes / self._GiB

    @property
    def TiB(self):
        return self.bytes / self._TiB

    @property
    def sectors(self):
        return self.bytes // self._SECTOR_SIZE

    def __str__(self):
        # Render with the largest unit the value does not completely fill.
        for limit, amount, unit in (
            (self._KiB, self.B, "B"),
            (self._MiB, self.KiB, "KiB"),
            (self._GiB, self.MiB, "MiB"),
            (self._TiB, self.GiB, "GiB"),
        ):
            if self.bytes < limit:
                return "{} {}".format(amount, unit)
        return "{} TiB".format(self.TiB)
+
+
def print_structure(struct, indent=0, print_fcn=print):
    """Recursively pretty-print a ctypes structure, one field per line.

    Nested structures (anything exposing `_fields_`) get a header line and
    are printed one indent level deeper.  `print_fcn` (new, defaults to
    `print` so existing callers are unaffected) receives each output line,
    mirroring the `print_buffer` convention and making the output capturable.
    """
    print_fcn(struct)
    for field, _ in struct._fields_:
        value = getattr(struct, field)
        if hasattr(value, "_fields_"):
            print_fcn("{}{: <20} :".format(" " * indent, field))
            print_structure(value, indent=indent + 1, print_fcn=print_fcn)
        else:
            print_fcn("{}{: <20} : {}".format(" " * indent, field, value))
+
+
def struct_to_dict(struct):
    """Convert a ctypes structure into a plain dict, recursing into nested
    structures (anything that exposes `_fields_`)."""
    result = {}
    for field, _ in struct._fields_:
        value = getattr(struct, field)
        result[field] = (
            struct_to_dict(value) if hasattr(value, "_fields_") else value
        )
    return result
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c
new file mode 100644
index 000000000..79b9331e0
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_io_wrappers.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf_io.h"
+#include "ocf/ocf_core.h"
+
/*
 * Export ocf_core_new_io() as a plain linkable symbol so pyocf can call it
 * through ctypes (the wrapped helper is presumably a static inline in the
 * OCF headers -- confirm against ocf_core.h).  Pure pass-through.
 */
struct ocf_io *ocf_core_new_io_wrapper(ocf_core_t core, ocf_queue_t queue,
		uint64_t addr, uint32_t bytes, uint32_t dir,
		uint32_t io_class, uint64_t flags)
{
	return ocf_core_new_io(core, queue, addr, bytes, dir, io_class, flags);
}
+
/* ctypes-callable pass-through for ocf_io_set_cmpl(): registers the
 * completion callback `fn` with two opaque context pointers on `io`. */
void ocf_io_set_cmpl_wrapper(struct ocf_io *io, void *context,
		void *context2, ocf_end_io_t fn)
{
	ocf_io_set_cmpl(io, context, context2, fn);
}
+
/* ctypes-callable pass-through for ocf_io_set_start(). */
void ocf_io_set_start_wrapper(struct ocf_io *io, ocf_start_io_t fn)
{
	ocf_io_set_start(io, fn);
}
+
/* ctypes-callable pass-through for ocf_io_set_handle(). */
void ocf_io_set_handle_wrapper(struct ocf_io *io, ocf_handle_io_t fn)
{
	ocf_io_set_handle(io, fn);
}
+
/* ctypes-callable pass-through for ocf_core_submit_io(). */
void ocf_core_submit_io_wrapper(struct ocf_io *io)
{
	ocf_core_submit_io(io);
}
+
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c
new file mode 100644
index 000000000..60ded8dad
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_logger_wrappers.c
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include <ocf/ocf_types.h>
+#include <ocf/ocf_logger.h>
+#include <stdarg.h>
+#include "ocf_env.h"
+
+#define LOG_BUFFER_SIZE 4096
+
/* Logger private data: a single callback into the Python side.
 * NOTE(review): layout must match the struct the pyocf logger installs as
 * priv -- verify against pyocf/types/logger.py. */
struct pyocf_logger_priv {
	int (*pyocf_log)(void *pyocf_logger, ocf_logger_lvl_t lvl, char *msg);
};
+
/*
 * vprintf-style sink for the OCF logger: renders the message into a heap
 * buffer and forwards it to the Python callback stored in the logger's
 * private data.  Returns the callback's return value, or a negative value
 * on allocation/formatting failure.  Messages longer than LOG_BUFFER_SIZE
 * are truncated by vsnprintf.
 */
int pyocf_printf_helper(ocf_logger_t logger, ocf_logger_lvl_t lvl,
		const char *fmt, va_list args)
{
	/* Zero-filled allocation guarantees NUL termination. */
	char *buffer = env_zalloc(LOG_BUFFER_SIZE, ENV_MEM_NORMAL);
	/* NOTE(review): priv is not NULL-checked; assumes the logger was
	 * always created with private data attached -- confirm. */
	struct pyocf_logger_priv *priv = ocf_logger_get_priv(logger);
	int ret;

	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = vsnprintf(buffer, LOG_BUFFER_SIZE, fmt, args);
	if (ret < 0) {
		env_free(buffer);
		goto out;
	}

	ret = priv->pyocf_log(logger, lvl, buffer);

	env_free(buffer);

out:
	return ret;
}
diff --git a/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c
new file mode 100644
index 000000000..cb3787761
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pyocf/wrappers/ocf_volume_wrappers.c
@@ -0,0 +1,12 @@
+
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf_io.h"
+#include "ocf/ocf_volume.h"
+
/* ctypes-callable pass-through for ocf_uuid_to_str(). */
const char *ocf_uuid_to_str_wrapper(const struct ocf_volume_uuid *uuid) {
	return ocf_uuid_to_str(uuid);
}
diff --git a/src/spdk/ocf/tests/functional/pytest.ini b/src/spdk/ocf/tests/functional/pytest.ini
new file mode 100644
index 000000000..10796150b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = --ignore=tests/security -m "not long"
diff --git a/src/spdk/ocf/tests/functional/tests/__init__.py b/src/spdk/ocf/tests/functional/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/basic/__init__.py b/src/spdk/ocf/tests/functional/tests/basic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/basic/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py b/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py
new file mode 100644
index 000000000..b881abdc6
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/basic/test_pyocf.py
@@ -0,0 +1,86 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from pyocf.types.cache import Cache
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume, ErrorDevice
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size as S
+from pyocf.types.shared import OcfError, OcfCompletion
+
+
def test_ctx_fixture(pyocf_ctx):
    # Smoke test: setting up and tearing down the pyocf_ctx fixture itself
    # (context creation, volume-type registration, exit) must succeed.
    pass
+
+
def test_simple_wt_write(pyocf_ctx):
    """One small write through the cache (write-through, per the test name):
    must hit the cache volume exactly once, count as a full write miss,
    occupy one cache line, and leave exported-object data equal to core."""
    cache_device = Volume(S.from_MiB(30))
    core_device = Volume(S.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    # Drop the I/O accounting done during cache start/attach.
    cache_device.reset_stats()
    core_device.reset_stats()

    write_data = Data.from_string("This is test data")
    io = core.new_io(cache.get_default_queue(), S.from_sector(1).B,
                     write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    assert cmpl.results["err"] == 0
    # The cache volume must have absorbed exactly one write.
    assert cache_device.get_stats()[IoDir.WRITE] == 1
    stats = cache.get_stats()
    assert stats["req"]["wr_full_misses"]["value"] == 1
    assert stats["usage"]["occupancy"]["value"] == 1

    # Data visible through the exported object must match the core device.
    assert core.exp_obj_md5() == core_device.md5()
    cache.stop()
+
+
def test_start_corrupted_metadata_lba(pyocf_ctx):
    """Starting a cache on a device that fails writes to LBA 0 (where OCF
    puts its metadata) must raise OCF_ERR_WRITE_CACHE."""
    cache_device = ErrorDevice(S.from_MiB(30), error_sectors={0})

    with pytest.raises(OcfError, match="OCF_ERR_WRITE_CACHE"):
        # Result intentionally discarded: start must raise, not return.
        Cache.start_on_device(cache_device)
+
+
def test_load_cache_no_preexisting_data(pyocf_ctx):
    """Loading a cache from a device that never held OCF metadata must
    raise OCF_ERR_NO_METADATA."""
    cache_device = Volume(S.from_MiB(30))

    with pytest.raises(OcfError, match="OCF_ERR_NO_METADATA"):
        # Result intentionally discarded: load must raise, not return.
        Cache.load_from_device(cache_device)
+
+
def test_load_cache(pyocf_ctx):
    """A cleanly stopped cache must be loadable again from the same device."""
    cache_device = Volume(S.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    cache.stop()

    cache = Cache.load_from_device(cache_device)
+
+
def test_load_cache_recovery(pyocf_ctx):
    """Metadata captured while the cache is still running (i.e. a dirty
    shutdown image) must be recoverable by a subsequent load."""
    cache_device = Volume(S.from_MiB(30))

    cache = Cache.start_on_device(cache_device)

    # Snapshot the device *before* the clean stop to simulate a crash image.
    device_copy = cache_device.get_copy()

    cache.stop()

    cache = Cache.load_from_device(device_copy)
diff --git a/src/spdk/ocf/tests/functional/tests/conftest.py b/src/spdk/ocf/tests/functional/tests/conftest.py
new file mode 100644
index 000000000..943c1c07b
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/conftest.py
@@ -0,0 +1,39 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import os
+import sys
+import pytest
+import gc
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+from pyocf.types.logger import LogLevel, DefaultLogger, BufferLogger
+from pyocf.types.volume import Volume, ErrorDevice
+from pyocf.types.ctx import get_default_ctx
+
+
def pytest_configure(config):
    # Mirrors the module-level append so the pyocf package stays importable
    # from test modules regardless of how pytest was invoked.
    sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+
@pytest.fixture()
def pyocf_ctx():
    """Default OCF context (WARN-level console log) with both RAM-backed
    volume types registered; torn down and garbage-collected after each test."""
    ctx = get_default_ctx(DefaultLogger(LogLevel.WARN))
    for volume_type in (Volume, ErrorDevice):
        ctx.register_volume_type(volume_type)
    yield ctx
    ctx.exit()
    gc.collect()
+
+
@pytest.fixture()
def pyocf_ctx_log_buffer():
    """OCF context whose DEBUG-level log lines are captured in a
    BufferLogger; yields the logger so tests can inspect the output."""
    buf_logger = BufferLogger(LogLevel.DEBUG)
    ctx = get_default_ctx(buf_logger)
    for volume_type in (Volume, ErrorDevice):
        ctx.register_volume_type(volume_type)
    yield buf_logger
    ctx.exit()
    gc.collect()
diff --git a/src/spdk/ocf/tests/functional/tests/engine/__init__.py b/src/spdk/ocf/tests/functional/tests/engine/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/engine/test_pp.py b/src/spdk/ocf/tests/functional/tests/engine/test_pp.py
new file mode 100644
index 000000000..e45377559
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/test_pp.py
@@ -0,0 +1,305 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int
+import pytest
+import math
+
+from pyocf.types.cache import Cache, PromotionPolicy, NhitParams
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfCompletion
+
+
@pytest.mark.parametrize("promotion_policy", PromotionPolicy)
def test_init_nhit(pyocf_ctx, promotion_policy):
    """
    Check if starting cache with promotion policy is reflected in stats

    1. Create core/cache pair with parametrized promotion policy
    2. Get cache statistics
        * verify that promotion policy type is properly reflected in stats
    """

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device, promotion_policy=promotion_policy)
    core = Core.using_device(core_device)

    cache.add_core(core)

    # Stats must report the exact policy the cache was started with.
    assert cache.get_stats()["conf"]["promotion_policy"] == promotion_policy
+
+
def _submit_async_writes(cache, core, count):
    # Issue `count` sequential 4 KiB writes without waiting; return the
    # per-io completion objects so the caller can wait on them later.
    completions = []
    for i in range(count):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(4096)
        io = core.new_io(
            cache.get_default_queue(), i * 4096, write_data.size, IoDir.WRITE, 0, 0
        )
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()
    return completions


def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
    """
    Try switching promotion policy during io, no io's should return with error

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue IOs without waiting for completion
    3. Change promotion policy to NHIT
    4. Wait for IO completions
        * no IOs should fail
    5. Issue IOs without waiting for completion
    6. Change promotion policy to ALWAYS
    7. Wait for IO completions
        * no IOs should fail
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)

    cache.add_core(core)

    # Step 2 (the submission loop is shared with step 5 via the helper)
    completions = _submit_async_writes(cache, core, 2000)

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)

    # Step 4
    for c in completions:
        c.wait()
        assert not c.results["error"], "No IO's should fail when turning NHIT policy on"

    # Step 5
    completions = _submit_async_writes(cache, core, 2000)

    # Step 6
    cache.set_promotion_policy(PromotionPolicy.ALWAYS)

    # Step 7
    for c in completions:
        c.wait()
        assert not c.results[
            "error"
        ], "No IO's should fail when turning NHIT policy off"
+
+
def _fill_cache_async_write(cache, core, address, data):
    # Submit one asynchronous write of `data` at `address`; return the
    # completion object for the caller to wait on.
    comp = OcfCompletion([("error", c_int)])
    io = core.new_io(
        cache.get_default_queue(), address, data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(data)
    io.callback = comp.callback
    io.submit()
    return comp


def fill_cache(cache, fill_ratio):
    """
    Helper to fill cache from LBA 0.
    TODO:
        * make it generic and share across all tests
        * reasonable error handling

    Writes `fill_ratio` (0.0-1.0) of the cache's capacity to core 0 using
    max-sized IOs, plus one sector-aligned IO for the remainder.
    """

    cache_lines = cache.get_stats()["conf"]["size"]

    bytes_to_fill = cache_lines.bytes * fill_ratio
    max_io_size = cache.device.get_max_io_size().bytes

    ios_to_issue = math.floor(bytes_to_fill / max_io_size)

    core = cache.cores[0]

    # Full-sized IOs covering the bulk of the requested fill.
    completions = [
        _fill_cache_async_write(cache, core, i * max_io_size, Data(max_io_size))
        for i in range(ios_to_issue)
    ]

    # Remainder (if any), rounded up to a sector so it is a valid IO size.
    remainder = bytes_to_fill % max_io_size
    if remainder:
        completions.append(
            _fill_cache_async_write(
                cache,
                core,
                ios_to_issue * max_io_size,
                Data(Size.from_B(remainder, sector_aligned=True)),
            )
        )

    for c in completions:
        c.wait()
+
+
@pytest.mark.parametrize("fill_percentage", [0, 1, 50, 99])
@pytest.mark.parametrize("insertion_threshold", [2, 8])
def test_promoted_after_hits_various_thresholds(
    pyocf_ctx, insertion_threshold, fill_percentage
):
    """
    Check promotion policy behavior with various set thresholds

    1. Create core/cache pair with promotion policy NHIT
    2. Set TRIGGER_THRESHOLD/INSERTION_THRESHOLD to predefined values
    3. Fill cache from the beginning until occupancy reaches TRIGGER_THRESHOLD%
    4. Issue INSERTION_THRESHOLD - 1 requests to core line not inserted to cache
        * occupancy should not change
    5. Issue one request to LBA from step 4
        * occupancy should rise by one cache line
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device, promotion_policy=PromotionPolicy.NHIT)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, fill_percentage
    )
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, insertion_threshold
    )
    # Step 3
    fill_cache(cache, fill_percentage / 100)

    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    # NOTE(review): "fraction" appears to be expressed in hundredths of a
    # percent (hence the // 10 vs * 10 scaling) -- confirm against the
    # stats helpers in pyocf/types/stats.
    assert stats["usage"]["occupancy"]["fraction"] // 10 == fill_percentage * 10
    filled_occupancy = stats["usage"]["occupancy"]["value"]

    # Step 4: hammer the last core line, stopping one hit short of the
    # insertion threshold.
    last_core_line = int(core_device.size) - cache_lines.line_size
    completions = []
    for i in range(insertion_threshold - 1):
        comp = OcfCompletion([("error", c_int)])
        write_data = Data(cache_lines.line_size)
        io = core.new_io(
            cache.get_default_queue(),
            last_core_line,
            write_data.size,
            IoDir.WRITE,
            0,
            0,
        )
        completions += [comp]
        io.set_data(write_data)
        io.callback = comp.callback
        io.submit()

    for c in completions:
        c.wait()

    stats = cache.get_stats()
    threshold_reached_occupancy = stats["usage"]["occupancy"]["value"]
    assert threshold_reached_occupancy == filled_occupancy, (
        "No insertion should occur while NHIT is triggered and core line ",
        "didn't reach INSERTION_THRESHOLD",
    )

    # Step 5: the threshold-th hit must finally promote the line.
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(cache_lines.line_size)
    io = core.new_io(
        cache.get_default_queue(), last_core_line, write_data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()

    assert (
        threshold_reached_occupancy
        == cache.get_stats()["usage"]["occupancy"]["value"] - 1
    ), "Previous request should be promoted and occupancy should rise"
+
+
def test_partial_hit_promotion(pyocf_ctx):
    """
    Check if NHIT promotion policy doesn't prevent partial hits from getting
    promoted to cache

    1. Create core/cache pair with promotion policy ALWAYS
    2. Issue one-sector IO to cache to insert partially valid cache line
    3. Set NHIT promotion policy with trigger=0 (always triggered) and high
    insertion threshold
    4. Issue a request containing partially valid cache line and next cache line
        * occupancy should rise - partially hit request should bypass nhit criteria
    """

    # Step 1
    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device)
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2: a READ miss inserts a cache line that is valid for one sector
    # only (the buffer is named write_data but carries read payload here).
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(Size.from_sector(1))
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.READ, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()

    comp.wait()

    stats = cache.get_stats()
    cache_lines = stats["conf"]["size"]
    assert stats["usage"]["occupancy"]["value"] == 1

    # Step 3
    cache.set_promotion_policy(PromotionPolicy.NHIT)
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, 0
    )
    cache.set_promotion_policy_param(
        PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, 100
    )

    # Step 4: spans the partially valid line plus the next one.
    comp = OcfCompletion([("error", c_int)])
    write_data = Data(2 * cache_lines.line_size)
    io = core.new_io(cache.get_default_queue(), 0, write_data.size, IoDir.WRITE, 0, 0)
    io.set_data(write_data)
    io.callback = comp.callback
    io.submit()
    comp.wait()

    stats = cache.get_stats()
    assert (
        stats["usage"]["occupancy"]["value"] == 2
    ), "Second cache line should be mapped"
diff --git a/src/spdk/ocf/tests/functional/tests/engine/test_wo.py b/src/spdk/ocf/tests/functional/tests/engine/test_wo.py
new file mode 100644
index 000000000..e0cd10fdd
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/engine/test_wo.py
@@ -0,0 +1,213 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int, memmove, cast, c_void_p
+from enum import IntEnum
+from itertools import product
+import random
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfCompletion
+
+
def __io(io, queue, address, size, data, direction):
    """Attach `data` to `io`, submit it, wait, and return the error code
    (0 on success).  queue/address/size/direction are part of the shared
    helper signature but unused here."""
    io.set_data(data, 0)
    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return int(completion.results["err"])
+
+
def _io(new_io, queue, address, size, data, offset, direction):
    """Build an io via `new_io`, run it synchronously, and for successful
    reads copy the received bytes back into `data` at `offset`."""
    io = new_io(queue, address, size, direction, 0, 0)
    if direction == IoDir.READ:
        backing = Data.from_bytes(bytes(size))
    else:
        backing = Data.from_bytes(data, offset, size)
    err = __io(io, queue, address, size, backing, direction)
    if err == 0 and direction == IoDir.READ:
        # Propagate what OCF read into the caller-provided buffer.
        memmove(cast(data, c_void_p).value + offset, backing.handle, size)
    return err
+
+
def io_to_core(core, address, size, data, offset, direction):
    """Issue I/O directly to the backing core device, bypassing the cache."""
    queue = core.cache.get_default_queue()
    return _io(core.new_core_io, queue, address, size, data, offset, direction)
+
+
def io_to_exp_obj(core, address, size, data, offset, direction):
    """Issue I/O through the exported object, i.e. via the cache engine."""
    queue = core.cache.get_default_queue()
    return _io(core.new_io, queue, address, size, data, offset, direction)
+
+
def sector_to_region(sector, region_start):
    """Map `sector` to the index of its region, given ascending region start
    sectors; sectors before region_start[0] fall into region 0."""
    for idx in range(len(region_start) - 1, -1, -1):
        if sector >= region_start[idx]:
            return idx
    return 0
+
+
class SectorStatus(IntEnum):
    """State of a single cache-line sector in the WO consistency test.

    Note: the original members had trailing commas (``DIRTY = 0,``), which
    makes each value a one-element tuple that Enum only happens to unpack
    back into an int -- removed to make the values explicit ints.
    """

    DIRTY = 0
    CLEAN = 1
    INVALID = 2
+
+
# One-letter aliases keeping the region-state tables below readable.
I = SectorStatus.INVALID
D = SectorStatus.DIRTY
C = SectorStatus.CLEAN
+
+# Test reads with 4k cacheline and different combinations of sectors status and
+# IO range. Three consecutive core lines are targeted, with the middle one (no 1)
+# having all sectors status (clean, dirty, invalid) set independently. The other
+# two lines either are fully dirty/clean/invalid or have the single sector
+# neighbouring with middle core line with different status. This gives total of
+# 12 regions with independent state, listed on the diagram below.
+#
+# cache line | CL 0 | CL 1 | CL 2 |
+# sector no |01234567|89ABCDEF|(ctd..) |
+# |........|........|........|
+# region no |00000001|23456789|ABBBBBBB|
+# io start possible | | | |
+# values @START |> >>|>>>>>>>>| |
+# io end possible | | | |
+# values @END | |<<<<<<<<|<< <|
+#
+# Each test iteration is described by region states and IO start/end sectors,
+# giving total of 14 parameters
+#
+# In order to determine data consistency, cache is filled with data so so that:
+# - core sector no @n is filled with @n
+# - if clean, exported object sector no @n is filled with 100 + @n
+# - if dirty, exported object sector no @n is filled with 200 + @n
+#
+
+
def test_wo_read_data_consistency(pyocf_ctx):
    """WO-mode reads must return consistent data for every combination of
    per-region sector states (see the region diagram above the test).

    Fixes vs. the original: local constant ITRATION_COUNT renamed to
    ITERATION_COUNT, unused local END removed.
    """
    # start sector for each region
    region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
    # possible start sectors for test iteration
    start_sec = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    # possible end sectors for test iteration
    end_sec = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23]

    CACHELINE_COUNT = 3
    CACHELINE_SIZE = 4096
    SECTOR_SIZE = Size.from_sector(1).B
    CLS = CACHELINE_SIZE // SECTOR_SIZE
    WORKSET_SIZE = CACHELINE_COUNT * CACHELINE_SIZE
    WORKSET_OFFSET = 1024 * CACHELINE_SIZE
    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
    ITERATION_COUNT = 200

    # fixed test cases
    fixed_combinations = [
        [I, I, D, D, D, D, D, D, D, D, I, I],
        [I, I, C, C, C, C, C, C, C, C, I, I],
        [I, I, D, D, D, I, D, D, D, D, I, I],
        [I, I, D, D, D, I, I, D, D, D, I, I],
        [I, I, I, I, D, I, I, D, C, D, I, I],
        [I, D, D, D, D, D, D, D, D, D, D, I],
        [C, C, I, D, D, I, D, D, D, D, D, I],
        [D, D, D, D, D, D, D, D, D, D, D, I],
    ]

    data = {}
    # memset n-th sector of core data with n
    data[SectorStatus.INVALID] = bytes([x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
    # memset n-th sector of clean data with n + 100
    data[SectorStatus.CLEAN] = bytes([100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
    # memset n-th sector of dirty data with n + 200
    data[SectorStatus.DIRTY] = bytes([200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])

    result_b = bytes(WORKSET_SIZE)

    cache_device = Volume(Size.from_MiB(30))
    core_device = Volume(Size.from_MiB(30))

    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WO)
    core = Core.using_device(core_device)

    cache.add_core(core)

    insert_order = [x for x in range(CACHELINE_COUNT)]

    # generate regions status combinations and shuffle it
    combinations = []
    state_combinations = product(SectorStatus, repeat=len(region_start))
    for S in state_combinations:
        combinations.append(S)
    random.shuffle(combinations)

    # add fixed test cases at the beginning
    combinations = fixed_combinations + combinations

    for S in combinations[:ITERATION_COUNT]:
        # write data to core and invalidate all CL
        cache.change_cache_mode(cache_mode=CacheMode.PT)
        io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
                      data[SectorStatus.INVALID], 0, IoDir.WRITE)

        # randomize cacheline insertion order to exercise different
        # paths with regard to cache I/O physical addresses continuousness
        random.shuffle(insert_order)
        sectors = [insert_order[i // CLS] * CLS + (i % CLS) for i in range(SECTOR_COUNT)]

        # insert clean sectors - iterate over cachelines in @insert_order order
        cache.change_cache_mode(cache_mode=CacheMode.WT)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] != SectorStatus.INVALID:
                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
                              data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE)

        # write dirty sectors
        cache.change_cache_mode(cache_mode=CacheMode.WO)
        for sec in sectors:
            region = sector_to_region(sec, region_start)
            if S[region] == SectorStatus.DIRTY:
                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
                              data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE)

        core_device.reset_stats()

        for s in start_sec:
            for e in end_sec:
                if s > e:
                    continue

                # issue WO read
                START = s * SECTOR_SIZE
                size = (e - s + 1) * SECTOR_SIZE
                assert 0 == io_to_exp_obj(
                    core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
                ), "error reading in WO mode: S={}, start={}, end={}, insert_order={}".format(
                    S, s, e, insert_order
                )

                # verify read data
                for sec in range(s, e + 1):
                    # just check the first byte of sector
                    region = sector_to_region(sec, region_start)
                    check_byte = sec * SECTOR_SIZE
                    assert (
                        result_b[check_byte] == data[S[region]][check_byte]
                    ), "unexpected data in sector {}, S={}, s={}, e={}, insert_order={}\n".format(
                        sec, S, s, e, insert_order
                    )

                # WO is not supposed to clean dirty data
                assert (
                    core_device.get_stats()[IoDir.WRITE] == 0
                ), "unexpected write to core device, S={}, s={}, e={}, insert_order = {}\n".format(
                    S, s, e, insert_order
                )
diff --git a/src/spdk/ocf/tests/functional/tests/eviction/__init__.py b/src/spdk/ocf/tests/functional/tests/eviction/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/eviction/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py b/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py
new file mode 100644
index 000000000..d17bbdb55
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/eviction/test_eviction.py
@@ -0,0 +1,80 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion, CacheLineSize, SeqCutOffPolicy
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.parametrize("mode", [CacheMode.WT, CacheMode.WB, CacheMode.WO])
@pytest.mark.xfail  # TODO: remove when fixed
def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
    """Test if eviction does not occur when IO greater than cache size is submitted.
    """
    cache_device = Volume(Size.from_MiB(20))  # this gives about 1.375 MiB actual caching space

    core_device = Volume(Size.from_MiB(5))
    cache = Cache.start_on_device(cache_device, cache_mode=mode,
                                  cache_line_size=cls)
    core_exported = Core.using_device(core_device)
    cache.add_core(core_exported)
    # Disable sequential cut-off so classification depends on IO size only.
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

    valid_io_size = Size.from_KiB(512)
    test_data = Data(valid_io_size)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()
    # NOTE(review): occupancy is compared in 4 KiB units regardless of the
    # parametrized cache line size -- confirm this holds for all `cls`.
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after first IO"
    prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]

    # Anything below 5 MiB is a valid size (less than core device size)
    # Writing over 1.375 MiB in this case should go directly to core and shouldn't trigger eviction
    io_size_bigger_than_cache = Size.from_MiB(2)
    test_data = Data(io_size_bigger_than_cache)
    send_io(core_exported, test_data)

    stats = core_exported.cache.get_stats()

    # Writes from IO greater than cache size should go directly to core
    # Writes to core should equal the following:
    # Previous writes to core + size written + size cleaned (reads from cache)
    assert stats["block"]["core_volume_wr"]["value"] == \
        stats["block"]["cache_volume_rd"]["value"] + \
        prev_writes_to_core + io_size_bigger_than_cache.B / Size.from_KiB(4).B, \
        "Writes to core after second IO"

    # Occupancy shouldn't change (no eviction)
    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
        "Occupancy after second IO"
+
+
def send_io(exported_obj: Core, data: Data):
    """Write `data` at LBA 0 of the exported object, wait for completion,
    and assert the io finished without error."""
    completion = OcfCompletion([("err", c_int)])
    io = exported_obj.new_io(
        exported_obj.cache.get_default_queue(),
        0, data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(data)
    io.callback = completion.callback
    io.submit()
    completion.wait()

    assert completion.results["err"] == 0, "IO to exported object completion"
diff --git a/src/spdk/ocf/tests/functional/tests/flush/__init__.py b/src/spdk/ocf/tests/functional/tests/flush/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/flush/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/management/__init__.py b/src/spdk/ocf/tests/functional/tests/management/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py b/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py
new file mode 100644
index 000000000..2397be753
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_add_remove.py
@@ -0,0 +1,278 @@
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from random import randint
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size as S
+from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Check statistics before adding core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+ # Add core to cache
+ cache.add_core(core)
+
+ # Check statistics after adding core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_removing_core(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core to cache
+ cache.add_core(core)
+
+ # Remove core from cache
+ cache.remove_core(core)
+
+ # Check statistics after removing core
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_30add_remove(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+    # Add and remove core device in a loop 30 times
+ # Check statistics after every operation
+ for i in range(0, 30):
+ cache.add_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ cache.remove_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_10add_remove_with_io(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add and remove core 10 times in a loop with io in between
+ for i in range(0, 10):
+ cache.add_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ write_data = Data.from_string("Test data")
+ io = core.new_io(
+ cache.get_default_queue(), S.from_sector(1).B, write_data.size,
+ IoDir.WRITE, 0, 0
+ )
+ io.set_data(write_data)
+
+ cmpl = OcfCompletion([("err", c_int)])
+ io.callback = cmpl.callback
+ io.submit()
+ cmpl.wait()
+
+ cache.remove_core(core)
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_add_remove_30core(pyocf_ctx):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device)
+ core_devices = []
+ core_amount = 30
+
+    # Add 30 cores and check stats after each addition
+ for i in range(0, core_amount):
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == i
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices.append(core)
+ cache.add_core(core)
+
+    # Remove 30 cores and check stats before each removal
+ for i in range(0, core_amount):
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - i
+ cache.remove_core(core_devices[i])
+
+ # Check statistics
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+def test_adding_to_random_cache(pyocf_ctx):
+ cache_devices = []
+ core_devices = {}
+ cache_amount = 5
+ core_amount = 30
+
+ # Create 5 cache devices
+ for i in range(0, cache_amount):
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(cache_device, name=f"cache{i}")
+ cache_devices.append(cache)
+
+    # Create 30 core devices and add each to a randomly chosen cache
+ for i in range(0, core_amount):
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices[core] = randint(0, cache_amount - 1)
+ cache_devices[core_devices[core]].add_core(core)
+
+ # Count expected number of cores per cache
+ count_dict = {}
+ for i in range(0, cache_amount):
+ count_dict[i] = sum(k == i for k in core_devices.values())
+
+ # Check if cache statistics are as expected
+ for i in range(0, cache_amount):
+ stats = cache_devices[i].get_stats()
+ assert stats["conf"]["core_count"] == count_dict[i]
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core_twice(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core
+ cache.add_core(core)
+
+ # Check that it is not possible to add the same core again
+ with pytest.raises(OcfError):
+ cache.add_core(core)
+
+ # Check that core count is still equal to one
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
+ # Start first cache device
+ cache_device1 = Volume(S.from_MiB(30))
+ cache1 = Cache.start_on_device(
+ cache_device1, cache_mode=cache_mode, cache_line_size=cls, name="cache1"
+ )
+
+ # Start second cache device
+ cache_device2 = Volume(S.from_MiB(30))
+ cache2 = Cache.start_on_device(
+ cache_device2, cache_mode=cache_mode, cache_line_size=cls, name="cache2"
+ )
+
+ # Create core device
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device)
+
+ # Add core to first cache
+ cache1.add_core(core)
+
+ # Check that it is not possible to add core to second cache
+ with pytest.raises(OcfError):
+ cache2.add_core(core)
+
+ # Check that core count is as expected
+ stats = cache1.get_stats()
+ assert stats["conf"]["core_count"] == 1
+
+ stats = cache2.get_stats()
+ assert stats["conf"]["core_count"] == 0
+
+
+@pytest.mark.parametrize("cache_mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cache_mode, cache_line_size=cls
+ )
+ core_devices = []
+ core_amount = 5
+
+ # Create 5 core devices and add to cache
+ for i in range(0, core_amount):
+ core_device = Volume(S.from_MiB(10))
+ core = Core.using_device(core_device, name=f"core{i}")
+ core_devices.append(core)
+ cache.add_core(core)
+
+ # Check that core count is as expected
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount
+
+ # Remove 3 cores
+ cache.remove_core(core_devices[0])
+ cache.remove_core(core_devices[1])
+ cache.remove_core(core_devices[2])
+
+ # Add 2 cores and check if core count is as expected
+ cache.add_core(core_devices[0])
+ cache.add_core(core_devices[1])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - 1
+
+ # Remove 1 core and check if core count is as expected
+ cache.remove_core(core_devices[1])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount - 2
+
+ # Add 2 cores and check if core count is as expected
+ cache.add_core(core_devices[1])
+ cache.add_core(core_devices[2])
+ stats = cache.get_stats()
+ assert stats["conf"]["core_count"] == core_amount
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_change_params.py b/src/spdk/ocf/tests/functional/tests/management/test_change_params.py
new file mode 100644
index 000000000..69b25e436
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_change_params.py
@@ -0,0 +1,135 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode, CleaningPolicy, SeqCutOffPolicy
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from pyocf.types.shared import CacheLineSize
+
+
+@pytest.mark.parametrize("from_cm", CacheMode)
+@pytest.mark.parametrize("to_cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_change_cache_mode(pyocf_ctx, from_cm, to_cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=from_cm, cache_line_size=cls
+ )
+
+ # Change cache mode and check if stats are as expected
+ cache.change_cache_mode(to_cm)
+ stats_after = cache.get_stats()
+ assert stats_after["conf"]["cache_mode"] == to_cm
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_change_cleaning_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Check all possible cleaning policy switches
+ for cp_from in CleaningPolicy:
+ for cp_to in CleaningPolicy:
+ cache.set_cleaning_policy(cp_from.value)
+
+ # Check if cleaning policy is correct
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == cp_from.value
+
+ cache.set_cleaning_policy(cp_to.value)
+
+ # Check if cleaning policy is correct
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == cp_to.value
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Create 2 core devices
+ core_device1 = Volume(S.from_MiB(10))
+ core1 = Core.using_device(core_device1, name="core1")
+ core_device2 = Volume(S.from_MiB(10))
+ core2 = Core.using_device(core_device2, name="core2")
+
+ # Add cores
+ cache.add_core(core1)
+ cache.add_core(core2)
+
+ # Check all possible seq cut off policy switches
+ for seq_from in SeqCutOffPolicy:
+ for seq_to in SeqCutOffPolicy:
+ cache.set_seq_cut_off_policy(seq_from.value)
+
+ # Check if seq cut off policy is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+
+ cache.set_seq_cut_off_policy(seq_to.value)
+
+ # Check if seq cut off policy is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+
+
+@pytest.mark.parametrize("cm", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
+ # Start cache device
+ cache_device = Volume(S.from_MiB(30))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=cm, cache_line_size=cls
+ )
+
+ # Create 2 core devices
+ core_device1 = Volume(S.from_MiB(10))
+ core1 = Core.using_device(core_device1, name="core1")
+ core_device2 = Volume(S.from_MiB(10))
+ core2 = Core.using_device(core_device2, name="core2")
+
+ # Add cores
+ cache.add_core(core1)
+ cache.add_core(core2)
+
+ # Check all possible seq cut off policy switches for first core
+ for seq_from in SeqCutOffPolicy:
+ for seq_to in SeqCutOffPolicy:
+ core1.set_seq_cut_off_policy(seq_from.value)
+
+ # Check if seq cut off policy of the first core is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_from.value
+
+ # Check if seq cut off policy of the second core did not change
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
+
+ core1.set_seq_cut_off_policy(seq_to.value)
+
+ # Check if seq cut off policy of the first core is correct
+ stats = core1.get_stats()
+ assert stats["seq_cutoff_policy"] == seq_to.value
+
+ # Check if seq cut off policy of the second core did not change
+ stats = core2.get_stats()
+ assert stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
diff --git a/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py b/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py
new file mode 100644
index 000000000..f455ea1e1
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/management/test_start_stop.py
@@ -0,0 +1,545 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int, c_void_p, byref, c_uint32
+from random import randrange
+from itertools import count
+
+import pytest
+
+from pyocf.ocf import OcfLib
+from pyocf.types.cache import Cache, CacheMode, MetadataLayout, EvictionPolicy, CleaningPolicy
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize, SeqCutOffPolicy
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+def test_start_check_default(pyocf_ctx):
+ """Test if default values are correct after start.
+ """
+
+ cache_device = Volume(Size.from_MiB(40))
+ core_device = Volume(Size.from_MiB(10))
+ cache = Cache.start_on_device(cache_device)
+
+ core = Core.using_device(core_device)
+ cache.add_core(core)
+
+ # Check if values are default
+ stats = cache.get_stats()
+ assert stats["conf"]["cleaning_policy"] == CleaningPolicy.DEFAULT
+ assert stats["conf"]["cache_mode"] == CacheMode.DEFAULT
+ assert stats["conf"]["cache_line_size"] == CacheLineSize.DEFAULT
+ assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT
+
+ core_stats = core.get_stats()
+ assert core_stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+ """Test starting cache in different modes with different cache line sizes.
+ After start check proper cache mode behaviour, starting with write operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(40))
+ core_device = Volume(Size.from_MiB(10))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+
+ cache.add_core(core_exported)
+
+ logger.info("[STAGE] Initial write to exported object")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("This is test data")
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+ check_stats_write_empty(core_exported, mode, cls)
+
+ logger.info("[STAGE] Read from exported object after initial write")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_after_write(core_exported, mode, cls, True)
+
+ logger.info("[STAGE] Write to exported object after read")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("Changed test data")
+
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+ check_stats_write_after_read(core_exported, mode, cls)
+
+ check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+ """Starting cache in different modes with different cache line sizes.
+ After start check proper cache mode behaviour, starting with read operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ core_device = Volume(Size.from_MiB(5))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+
+ cache.add_core(core_exported)
+
+ logger.info("[STAGE] Initial write to core device")
+ test_data = Data.from_string("This is test data")
+ io_to_core(core_exported, test_data, Size.from_sector(1).B, True)
+
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ logger.info("[STAGE] Initial read from exported object")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_empty(core_exported, mode, cls)
+
+ logger.info("[STAGE] Write to exported object after initial read")
+ cache_device.reset_stats()
+ core_device.reset_stats()
+
+ test_data = Data.from_string("Changed test data")
+
+ io_to_core(core_exported, test_data, Size.from_sector(1).B)
+
+ check_stats_write_after_read(core_exported, mode, cls, True)
+
+ logger.info("[STAGE] Read from exported object after write")
+ io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
+ check_stats_read_after_write(core_exported, mode, cls)
+
+ check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("layout", MetadataLayout)
+def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: MetadataLayout):
+ """Starting cache with different parameters.
+ Check if cache starts without errors.
+ If possible check whether cache reports properly set parameters.
+ """
+ cache_device = Volume(Size.from_MiB(20))
+ queue_size = randrange(60000, 2**32)
+ unblock_size = randrange(1, queue_size)
+ volatile_metadata = randrange(2) == 1
+ unaligned_io = randrange(2) == 1
+ submit_fast = randrange(2) == 1
+ name = "test"
+
+ logger.info("[STAGE] Start cache")
+ cache = Cache.start_on_device(
+ cache_device,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=name,
+ metadata_layout=MetadataLayout.SEQUENTIAL,
+ metadata_volatile=volatile_metadata,
+ max_queue_size=queue_size,
+ queue_unblock_size=unblock_size,
+ pt_unaligned_io=unaligned_io,
+ use_submit_fast=submit_fast)
+
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
+ assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
+ assert cache.get_name() == name, "Cache name"
+ # TODO: metadata_layout, metadata_volatile, max_queue_size,
+ # queue_unblock_size, pt_unaligned_io, use_submit_fast
+ # TODO: test in functional tests
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("with_flush", {True, False})
+def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
+ """Stopping cache.
+ Check if cache is stopped properly in different modes with or without preceding flush operation.
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ core_device = Volume(Size.from_MiB(5))
+ cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+ core_exported = Core.using_device(core_device)
+ cache.add_core(core_exported)
+ cls_no = 10
+
+ run_io_and_cache_data_if_possible(core_exported, mode, cls, cls_no)
+
+ stats = cache.get_stats()
+ assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
+ "Dirty data before MD5"
+
+ md5_exported_core = core_exported.exp_obj_md5()
+
+ if with_flush:
+ cache.flush()
+ cache.stop()
+
+ if mode.lazy_write() and not with_flush:
+ assert core_device.md5() != md5_exported_core, \
+ "MD5 check: core device vs exported object with dirty data"
+ else:
+ assert core_device.md5() == md5_exported_core, \
+ "MD5 check: core device vs exported object with clean data"
+
+
+def test_start_stop_multiple(pyocf_ctx):
+ """Starting/stopping multiple caches.
+ Check whether OCF allows for starting multiple caches and stopping them in random order
+ """
+
+ caches = []
+ caches_no = randrange(6, 11)
+ for i in range(1, caches_no):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{i}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ caches.append(cache)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+
+ caches.sort(key=lambda e: randrange(1000))
+ for cache in caches:
+ logger.info("Getting stats before stopping cache")
+ stats = cache.get_stats()
+ cache_name = stats["conf"]["cache_name"]
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, cache_name) != 0, "Try getting cache after stopping it"
+
+
+def test_100_start_stop(pyocf_ctx):
+ """Starting/stopping stress test.
+ Check OCF behaviour when cache is started and stopped continuously
+ """
+
+ for i in range(1, 101):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{i}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, "cache1") != 0, "Try getting cache after stopping it"
+
+
+def test_start_stop_incrementally(pyocf_ctx):
+ """Starting/stopping multiple caches incrementally.
+ Check whether OCF behaves correctly when few caches at a time are
+ in turns added and removed (#added > #removed) until their number reaches limit,
+ and then proportions are reversed and number of caches gradually falls to 0.
+ """
+
+ counter = count()
+ caches = []
+ caches_limit = 10
+ add = True
+ run = True
+ increase = True
+ while run:
+ if add:
+ for i in range(0, randrange(3, 5) if increase else randrange(1, 3)):
+ cache_device = Volume(Size.from_MiB(20))
+ cache_name = f"cache{next(counter)}"
+ cache_mode = CacheMode(randrange(0, len(CacheMode)))
+ size = 4096 * 2**randrange(0, len(CacheLineSize))
+ cache_line_size = CacheLineSize(size)
+
+ cache = Cache.start_on_device(
+ cache_device,
+ name=cache_name,
+ cache_mode=cache_mode,
+ cache_line_size=cache_line_size)
+ caches.append(cache)
+ stats = cache.get_stats()
+ assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+ assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+ assert stats["conf"]["cache_name"] == cache_name, "Cache name"
+ if len(caches) == caches_limit:
+ increase = False
+ else:
+ for i in range(0, randrange(1, 3) if increase else randrange(3, 5)):
+ if len(caches) == 0:
+ run = False
+ break
+ cache = caches.pop()
+ logger.info("Getting stats before stopping cache")
+ stats = cache.get_stats()
+ cache_name = stats["conf"]["cache_name"]
+ cache.stop()
+ assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
+ "Try getting cache after stopping it"
+ add = not add
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_id(pyocf_ctx, mode, cls):
+ """Adding two caches with the same name
+ Check that OCF does not allow for 2 caches to be started with the same cache_name
+ """
+
+ cache_device1 = Volume(Size.from_MiB(20))
+ cache_device2 = Volume(Size.from_MiB(20))
+ cache_name = "cache"
+ cache = Cache.start_on_device(cache_device1,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=cache_name)
+ cache.get_stats()
+
+ with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
+ cache = Cache.start_on_device(cache_device2,
+ cache_mode=mode,
+ cache_line_size=cls,
+ name=cache_name)
+ cache.get_stats()
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_huge_device(pyocf_ctx_log_buffer, cls):
+ """
+ Test whether we can start cache which would overflow ocf_cache_line_t type.
+ pass_criteria:
+ - Starting cache on device too big to handle should fail
+ """
+ class HugeDevice(Volume):
+ def get_length(self):
+ return Size.from_B((cls * c_uint32(-1).value))
+
+ def submit_io(self, io):
+ io.contents._end(io, 0)
+
+ cache_device = HugeDevice(Size.from_MiB(20))
+
+ with pytest.raises(OcfError, match="OCF_ERR_START_CACHE_FAIL"):
+ cache = Cache.start_on_device(cache_device, cache_line_size=cls, metadata_volatile=True)
+
+ assert any(
+ [line.find("exceeds maximum") > 0 for line in pyocf_ctx_log_buffer.get_lines()]
+ ), "Expected to find log notifying that max size was exceeded"
+
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_device(pyocf_ctx, mode, cls):
+ """Adding two caches using the same cache device
+ Check that OCF does not allow for 2 caches using the same cache device to be started
+ """
+
+ cache_device = Volume(Size.from_MiB(20))
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=mode, cache_line_size=cls, name="cache1"
+ )
+ cache.get_stats()
+
+ with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
+ cache = Cache.start_on_device(
+ cache_device, cache_mode=mode, cache_line_size=cls, name="cache2"
+ )
+ cache.get_stats()
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_too_small_device(pyocf_ctx, mode, cls):
+ """Starting cache with device below 100MiB
+ Check if starting cache with device below minimum size is blocked
+ """
+
+ cache_device = Volume(Size.from_B(20 * 1024 * 1024 - 1))
+
+ with pytest.raises(OcfError, match="OCF_ERR_INVAL_CACHE_DEV"):
+ Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+
+
+def test_start_stop_noqueue(pyocf_ctx):
+ # cache object just to construct cfg conveniently
+ _cache = Cache(pyocf_ctx.ctx_handle)
+
+ cache_handle = c_void_p()
+ status = pyocf_ctx.lib.ocf_mngt_cache_start(
+ pyocf_ctx.ctx_handle, byref(cache_handle), byref(_cache.cfg)
+ )
+ assert not status, "Failed to start cache: {}".format(status)
+
+ # stop without creating mngmt queue
+ c = OcfCompletion(
+ [("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
+ )
+ pyocf_ctx.lib.ocf_mngt_cache_stop(cache_handle, c, None)
+ c.wait()
+ assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])
+
+
+def run_io_and_cache_data_if_possible(exported_obj, mode, cls, cls_no):
+ test_data = Data(cls_no * cls)
+
+ if mode in {CacheMode.WI, CacheMode.WA}:
+ logger.info("[STAGE] Write to core device")
+ io_to_core(exported_obj, test_data, 0, True)
+ logger.info("[STAGE] Read from exported object")
+ io_from_exported_object(exported_obj, test_data.size, 0)
+ else:
+ logger.info("[STAGE] Write to exported object")
+ io_to_core(exported_obj, test_data, 0)
+
+ stats = exported_obj.cache.get_stats()
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"
+
+
+def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
+ new_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
+ io = new_io(exported_obj.cache.get_default_queue(), offset, data.size,
+ IoDir.WRITE, 0, 0)
+ io.set_data(data)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+
+ assert completion.results["err"] == 0, "IO to exported object completion"
+
+
+def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
+ read_buffer = Data(buffer_size)
+ io = exported_obj.new_io(exported_obj.cache.get_default_queue(), offset,
+ read_buffer.size, IoDir.READ, 0, 0)
+ io.set_data(read_buffer)
+
+ completion = OcfCompletion([("err", c_int)])
+ io.callback = completion.callback
+ io.submit()
+ completion.wait()
+
+ assert completion.results["err"] == 0, "IO from exported object completion"
+ return read_buffer
+
+
+def check_stats_read_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+ stats = exported_obj.cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
+ assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
+ "Read full misses"
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"
+
+
+def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+ stats = exported_obj.cache.get_stats()
+ assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+ # TODO(ajrutkow): why 1 for WT ??
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
+ "Writes to core device"
+ assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
+ "Write full misses"
+ assert stats["usage"]["occupancy"]["value"] == \
+ ((cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0), \
+ "Occupancy"
+
+
+def check_stats_write_after_read(exported_obj: Core,
+ mode: CacheMode,
+ cls: CacheLineSize,
+ read_from_empty=False):
+ stats = exported_obj.cache.get_stats()
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (0 if mode in {CacheMode.WI, CacheMode.PT} else
+ (2 if read_from_empty and mode.lazy_write() else 1)), \
+ "Writes to cache device"
+ assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
+ "Writes to core device"
+ assert stats["req"]["wr_hits"]["value"] == \
+ (1 if (mode.read_insert() and mode != CacheMode.WI)
+ or (mode.write_insert() and not read_from_empty) else 0), \
+ "Write hits"
+ assert stats["usage"]["occupancy"]["value"] == \
+ (0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
+ "Occupancy"
+
+
+def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
+ stats = exported_obj.cache.get_stats()
+ assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+ (2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
+ "Writes to cache device"
+ assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
+ (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
+ or (mode == CacheMode.WA and not write_to_empty) else 0), \
+ "Reads from cache device"
+ assert exported_obj.device.get_stats()[IoDir.READ] == \
+ (0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
+ or (mode == CacheMode.WA and not write_to_empty) else 1), \
+ "Reads from core device"
+ assert stats["req"]["rd_full_misses"]["value"] == \
+ (1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
+ + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
+ "Read full misses"
+ assert stats["req"]["rd_hits"]["value"] == \
+ (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
+ or (mode == CacheMode.WA and not write_to_empty) else 0), \
+ "Read hits"
+ assert stats["usage"]["occupancy"]["value"] == \
+ (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
+
+
+def check_md5_sums(exported_obj: Core, mode: CacheMode):
+ if mode.lazy_write():
+ assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object without flush"
+ exported_obj.cache.flush()
+ assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object after flush"
+ else:
+ assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+ "MD5 check: core device vs exported object"
+
+
+def get_cache_by_name(ctx, cache_name):
+ cache_pointer = c_void_p()
+ return OcfLib.getInstance().ocf_mngt_cache_get_by_name(
+ ctx.ctx_handle, cache_name, byref(cache_pointer)
+ )
diff --git a/src/spdk/ocf/tests/functional/tests/security/__init__.py b/src/spdk/ocf/tests/functional/tests/security/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/__init__.py
diff --git a/src/spdk/ocf/tests/functional/tests/security/conftest.py b/src/spdk/ocf/tests/functional/tests/security/conftest.py
new file mode 100644
index 000000000..7d9ca3bbb
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/conftest.py
@@ -0,0 +1,98 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import os
+import sys
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_int
+)
+from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
+
+from pyocf.types.cache import CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
+from pyocf.types.shared import CacheLineSize
+
+import pytest
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+
+def enum_min(enum):
+ return list(enum)[0].value
+
+
+def enum_max(enum):
+ return list(enum)[-1].value
+
+
def enum_range(enum):
    """Return a Range covering first-to-last declared values of *enum*."""
    lo, hi = enum_min(enum), enum_max(enum)
    return Range(lo, hi)
+
+
# NOTE: each fixture below is parametrized over an entire RandomGenerator /
# RandomStringGenerator sequence, so a test requesting one of them is run
# once per generated value.


@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT16))
def c_uint16_randomize(request):
    """Random value spanning the whole uint16 range."""
    return request.param


@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32))
def c_uint32_randomize(request):
    """Random value spanning the whole uint32 range."""
    return request.param


@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT64))
def c_uint64_randomize(request):
    """Random value spanning the whole uint64 range."""
    return request.param


@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_randomize(request):
    """Random value spanning the whole signed int range."""
    return request.param


@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_sector_randomize(request):
    """Random signed int rounded down to a 512-byte sector boundary."""
    # Floor division rounds toward -inf, so negative values stay aligned too.
    return request.param // 512 * 512


@pytest.fixture(params=RandomStringGenerator())
def string_randomize(request):
    """Random string of varying length and character class."""
    return request.param
+
+
# The "not_<x>_randomize" fixtures below yield random uint32 values that are
# guaranteed to fall OUTSIDE the valid value range of the corresponding enum,
# for negative-path (fuzzing) tests.


@pytest.fixture(
    params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
)
def not_cache_mode_randomize(request):
    """uint32 value that is never a valid CacheMode."""
    return request.param


@pytest.fixture(
    params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheLineSize))
)
def not_cache_line_size_randomize(request):
    """uint32 value that is never a valid CacheLineSize."""
    return request.param


@pytest.fixture(
    params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(EvictionPolicy))
)
def not_eviction_policy_randomize(request):
    """uint32 value that is never a valid EvictionPolicy."""
    return request.param


@pytest.fixture(
    params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(PromotionPolicy))
)
def not_promotion_policy_randomize(request):
    """uint32 value that is never a valid PromotionPolicy."""
    return request.param


@pytest.fixture(
    params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(MetadataLayout))
)
def not_metadata_layout_randomize(request):
    """uint32 value that is never a valid MetadataLayout."""
    return request.param
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py b/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py
new file mode 100644
index 000000000..4369d49de
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_management_fuzzy.py
@@ -0,0 +1,315 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+
+from pyocf.types.cache import (
+ Cache,
+ CacheMode,
+ CleaningPolicy,
+ AlruParams,
+ AcpParams,
+ PromotionPolicy,
+ NhitParams,
+ ConfValidValues,
+)
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from tests.utils.random import RandomGenerator, DefaultRanges
+from pyocf.types.shared import OcfError, CacheLineSize, SeqCutOffPolicy
+from ctypes import c_uint64, c_uint32, c_uint8
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_change_cache_mode(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cache mode to invalid value.
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Build the valid-value set once instead of rebuilding a list for every
    # one of the ~1000 generated candidates (was O(n*m) with list scans).
    valid_modes = {item.value for item in CacheMode}

    # Change cache mode to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in valid_modes:
            continue
        with pytest.raises(OcfError, match="Error changing cache mode"):
            cache.change_cache_mode(i)
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_cleaning_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cleaning policy to invalid value
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Set cleaning policy to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        # Valid enum values are skipped -- only invalid input must raise.
        if i in [item.value for item in CleaningPolicy]:
            continue
        with pytest.raises(OcfError, match="Error changing cleaning policy"):
            cache.set_cleaning_policy(i)


@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_attach_cls(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cache line size to
    invalid value while attaching cache device
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device (started detached, so a device can be attached with
    # a fuzzed cache line size below)
    cache_device = Volume(S.from_MiB(30))
    cache = Cache(owner=cache_device.owner, cache_mode=cm, cache_line_size=cls)
    cache.start_cache()

    # Check whether it is possible to attach cache device with invalid cache line size
    # (UINT64 range because cache line size is a 64-bit quantity)
    for i in RandomGenerator(DefaultRanges.UINT64):
        if i in [item.value for item in CacheLineSize]:
            continue
        with pytest.raises(OcfError, match="Attaching cache device failed"):
            cache.attach_device(cache_device, cache_line_size=i)
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_cache_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change cache seq cut-off policy to invalid value
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Create 2 core devices (cache-wide setting must span multiple cores)
    core_device1 = Volume(S.from_MiB(10))
    core1 = Core.using_device(core_device1, name="core1")
    core_device2 = Volume(S.from_MiB(10))
    core2 = Core.using_device(core_device2, name="core2")

    # Add cores
    cache.add_core(core1)
    cache.add_core(core2)

    # Change cache seq cut off policy to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        # Valid enum values are skipped -- only invalid input must raise.
        if i in [item.value for item in SeqCutOffPolicy]:
            continue
        with pytest.raises(OcfError, match="Error setting cache seq cut off policy"):
            cache.set_seq_cut_off_policy(i)


@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_core_set_seq_cut_off_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to change core seq cut-off policy to invalid value
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Create core device
    core_device = Volume(S.from_MiB(10))
    core = Core.using_device(core_device)

    # Add core
    cache.add_core(core)

    # Change core seq cut off policy to invalid one and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        # Valid enum values are skipped -- only invalid input must raise.
        if i in [item.value for item in SeqCutOffPolicy]:
            continue
        with pytest.raises(OcfError, match="Error setting core seq cut off policy"):
            core.set_seq_cut_off_policy(i)
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_alru_param(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid param for alru cleaning policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Change invalid alru param and check if failed
    # (fuzzes the param *id*, not the param value -- the value is fixed at 1)
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in AlruParams]:
            continue
        with pytest.raises(OcfError, match="Error setting cleaning policy param"):
            cache.set_cleaning_policy_param(CleaningPolicy.ALRU, i, 1)
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_acp_param(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid param for acp cleaning policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Change invalid acp param and check if failed
    # (fuzzes the param *id*, not the param value -- the value is fixed at 1)
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in [item.value for item in AcpParams]:
            continue
        with pytest.raises(OcfError, match="Error setting cleaning policy param"):
            # BUG FIX: the original passed CleaningPolicy.ALRU here
            # (copy-paste from test_neg_set_alru_param), so ACP param
            # validation was never actually exercised.
            cache.set_cleaning_policy_param(CleaningPolicy.ACP, i, 1)
+
+
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_promotion_policy(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid param for promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cm, cache_line_size=cls)

    # Change to invalid promotion policy and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        # Valid enum values are skipped -- only invalid input must raise.
        if i in [item.value for item in PromotionPolicy]:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy"):
            cache.set_promotion_policy(i)


@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_nhit_promotion_policy_param(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid promotion policy param id for nhit promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device with NHIT promotion policy already enabled
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device,
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Set invalid promotion policy param id and check if failed
    # (UINT8 range: the param id is a small integer)
    for i in RandomGenerator(DefaultRanges.UINT8):
        if i in [item.value for item in NhitParams]:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(PromotionPolicy.NHIT, i, 1)


@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_nhit_promotion_policy_param_trigger(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid promotion policy param TRIGGER_THRESHOLD for
    nhit promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device with NHIT promotion policy already enabled
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device,
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Set to invalid promotion policy trigger threshold and check if failed
    # (here the fuzzed quantity is the param *value*; valid values come from
    # ConfValidValues rather than an enum)
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in ConfValidValues.promotion_nhit_trigger_threshold_range:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(
                PromotionPolicy.NHIT, NhitParams.TRIGGER_THRESHOLD, i
            )


@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.security
def test_neg_set_nhit_promotion_policy_param_threshold(pyocf_ctx, cm, cls):
    """
    Test whether it is possible to set invalid promotion policy param INSERTION_THRESHOLD for
    nhit promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param cm: cache mode we start with
    :param cls: cache line size we start with
    :return:
    """
    # Start cache device with NHIT promotion policy already enabled
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(
        cache_device,
        cache_mode=cm,
        cache_line_size=cls,
        promotion_policy=PromotionPolicy.NHIT,
    )

    # Set to invalid promotion policy insertion threshold and check if failed
    for i in RandomGenerator(DefaultRanges.UINT32):
        if i in ConfValidValues.promotion_nhit_insertion_threshold_range:
            continue
        with pytest.raises(OcfError, match="Error setting promotion policy parameter"):
            cache.set_promotion_policy_param(
                PromotionPolicy.NHIT, NhitParams.INSERTION_THRESHOLD, i
            )
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py b/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py
new file mode 100644
index 000000000..ee399a9c3
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_management_start_fuzzy.py
@@ -0,0 +1,155 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
+from pyocf.types.shared import OcfError, CacheLineSize
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+from tests.utils.random import RandomGenerator, DefaultRanges, Range
+
+logger = logging.getLogger(__name__)
+
+
def try_start_cache(**config):
    """Start a cache with *config* on a fresh 30 MiB volume, then stop it.

    Propagates OcfError when OCF rejects the configuration -- callers wrap
    this in pytest.raises to assert the rejection.
    """
    cache_device = Volume(Size.from_MiB(30))
    cache = Cache.start_on_device(cache_device, **config)
    cache.stop()
+
@pytest.mark.security
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_cache_mode(pyocf_ctx, cls, not_cache_mode_randomize):
    """
    Test whether it is impossible to start cache with invalid cache mode value.
    :param pyocf_ctx: basic pyocf context fixture
    :param cls: cache line size value to start cache with
    :param not_cache_mode_randomize: invalid cache mode value to start cache with
    """
    with pytest.raises(OcfError, match="OCF_ERR_INVALID_CACHE_MODE"):
        try_start_cache(cache_mode=not_cache_mode_randomize, cache_line_size=cls)


@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
def test_fuzzy_start_cache_line_size(pyocf_ctx, not_cache_line_size_randomize, cm):
    """
    Test whether it is impossible to start cache with invalid cache line size value.
    :param pyocf_ctx: basic pyocf context fixture
    :param not_cache_line_size_randomize: invalid cache line size value to start cache with
    :param cm: cache mode value to start cache with
    """
    with pytest.raises(OcfError, match="OCF_ERR_INVALID_CACHE_LINE_SIZE"):
        try_start_cache(cache_mode=cm, cache_line_size=not_cache_line_size_randomize)
+
+
@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
    """
    Test whether it is possible to start cache with various cache name value.
    :param pyocf_ctx: basic pyocf context fixture
    :param string_randomize: fuzzed cache name value to start cache with
    :param cm: cache mode value to start cache with
    :param cls: cache line size value to start cache with
    """
    cache_device = Volume(Size.from_MiB(30))
    # Only the empty string is expected to be rejected by OCF.
    incorrect_values = ['']
    try:
        cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm,
                                      cache_line_size=cls)
    except OcfError:
        # NOTE(review): mismatches are only logged, never asserted, so this
        # test cannot actually fail on a wrong outcome -- confirm intentional.
        if string_randomize not in incorrect_values:
            logger.error(
                f"Cache did not start properly with correct name value: '{string_randomize}'")
        return
    if string_randomize in incorrect_values:
        logger.error(f"Cache started with incorrect name value: '{string_randomize}'")
    cache.stop()
+
+
@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_eviction_policy(pyocf_ctx, not_eviction_policy_randomize, cm, cls):
    """
    Test whether it is impossible to start cache with invalid eviction policy value.
    :param pyocf_ctx: basic pyocf context fixture
    :param not_eviction_policy_randomize: invalid eviction policy value to start cache with
    :param cm: cache mode value to start cache with
    :param cls: cache line size value to start cache with
    """
    with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
        try_start_cache(
            eviction_policy=not_eviction_policy_randomize,
            cache_mode=cm,
            cache_line_size=cls
        )


@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_metadata_layout(pyocf_ctx, not_metadata_layout_randomize, cm, cls):
    """
    Test whether it is impossible to start cache with invalid metadata layout value.
    :param pyocf_ctx: basic pyocf context fixture
    :param not_metadata_layout_randomize: invalid metadata layout value to start cache with
    :param cm: cache mode value to start cache with
    :param cls: cache line size value to start cache with
    """
    with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
        try_start_cache(
            metadata_layout=not_metadata_layout_randomize,
            cache_mode=cm,
            cache_line_size=cls
        )
+
+
@pytest.mark.security
@pytest.mark.parametrize("cls", CacheLineSize)
@pytest.mark.parametrize('max_wb_queue_size', RandomGenerator(DefaultRanges.UINT32, 10))
def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
    """
    Test whether it is impossible to start cache with invalid dependence between max queue size
    and queue unblock size.
    :param pyocf_ctx: basic pyocf context fixture
    :param max_wb_queue_size: max queue size value to start cache with
    :param c_uint32_randomize: queue unblock size value to start cache with
    :param cls: cache line size value to start cache with
    """
    # Only unblock_size > max_queue_size is invalid; valid pairs are skipped
    # (with a warning) rather than asserted to succeed.
    if c_uint32_randomize > max_wb_queue_size:
        with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
            try_start_cache(
                max_queue_size=max_wb_queue_size,
                queue_unblock_size=c_uint32_randomize,
                cache_mode=CacheMode.WB,
                cache_line_size=cls)
    else:
        logger.warning(f"Test skipped for valid values: "
                       f"'max_queue_size={max_wb_queue_size}, "
                       f"queue_unblock_size={c_uint32_randomize}'.")
+
+
@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_promotion_policy(pyocf_ctx, not_promotion_policy_randomize, cm, cls):
    """
    Test whether it is impossible to start cache with invalid promotion policy
    :param pyocf_ctx: basic pyocf context fixture
    :param not_promotion_policy_randomize: invalid promotion policy value to start with
    :param cm: cache mode value to start cache with
    :param cls: cache line size to start cache with
    """
    with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
        try_start_cache(
            cache_mode=cm,
            cache_line_size=cls,
            promotion_policy=not_promotion_policy_randomize
        )
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py b/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py
new file mode 100644
index 000000000..c580df132
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_negative_io.py
@@ -0,0 +1,205 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int
+from random import randrange
+
+import pytest
+
+from pyocf.types.cache import Cache, Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+
@pytest.mark.security
def test_neg_write_too_long_data(pyocf_ctx, c_uint16_randomize):
    """
    Check if writing data larger than exported object size is properly blocked
    """

    # Exported object is 1 MiB = 1024 KiB, hence the 1024 threshold below.
    core = prepare_cache_and_core(Size.from_MiB(1))
    data = Data(int(Size.from_KiB(c_uint16_randomize)))
    completion = io_operation(core, data, IoDir.WRITE)

    if c_uint16_randomize > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0


@pytest.mark.security
def test_neg_read_too_long_data(pyocf_ctx, c_uint16_randomize):
    """
    Check if reading data larger than exported object size is properly blocked
    """

    # Exported object is 1 MiB = 1024 KiB, hence the 1024 threshold below.
    core = prepare_cache_and_core(Size.from_MiB(1))
    data = Data(int(Size.from_KiB(c_uint16_randomize)))
    completion = io_operation(core, data, IoDir.READ)

    if c_uint16_randomize > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
+
+
@pytest.mark.security
def test_neg_write_too_far(pyocf_ctx, c_uint16_randomize):
    """
    Check if writing data which would normally fit on exported object is
    blocked when offset is set so that data goes over exported device end
    """

    # Clamp size to [0, 4096] KiB; at offset 3 MiB inside a 4 MiB object only
    # 1 MiB = 1024 KiB remains, hence the 1024 threshold below.
    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    core = prepare_cache_and_core(Size.from_MiB(4))
    data = Data(int(Size.from_KiB(limited_size)))
    completion = io_operation(core, data, IoDir.WRITE, int(Size.from_MiB(3)))

    if limited_size > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
+
+
@pytest.mark.security
def test_neg_read_too_far(pyocf_ctx, c_uint16_randomize):
    """
    Check if reading data which would normally fit on exported object is
    blocked when offset is set so that data is read beyond exported device end
    """

    # Clamp size to [0, 4096] KiB; at offset 3 MiB inside a 4 MiB object only
    # 1 MiB = 1024 KiB remains, hence the 1024 threshold below.
    limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
    core = prepare_cache_and_core(Size.from_MiB(4))
    data = Data(int(Size.from_KiB(limited_size)))
    # CONSISTENCY FIX: pass offset as int like the sibling write test does --
    # io_operation declares "offset: int" and the original handed it a raw
    # Size object here.
    completion = io_operation(core, data, IoDir.READ, offset=int(Size.from_MiB(3)))

    if limited_size > 1024:
        assert completion.results["err"] != 0
    else:
        assert completion.results["err"] == 0
+
+
@pytest.mark.security
def test_neg_write_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
    """
    Check that write operations are blocked when
    IO offset is located outside of device range
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    completion = io_operation(core, data, IoDir.WRITE, offset=c_int_sector_randomize)

    # Valid window: the whole 1 KiB buffer must fit inside the 2 MiB object.
    if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0


@pytest.mark.security
def test_neg_read_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
    """
    Check that read operations are blocked when
    IO offset is located outside of device range
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    completion = io_operation(core, data, IoDir.READ, offset=c_int_sector_randomize)

    # Valid window: the whole 1 KiB buffer must fit inside the 2 MiB object.
    if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
+
+
@pytest.mark.security
def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
    """
    Check that write operations are blocked when
    IO offset is not aligned
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_KiB(1)))
    # Unaligned offsets must be rejected at IO-creation time (new_io raises),
    # before the IO is ever submitted; aligned offsets are not exercised here.
    if c_int_randomize % 512 != 0:
        with pytest.raises(Exception, match="Failed to create io!"):
            core.new_io(core.cache.get_default_queue(), c_int_randomize, data.size,
                        IoDir.WRITE, 0, 0)


@pytest.mark.security
def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
    """
    Check that write operations are blocked when
    IO size is not aligned
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_B(c_uint16_randomize)))
    # Same as above, but fuzzing the IO size instead of the offset.
    if c_uint16_randomize % 512 != 0:
        with pytest.raises(Exception, match="Failed to create io!"):
            core.new_io(core.cache.get_default_queue(), 0, data.size,
                        IoDir.WRITE, 0, 0)
+
+
@pytest.mark.security
def test_neg_io_class(pyocf_ctx, c_int_randomize):
    """
    Check that IO operations are blocked when IO class
    number is not in allowed values {0, ..., 32}
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_MiB(1)))
    # randrange(0, 2) picks a valid direction (READ or WRITE) at random,
    # so only the io_class is fuzzed.
    completion = io_operation(core, data, randrange(0, 2), io_class=c_int_randomize)

    if 0 <= c_int_randomize <= 32:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0


@pytest.mark.security
def test_neg_io_direction(pyocf_ctx, c_int_randomize):
    """
    Check that IO operations are not executed for unknown IO direction,
    that is when IO direction value is not in allowed values {0, 1}
    """

    core = prepare_cache_and_core(Size.from_MiB(2))
    data = Data(int(Size.from_MiB(1)))
    completion = io_operation(core, data, c_int_randomize)

    # 0 == READ, 1 == WRITE; anything else must be rejected.
    if c_int_randomize in [0, 1]:
        assert completion.results["err"] == 0
    else:
        assert completion.results["err"] != 0
+
+
def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(20)):
    """Start a cache of *cache_size* and attach one core of *core_size*.

    Returns the attached Core; the cache is reachable via core.cache.
    """
    cache = Cache.start_on_device(Volume(cache_size))
    core = Core.using_device(Volume(core_size))
    cache.add_core(core)
    return core
+
+
def io_operation(core: Core, data: Data, io_direction: int, offset: int = 0, io_class: int = 0):
    """Submit a single IO against *core* and wait for it to complete.

    Returns the OcfCompletion; the status code is completion.results["err"]
    (0 on success, non-zero when OCF rejected or failed the IO).
    """
    io = core.new_io(core.cache.get_default_queue(), offset, data.size,
                     io_direction, io_class, 0)
    io.set_data(data)

    completion = OcfCompletion([("err", c_int)])
    io.callback = completion.callback
    io.submit()
    completion.wait()
    return completion
diff --git a/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py b/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py
new file mode 100644
index 000000000..229410864
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/security/test_secure_erase.py
@@ -0,0 +1,215 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.utils import Size as S
+from pyocf.types.data import Data, DataOps
+from pyocf.types.ctx import OcfCtx
+from pyocf.types.logger import DefaultLogger, LogLevel
+from pyocf.ocf import OcfLib
+from pyocf.types.metadata_updater import MetadataUpdater
+from pyocf.types.cleaner import Cleaner
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfCompletion
+
+
class DataCopyTracer(Data):
    """
    This class enables tracking whether each copied over Data instance is
    then securely erased.
    """

    # Class-level (shared) bookkeeping, deliberately global across all
    # instances so the tests can assert on them after the context exits:
    # instances that were copied/locked but not yet securely erased
    needs_erase = set()
    # instances currently mlock()-ed and not yet munlock()-ed
    locked_instances = set()

    @staticmethod
    @DataOps.ALLOC
    def _alloc(pages):
        # Allocation callback registered with OCF: allocate a tracer-typed
        # buffer and keep a Python-side reference so it is not GC'd while
        # the C side still holds the handle.
        data = DataCopyTracer.pages(pages)
        Data._ocf_instances_.append(data)

        return data.handle.value

    def mlock(self):
        """Record this instance as locked (and needing erase), then lock."""
        DataCopyTracer.locked_instances.add(self)
        DataCopyTracer.needs_erase.add(self)
        return super().mlock()

    def munlock(self):
        """Unlock; OCF must have securely erased locked data first."""
        if self in DataCopyTracer.needs_erase:
            assert 0, "Erase should be called first on locked Data!"

        DataCopyTracer.locked_instances.remove(self)
        return super().munlock()

    def secure_erase(self):
        """Mark this instance erased, then delegate to the real erase."""
        DataCopyTracer.needs_erase.remove(self)
        return super().secure_erase()

    def copy(self, src, end, start, size):
        """Any copy target holds sensitive bytes, so it must be erased."""
        DataCopyTracer.needs_erase.add(self)
        return super().copy(src, end, start, size)
+
+
@pytest.mark.security
@pytest.mark.parametrize(
    "cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI]
)
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
    Perform simple IO which will trigger read misses, which in turn should
    trigger backfill. Track all the data locked/copied for backfill and make
    sure OCF calls secure erase and unlock on them.
    """
    # Build a private context whose data type is the tracing subclass, so
    # every buffer OCF allocates/copies internally is tracked.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)

    core_device = Volume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Seed one sector so at least some data exists on the core/cache.
    write_data = DataCopyTracer(S.from_sector(1))
    io = core.new_io(
        cache.get_default_queue(),
        S.from_sector(1).B,
        write_data.size,
        IoDir.WRITE,
        0,
        0,
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Issue 100 single-sector reads at increasing offsets; most miss and
    # trigger backfill, which is what copies/locks data internally.
    cmpls = []
    for i in range(100):
        read_data = DataCopyTracer(S.from_sector(1))
        io = core.new_io(
            cache.get_default_queue(),
            i * S.from_sector(1).B,
            read_data.size,
            IoDir.READ,
            0,
            0,
        )
        io.set_data(read_data)

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    # One more multi-sector write over already-read range.
    write_data = DataCopyTracer.from_string("TEST DATA" * 100)
    io = core.new_io(
        cache.get_default_queue(), S.from_sector(1), write_data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(write_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    # Stats must be captured before ctx.exit() tears the cache down.
    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    # Sanity check: the workload really did produce read misses.
    assert (
        stats["req"]["rd_partial_misses"]["value"]
        + stats["req"]["rd_full_misses"]["value"]
    ) > 0
+
+
@pytest.mark.security
def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    cleaner (locked) and make sure they are erased and unlocked after use.

    1. Start cache in WB mode
    2. Write single sector at LBA 0
    3. Read whole cache line at LBA 0
    4. Assert that 3. triggered cleaning
    5. Check if all locked Data copies were erased and unlocked
    """
    # Private context with the tracing Data type (see DataCopyTracer).
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)

    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)

    # Step 2: dirty a single sector.
    # NOTE(review): the buffer is named read_data but is used for a WRITE.
    read_data = Data(S.from_sector(1).B)
    io = core.new_io(
        cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0
    )
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Step 3: read a whole 8-sector cache line covering the dirty sector.
    read_data = Data(S.from_sector(8).B)
    io = core.new_io(
        cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0
    )
    io.set_data(read_data)

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    stats = cache.get_stats()

    # Stats must be captured before ctx.exit() tears the cache down.
    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"
diff --git a/src/spdk/ocf/tests/functional/tests/utils/random.py b/src/spdk/ocf/tests/functional/tests/utils/random.py
new file mode 100644
index 000000000..27735700f
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/tests/utils/random.py
@@ -0,0 +1,95 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import random
+import string
+import enum
+from functools import reduce
+from ctypes import (
+ c_uint64,
+ c_uint32,
+ c_uint16,
+ c_uint8,
+ c_int,
+ c_uint
+)
+
+
class Range:
    """An inclusive integer interval [min, max]."""

    def __init__(self, min_val, max_val):
        self.min = min_val
        self.max = max_val

    def is_within(self, val):
        """Return True when ``val`` lies inside the interval, bounds included."""
        return self.min <= val <= self.max
+
+
class DefaultRanges(Range, enum.Enum):
    """Value ranges of common C integer types, exposed as Range enum members."""

    # c_uintN(~0) wraps to the all-ones bit pattern, i.e. the type's maximum.
    UINT8 = 0, c_uint8(~0).value
    UINT16 = 0, c_uint16(~0).value
    UINT32 = 0, c_uint32(~0).value
    UINT64 = 0, c_uint64(~0).value
    # Signed int range derived from the unsigned maximum of the same width.
    INT = -(c_uint(~0).value // 2) - 1, c_uint(~0).value // 2
+
+
class RandomGenerator:
    """Iterator yielding reproducible pseudo-random integers.

    The RNG is seeded from ``config/random.cfg`` so a test run can be
    replayed; ``count`` bounds how many values are produced and
    ``base_range`` is the inclusive interval drawn from.
    """

    def __init__(self, base_range=DefaultRanges.INT, count=1000):
        # Seed from the shared config file so every generator in a run
        # produces the same sequence.
        with open("config/random.cfg") as f:
            self.random = random.Random(int(f.read()))
        self.exclude = []
        self.range = base_range
        self.count = count
        self.n = 0

    def exclude_range(self, excl_range):
        """Exclude ``excl_range`` from generated values; return self for chaining."""
        self.exclude.append(excl_range)
        return self

    def __iter__(self):
        return self

    def __next__(self):
        if self.n >= self.count:
            raise StopIteration()
        self.n += 1
        # Rejection sampling: redraw until the value avoids every excluded
        # range. any() on an empty exclusion list is False, so no guard is
        # needed. NOTE(review): this loops forever if the exclusions cover
        # the whole base range -- callers must leave at least one legal value.
        while True:
            val = self.random.randint(self.range.min, self.range.max)
            if not any(excl.is_within(val) for excl in self.exclude):
                return val
+
+
class RandomStringGenerator:
    """Iterator yielding reproducible pseudo-random strings.

    Yields up to ``count`` strings whose lengths fall within ``len_range``,
    cycling through several character classes (digits, alphanumerics, lower,
    upper, printable, punctuation, hex digits).
    """

    def __init__(self, len_range=Range(0, 20), count=700):
        # Seed from the shared config file so runs are reproducible.
        with open("config/random.cfg") as f:
            self.random = random.Random(int(f.read()))
        self.generator = self.__string_generator(len_range)
        self.count = count
        self.n = 0

    def __string_generator(self, len_range):
        while True:
            for charset in [string.digits,
                            string.ascii_letters + string.digits,
                            string.ascii_lowercase,
                            string.ascii_uppercase,
                            string.printable,
                            string.punctuation,
                            string.hexdigits]:
                # BUG FIX: use the seeded instance RNG (self.random), not the
                # module-level random.choice -- otherwise the characters are
                # not reproducible from the config seed.
                yield ''.join(self.random.choice(charset) for _ in range(
                    self.random.randint(len_range.min, len_range.max)
                ))

    def __iter__(self):
        return self

    def __next__(self):
        if self.n >= self.count:
            raise StopIteration()
        self.n += 1
        return next(self.generator)
diff --git a/src/spdk/ocf/tests/functional/utils/configure_random.py b/src/spdk/ocf/tests/functional/utils/configure_random.py
new file mode 100755
index 000000000..71a044014
--- /dev/null
+++ b/src/spdk/ocf/tests/functional/utils/configure_random.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python3
+
+#
+# Copyright(c) 2012-2018 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import sys
+import random
+
+
# Persist a fresh random seed so subsequent test runs can reproduce it.
seed = random.randint(0, sys.maxsize)
with open("config/random.cfg", "w") as cfg_file:
    cfg_file.write(str(seed))